diff --git a/bin/mprocs.yaml b/bin/mprocs.yaml index cb3761250d035..f57efc845d834 100644 --- a/bin/mprocs.yaml +++ b/bin/mprocs.yaml @@ -19,8 +19,8 @@ procs: shell: 'bin/check_kafka_clickhouse_up && bin/check_temporal_up && python manage.py start_temporal_worker' docker-compose: - shell: 'docker compose -f docker-compose.dev.yml up' - stop: - send-keys: [''] + # docker-compose makes sure the stack is up, and then follows its logs - but doesn't tear down on exit for speed + shell: 'docker compose -f docker-compose.dev.yml up -d && docker compose -f docker-compose.dev.yml logs --tail=0 -f' mouse_scroll_speed: 1 +scrollback: 10000 diff --git a/bin/start b/bin/start index ceaddede2140f..7ec881e57b81c 100755 --- a/bin/start +++ b/bin/start @@ -9,4 +9,14 @@ export HOG_HOOK_URL=${HOG_HOOK_URL:-http://localhost:3300/hoghook} [ ! -f ./share/GeoLite2-City.mmdb ] && ( curl -L "https://mmdbcdn.posthog.net/" --http1.1 | brotli --decompress --output=./share/GeoLite2-City.mmdb ) +if ! command -v mprocs &> /dev/null; then + if command -v brew &> /dev/null; then + echo "🔁 Installing mprocs via Homebrew..." + brew install mprocs + else + echo "👉 To run bin/start, install mprocs: https://github.com/pvolok/mprocs#installation" + exit 1 + fi +fi + exec mprocs --config bin/mprocs.yaml diff --git a/cypress/e2e/experiments.cy.ts b/cypress/e2e/experiments.cy.ts index a635cf7841cad..e0e8339920bee 100644 --- a/cypress/e2e/experiments.cy.ts +++ b/cypress/e2e/experiments.cy.ts @@ -1,5 +1,3 @@ -import { setupFeatureFlags } from '../support/decide' - describe('Experiments', () => { let randomNum let experimentName @@ -47,10 +45,6 @@ describe('Experiments', () => { }) const createExperimentInNewUi = (): void => { - setupFeatureFlags({ - 'new-experiments-ui': true, - }) - cy.visit('/experiments') // Name, flag key, description @@ -96,6 +90,7 @@ describe('Experiments', () => { cy.get('[data-attr="experiment-creation-date"]').contains('a few seconds ago').should('be.visible') cy.get('[data-attr="experiment-start-date"]').should('not.exist') + cy.wait(1000) cy.get('[data-attr="launch-experiment"]').first().click() cy.get('[data-attr="experiment-creation-date"]').should('not.exist') cy.get('[data-attr="experiment-start-date"]').contains('a few seconds ago').should('be.visible') @@ -114,6 +109,7 @@ describe('Experiments', () => { it('move start date', () => { createExperimentInNewUi() + cy.wait(1000) cy.get('[data-attr="launch-experiment"]').first().click() cy.get('[data-attr="move-experiment-start-date"]').first().click() diff --git a/cypress/e2e/insights-reload-query.ts b/cypress/e2e/insights-reload-query.ts index 2f944f8993da7..55685ae309fb8 100644 --- a/cypress/e2e/insights-reload-query.ts +++ b/cypress/e2e/insights-reload-query.ts @@ -1,5 +1,3 @@ -import JSONCrush from 'jsoncrush' - describe('ReloadInsight component', () => { beforeEach(() => { // Clear local storage before each test to ensure a clean state @@ -21,8 +19,7 @@ describe('ReloadInsight component', () => { const draftQuery = window.localStorage.getItem(`draft-query-${currentTeamId}`) expect(draftQuery).to.not.be.null - const draftQueryObjUncrushed = JSONCrush.uncrush(draftQuery) - const draftQueryObj = JSON.parse(draftQueryObjUncrushed) + const draftQueryObj = JSON.parse(draftQuery) expect(draftQueryObj).to.have.property('query') diff --git a/ee/api/test/test_hooks.py b/ee/api/test/test_hooks.py index 0017079f4a77d..3cfa9595ce1c8 100644 --- a/ee/api/test/test_hooks.py +++ b/ee/api/test/test_hooks.py @@ -139,8 +139,9 @@ def 
test_create_hog_function_via_hook(self): "target": "https://hooks.zapier.com/{inputs.hook}", }, }, + "order": 2, }, - "debug": {}, + "debug": {"order": 1}, "hook": { "bytecode": [ "_H", @@ -149,6 +150,7 @@ def test_create_hog_function_via_hook(self): "hooks/standard/1234/abcd", ], "value": "hooks/standard/1234/abcd", + "order": 0, }, } diff --git a/ee/hogai/django_checkpoint/checkpointer.py b/ee/hogai/django_checkpoint/checkpointer.py index 78817dca9df76..a57140fecdc13 100644 --- a/ee/hogai/django_checkpoint/checkpointer.py +++ b/ee/hogai/django_checkpoint/checkpointer.py @@ -94,7 +94,9 @@ def _get_checkpoint_channel_values( query = Q() for channel, version in loaded_checkpoint["channel_versions"].items(): query |= Q(channel=channel, version=version) - return checkpoint.blobs.filter(query) + return ConversationCheckpointBlob.objects.filter( + Q(thread_id=checkpoint.thread_id, checkpoint_ns=checkpoint.checkpoint_ns) & query + ) def list( self, @@ -238,6 +240,7 @@ def put( blobs.append( ConversationCheckpointBlob( checkpoint=updated_checkpoint, + thread_id=thread_id, channel=channel, version=str(version), type=type, diff --git a/ee/hogai/django_checkpoint/test/test_checkpointer.py b/ee/hogai/django_checkpoint/test/test_checkpointer.py index 2f8fd7f4a60ed..d7c7a9117862d 100644 --- a/ee/hogai/django_checkpoint/test/test_checkpointer.py +++ b/ee/hogai/django_checkpoint/test/test_checkpointer.py @@ -1,6 +1,7 @@ # type: ignore -from typing import Any, TypedDict +import operator +from typing import Annotated, Any, Optional, TypedDict from langchain_core.runnables import RunnableConfig from langgraph.checkpoint.base import ( @@ -13,6 +14,7 @@ from langgraph.errors import NodeInterrupt from langgraph.graph import END, START from langgraph.graph.state import CompiledStateGraph, StateGraph +from pydantic import BaseModel, Field from ee.hogai.django_checkpoint.checkpointer import DjangoCheckpointer from ee.models.assistant import ( @@ -272,3 +274,152 @@ def test_resuming(self): self.assertEqual(res, {"val": 3}) snapshot = graph.get_state(config) self.assertFalse(snapshot.next) + + def test_checkpoint_blobs_are_bound_to_thread(self): + class State(TypedDict, total=False): + messages: Annotated[list[str], operator.add] + string: Optional[str] + + graph = StateGraph(State) + + def handle_node1(state: State): + return + + def handle_node2(state: State): + raise NodeInterrupt("test") + + graph.add_node("node1", handle_node1) + graph.add_node("node2", handle_node2) + + graph.add_edge(START, "node1") + graph.add_edge("node1", "node2") + graph.add_edge("node2", END) + + compiled = graph.compile(checkpointer=DjangoCheckpointer()) + + thread = Conversation.objects.create(user=self.user, team=self.team) + config = {"configurable": {"thread_id": str(thread.id)}} + compiled.invoke({"messages": ["hello"], "string": "world"}, config=config) + + snapshot = compiled.get_state(config) + self.assertIsNotNone(snapshot.next) + self.assertEqual(snapshot.tasks[0].interrupts[0].value, "test") + saved_state = snapshot.values + self.assertEqual(saved_state["messages"], ["hello"]) + self.assertEqual(saved_state["string"], "world") + + def test_checkpoint_can_save_and_load_pydantic_state(self): + class State(BaseModel): + messages: Annotated[list[str], operator.add] + string: Optional[str] + + class PartialState(BaseModel): + messages: Optional[list[str]] = Field(default=None) + string: Optional[str] = Field(default=None) + + graph = StateGraph(State) + + def handle_node1(state: State): + return PartialState() + + def 
handle_node2(state: State): + raise NodeInterrupt("test") + + graph.add_node("node1", handle_node1) + graph.add_node("node2", handle_node2) + + graph.add_edge(START, "node1") + graph.add_edge("node1", "node2") + graph.add_edge("node2", END) + + compiled = graph.compile(checkpointer=DjangoCheckpointer()) + + thread = Conversation.objects.create(user=self.user, team=self.team) + config = {"configurable": {"thread_id": str(thread.id)}} + compiled.invoke({"messages": ["hello"], "string": "world"}, config=config) + + snapshot = compiled.get_state(config) + self.assertIsNotNone(snapshot.next) + self.assertEqual(snapshot.tasks[0].interrupts[0].value, "test") + saved_state = snapshot.values + self.assertEqual(saved_state["messages"], ["hello"]) + self.assertEqual(saved_state["string"], "world") + + def test_saved_blobs(self): + class State(TypedDict, total=False): + messages: Annotated[list[str], operator.add] + + graph = StateGraph(State) + + def handle_node1(state: State): + return {"messages": ["world"]} + + graph.add_node("node1", handle_node1) + + graph.add_edge(START, "node1") + graph.add_edge("node1", END) + + checkpointer = DjangoCheckpointer() + compiled = graph.compile(checkpointer=checkpointer) + + thread = Conversation.objects.create(user=self.user, team=self.team) + config = {"configurable": {"thread_id": str(thread.id)}} + compiled.invoke({"messages": ["hello"]}, config=config) + + snapshot = compiled.get_state(config) + self.assertFalse(snapshot.next) + saved_state = snapshot.values + self.assertEqual(saved_state["messages"], ["hello", "world"]) + + blobs = list(ConversationCheckpointBlob.objects.filter(thread=thread)) + self.assertEqual(len(blobs), 7) + + # Set initial state + self.assertEqual(blobs[0].channel, "__start__") + self.assertEqual(blobs[0].type, "msgpack") + self.assertEqual( + checkpointer.serde.loads_typed((blobs[0].type, blobs[0].blob)), + {"messages": ["hello"]}, + ) + + # Set first node + self.assertEqual(blobs[1].channel, "__start__") + self.assertEqual(blobs[1].type, "empty") + self.assertIsNone(blobs[1].blob) + + # Set value channels before start + self.assertEqual(blobs[2].channel, "messages") + self.assertEqual(blobs[2].type, "msgpack") + self.assertEqual( + checkpointer.serde.loads_typed((blobs[2].type, blobs[2].blob)), + ["hello"], + ) + + # Transition to node1 + self.assertEqual(blobs[3].channel, "start:node1") + self.assertEqual(blobs[3].type, "msgpack") + self.assertEqual( + checkpointer.serde.loads_typed((blobs[3].type, blobs[3].blob)), + "__start__", + ) + + # Set new state for messages + self.assertEqual(blobs[4].channel, "messages") + self.assertEqual(blobs[4].type, "msgpack") + self.assertEqual( + checkpointer.serde.loads_typed((blobs[4].type, blobs[4].blob)), + ["hello", "world"], + ) + + # After setting a state + self.assertEqual(blobs[5].channel, "start:node1") + self.assertEqual(blobs[5].type, "empty") + self.assertIsNone(blobs[5].blob) + + # Set last step + self.assertEqual(blobs[6].channel, "node1") + self.assertEqual(blobs[6].type, "msgpack") + self.assertEqual( + checkpointer.serde.loads_typed((blobs[6].type, blobs[6].blob)), + "node1", + ) diff --git a/ee/hogai/eval/conftest.py b/ee/hogai/eval/conftest.py index d0bc75348eeac..1a88ebffa2e33 100644 --- a/ee/hogai/eval/conftest.py +++ b/ee/hogai/eval/conftest.py @@ -1,28 +1,107 @@ +import functools +from collections.abc import Generator +from pathlib import Path + import pytest +from django.conf import settings +from django.test import override_settings +from langchain_core.runnables import 
RunnableConfig
+
+from ee.models import Conversation
+from posthog.demo.matrix.manager import MatrixManager
+from posthog.models import Organization, Project, Team, User
+from posthog.tasks.demo_create_data import HedgeboxMatrix
+from posthog.test.base import BaseTest
+
+
+# Flaky is a handy tool, but it always runs setup fixtures for retries.
+# This decorator will just retry without re-running setup.
+def retry_test_only(max_retries=3):
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            last_error: Exception | None = None
+            for attempt in range(max_retries):
+                try:
+                    return func(*args, **kwargs)
+                except Exception as e:
+                    last_error = e
+                    print(f"\nRetrying test (attempt {attempt + 1}/{max_retries})...")  # noqa
+            if last_error:
+                raise last_error
+
+        return wrapper
+
+    return decorator
+
+
+# Apply decorators to all tests in the package.
+def pytest_collection_modifyitems(items):
+    current_dir = Path(__file__).parent
+    for item in items:
+        if Path(item.fspath).is_relative_to(current_dir):
+            item.add_marker(
+                pytest.mark.skipif(not settings.IN_EVAL_TESTING, reason="Only runs for the assistant evaluation")
+            )
+            # Apply our custom retry decorator to the test function
+            item.obj = retry_test_only(max_retries=3)(item.obj)
+
+
+@pytest.fixture(scope="package")
+def team(django_db_blocker) -> Generator[Team, None, None]:
+    with django_db_blocker.unblock():
+        organization = Organization.objects.create(name=BaseTest.CONFIG_ORGANIZATION_NAME)
+        project = Project.objects.create(id=Team.objects.increment_id_sequence(), organization=organization)
+        team = Team.objects.create(
+            id=project.id,
+            project=project,
+            organization=organization,
+            test_account_filters=[
+                {
+                    "key": "email",
+                    "value": "@posthog.com",
+                    "operator": "not_icontains",
+                    "type": "person",
+                }
+            ],
+            has_completed_onboarding_for={"product_analytics": True},
+        )
+        yield team
+        organization.delete()
 
-from posthog.test.base import run_clickhouse_statement_in_parallel
 
+@pytest.fixture(scope="package")
+def user(team, django_db_blocker) -> Generator[User, None, None]:
+    with django_db_blocker.unblock():
+        user = User.objects.create_and_join(team.organization, "eval@posthog.com", "password1234")
+        yield user
+        user.delete()
 
-@pytest.fixture(scope="module", autouse=True)
-def setup_kafka_tables(django_db_setup):
-    from posthog.clickhouse.client import sync_execute
-    from posthog.clickhouse.schema import (
-        CREATE_KAFKA_TABLE_QUERIES,
-        build_query,
-    )
-    from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE
-
-    kafka_queries = list(map(build_query, CREATE_KAFKA_TABLE_QUERIES))
-    run_clickhouse_statement_in_parallel(kafka_queries)
+# DB access comes from the requesting test, as pytest marks have no effect on fixtures.
+@pytest.fixture
+def runnable_config(team, user) -> Generator[RunnableConfig, None, None]:
+    conversation = Conversation.objects.create(team=team, user=user)
+    yield {
+        "configurable": {
+            "thread_id": conversation.id,
+        }
+    }
+    conversation.delete()
 
-    yield
-
-    kafka_tables = sync_execute(
-        f"""
-        SELECT name
-        FROM system.tables
-        WHERE database = '{CLICKHOUSE_DATABASE}' AND name LIKE 'kafka_%'
-        """,
-    )
-    kafka_truncate_queries = [f"DROP TABLE {table[0]} ON CLUSTER '{CLICKHOUSE_CLUSTER}'" for table in kafka_tables]
-    run_clickhouse_statement_in_parallel(kafka_truncate_queries)
+
+@pytest.fixture(scope="package", autouse=True)
+def setup_test_data(django_db_setup, team, user, django_db_blocker):
+    with django_db_blocker.unblock():
+        matrix = HedgeboxMatrix(
+            seed="b1ef3c66-5f43-488a-98be-6b46d92fbcef",  # this seed
generates all events + days_past=120, + days_future=30, + n_clusters=500, + group_type_index_offset=0, + ) + matrix_manager = MatrixManager(matrix, print_steps=True) + with override_settings(TEST=False): + # Simulation saving should occur in non-test mode, so that Kafka isn't mocked. Normally in tests we don't + # want to ingest via Kafka, but simulation saving is specifically designed to use that route for speed + matrix_manager.run_on_team(team, user) diff --git a/ee/hogai/eval/tests/test_eval_funnel_generator.py b/ee/hogai/eval/tests/test_eval_funnel_generator.py index 4d7876ca6f73c..5f0f29243296a 100644 --- a/ee/hogai/eval/tests/test_eval_funnel_generator.py +++ b/ee/hogai/eval/tests/test_eval_funnel_generator.py @@ -1,40 +1,46 @@ +from collections.abc import Callable from typing import cast +import pytest from langgraph.graph.state import CompiledStateGraph from ee.hogai.assistant import AssistantGraph -from ee.hogai.eval.utils import EvalBaseTest from ee.hogai.utils.types import AssistantNodeName, AssistantState from posthog.schema import AssistantFunnelsQuery, HumanMessage, VisualizationMessage -class TestEvalFunnelGenerator(EvalBaseTest): - def _call_node(self, query: str, plan: str) -> AssistantFunnelsQuery: - graph: CompiledStateGraph = ( - AssistantGraph(self.team) - .add_edge(AssistantNodeName.START, AssistantNodeName.FUNNEL_GENERATOR) - .add_funnel_generator(AssistantNodeName.END) - .compile() - ) +@pytest.fixture +def call_node(team, runnable_config) -> Callable[[str, str], AssistantFunnelsQuery]: + graph: CompiledStateGraph = ( + AssistantGraph(team) + .add_edge(AssistantNodeName.START, AssistantNodeName.FUNNEL_GENERATOR) + .add_funnel_generator(AssistantNodeName.END) + .compile() + ) + + def callable(query: str, plan: str) -> AssistantFunnelsQuery: state = graph.invoke( AssistantState(messages=[HumanMessage(content=query)], plan=plan), - self._get_config(), + runnable_config, ) return cast(VisualizationMessage, AssistantState.model_validate(state).messages[-1]).answer - def test_node_replaces_equals_with_contains(self): - query = "what is the conversion rate from a page view to sign up for users with name John?" - plan = """Sequence: - 1. $pageview - - property filter 1 - - person - - name - - equals - - John - 2. signed_up - """ - actual_output = self._call_node(query, plan).model_dump_json(exclude_none=True) - assert "exact" not in actual_output - assert "icontains" in actual_output - assert "John" not in actual_output - assert "john" in actual_output + return callable + + +def test_node_replaces_equals_with_contains(call_node): + query = "what is the conversion rate from a page view to sign up for users with name John?" + plan = """Sequence: + 1. $pageview + - property filter 1 + - person + - name + - equals + - John + 2. 
signed_up + """ + actual_output = call_node(query, plan).model_dump_json(exclude_none=True) + assert "exact" not in actual_output + assert "icontains" in actual_output + assert "John" not in actual_output + assert "john" in actual_output diff --git a/ee/hogai/eval/tests/test_eval_funnel_planner.py b/ee/hogai/eval/tests/test_eval_funnel_planner.py index 9adbd75e77c6c..c8bc25bc0b5dc 100644 --- a/ee/hogai/eval/tests/test_eval_funnel_planner.py +++ b/ee/hogai/eval/tests/test_eval_funnel_planner.py @@ -1,208 +1,224 @@ +from collections.abc import Callable + +import pytest from deepeval import assert_test from deepeval.metrics import GEval from deepeval.test_case import LLMTestCase, LLMTestCaseParams +from langchain_core.runnables.config import RunnableConfig from langgraph.graph.state import CompiledStateGraph from ee.hogai.assistant import AssistantGraph -from ee.hogai.eval.utils import EvalBaseTest from ee.hogai.utils.types import AssistantNodeName, AssistantState from posthog.schema import HumanMessage -class TestEvalFunnelPlanner(EvalBaseTest): - def _get_plan_correctness_metric(self): - return GEval( - name="Funnel Plan Correctness", - criteria="You will be given expected and actual generated plans to provide a taxonomy to answer a user's question with a funnel insight. Compare the plans to determine whether the taxonomy of the actual plan matches the expected plan. Do not apply general knowledge about funnel insights.", - evaluation_steps=[ - "A plan must define at least two series in the sequence, but it is not required to define any filters, exclusion steps, or a breakdown.", - "Compare events, properties, math types, and property values of 'expected output' and 'actual output'. Do not penalize if the actual output does not include a timeframe.", - "Check if the combination of events, properties, and property values in 'actual output' can answer the user's question according to the 'expected output'.", - # The criteria for aggregations must be more specific because there isn't a way to bypass them. - "Check if the math types in 'actual output' match those in 'expected output.' If the aggregation type is specified by a property, user, or group in 'expected output', the same property, user, or group must be used in 'actual output'.", - "If 'expected output' contains exclusion steps, check if 'actual output' contains those, and heavily penalize if the exclusion steps are not present or different.", - "If 'expected output' contains a breakdown, check if 'actual output' contains a similar breakdown, and heavily penalize if the breakdown is not present or different. Plans may only have one breakdown.", - # We don't want to see in the output unnecessary property filters. The assistant tries to use them all the time. - "Heavily penalize if the 'actual output' contains any excessive output not present in the 'expected output'. For example, the `is set` operator in filters should not be used unless the user explicitly asks for it.", - ], - evaluation_params=[ - LLMTestCaseParams.INPUT, - LLMTestCaseParams.EXPECTED_OUTPUT, - LLMTestCaseParams.ACTUAL_OUTPUT, - ], - threshold=0.7, - ) +@pytest.fixture(scope="module") +def metric(): + return GEval( + name="Funnel Plan Correctness", + criteria="You will be given expected and actual generated plans to provide a taxonomy to answer a user's question with a funnel insight. Compare the plans to determine whether the taxonomy of the actual plan matches the expected plan. 
Do not apply general knowledge about funnel insights.", + evaluation_steps=[ + "A plan must define at least two series in the sequence, but it is not required to define any filters, exclusion steps, or a breakdown.", + "Compare events, properties, math types, and property values of 'expected output' and 'actual output'. Do not penalize if the actual output does not include a timeframe.", + "Check if the combination of events, properties, and property values in 'actual output' can answer the user's question according to the 'expected output'.", + # The criteria for aggregations must be more specific because there isn't a way to bypass them. + "Check if the math types in 'actual output' match those in 'expected output.' If the aggregation type is specified by a property, user, or group in 'expected output', the same property, user, or group must be used in 'actual output'.", + "If 'expected output' contains exclusion steps, check if 'actual output' contains those, and heavily penalize if the exclusion steps are not present or different.", + "If 'expected output' contains a breakdown, check if 'actual output' contains a similar breakdown, and heavily penalize if the breakdown is not present or different. Plans may only have one breakdown.", + # We don't want to see in the output unnecessary property filters. The assistant tries to use them all the time. + "Heavily penalize if the 'actual output' contains any excessive output not present in the 'expected output'. For example, the `is set` operator in filters should not be used unless the user explicitly asks for it.", + ], + evaluation_params=[ + LLMTestCaseParams.INPUT, + LLMTestCaseParams.EXPECTED_OUTPUT, + LLMTestCaseParams.ACTUAL_OUTPUT, + ], + threshold=0.7, + ) - def _call_node(self, query): - graph: CompiledStateGraph = ( - AssistantGraph(self.team) - .add_edge(AssistantNodeName.START, AssistantNodeName.FUNNEL_PLANNER) - .add_funnel_planner(AssistantNodeName.END) - .compile() - ) + +@pytest.fixture +def call_node(team, runnable_config: RunnableConfig) -> Callable[[str], str]: + graph: CompiledStateGraph = ( + AssistantGraph(team) + .add_edge(AssistantNodeName.START, AssistantNodeName.FUNNEL_PLANNER) + .add_funnel_planner(AssistantNodeName.END) + .compile() + ) + + def callable(query: str) -> str: state = graph.invoke( AssistantState(messages=[HumanMessage(content=query)]), - self._get_config(), + runnable_config, ) return AssistantState.model_validate(state).plan or "" - def test_basic_funnel(self): - query = "what was the conversion from a page view to sign up?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. $pageview - 2. signed_up - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_outputs_at_least_two_events(self): - """ - Ambigious query. The funnel must return at least two events. - """ - query = "how many users paid a bill?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. any event - 2. upgrade_plan - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_no_excessive_property_filters(self): - query = "Show the user conversion from a sign up to a file download" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. signed_up - 2. 
downloaded_file - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) + return callable - def test_basic_filtering(self): - query = ( - "What was the conversion from uploading a file to downloading it from Chrome and Safari in the last 30d?" - ) - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. uploaded_file - - property filter 1: - - entity: event - - property name: $browser - - property type: String - - operator: equals - - property value: Chrome - - property filter 2: - - entity: event - - property name: $browser - - property type: String - - operator: equals - - property value: Safari - 2. downloaded_file - - property filter 1: - - entity: event - - property name: $browser - - property type: String - - operator: equals - - property value: Chrome - - property filter 2: - - entity: event - - property name: $browser - - property type: String - - operator: equals - - property value: Safari - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_exclusion_steps(self): - query = "What was the conversion from uploading a file to downloading it in the last 30d excluding users that deleted a file?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. uploaded_file - 2. downloaded_file - - Exclusions: - - deleted_file - - start index: 0 - - end index: 1 - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_breakdown(self): - query = "Show a conversion from uploading a file to downloading it segmented by a browser" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. uploaded_file - 2. downloaded_file - - Breakdown by: - - entity: event - - property name: $browser - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_needle_in_a_haystack(self): - query = "What was the conversion from a sign up to a paying customer on the personal-pro plan?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. signed_up - 2. paid_bill - - property filter 1: - - entity: event - - property name: plan - - property type: String - - operator: equals - - property value: personal/pro - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_planner_outputs_multiple_series_from_a_single_series_question(self): - query = "What's our sign-up funnel?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. $pageview - 2. signed_up - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_funnel_does_not_include_timeframe(self): - query = "what was the conversion from a page view to sign up for event time before 2024-01-01?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Sequence: - 1. $pageview - 2. signed_up - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) + +def test_basic_funnel(metric, call_node): + query = "what was the conversion from a page view to sign up?" + test_case = LLMTestCase( + input=query, + expected_output=""" + Sequence: + 1. $pageview + 2. 
signed_up
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_outputs_at_least_two_events(metric, call_node):
+    """
+    Ambiguous query. The funnel must return at least two events.
+    """
+    query = "how many users paid a bill?"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. any event
+        2. upgrade_plan
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_no_excessive_property_filters(metric, call_node):
+    query = "Show the user conversion from a sign up to a file download"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. signed_up
+        2. downloaded_file
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_basic_filtering(metric, call_node):
+    query = "What was the conversion from uploading a file to downloading it from Chrome and Safari in the last 30d?"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. uploaded_file
+        - property filter 1:
+            - entity: event
+            - property name: $browser
+            - property type: String
+            - operator: equals
+            - property value: Chrome
+        - property filter 2:
+            - entity: event
+            - property name: $browser
+            - property type: String
+            - operator: equals
+            - property value: Safari
+        2. downloaded_file
+        - property filter 1:
+            - entity: event
+            - property name: $browser
+            - property type: String
+            - operator: equals
+            - property value: Chrome
+        - property filter 2:
+            - entity: event
+            - property name: $browser
+            - property type: String
+            - operator: equals
+            - property value: Safari
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_exclusion_steps(metric, call_node):
+    query = "What was the conversion from uploading a file to downloading it in the last 30d excluding users that deleted a file?"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. uploaded_file
+        2. downloaded_file
+
+        Exclusions:
+        - deleted_file
+        - start index: 0
+        - end index: 1
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_breakdown(metric, call_node):
+    query = "Show a conversion from uploading a file to downloading it segmented by a browser"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. uploaded_file
+        2. downloaded_file
+
+        Breakdown by:
+        - entity: event
+        - property name: $browser
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_needle_in_a_haystack(metric, call_node):
+    query = "What was the conversion from a sign up to a paying customer on the personal-pro plan?"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. signed_up
+        2. paid_bill
+        - property filter 1:
+            - entity: event
+            - property name: plan
+            - property type: String
+            - operator: equals
+            - property value: personal/pro
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_planner_outputs_multiple_series_from_a_single_series_question(metric, call_node):
+    query = "What's our sign-up funnel?"
+    test_case = LLMTestCase(
+        input=query,
+        expected_output="""
+        Sequence:
+        1. $pageview
+        2. signed_up
+        """,
+        actual_output=call_node(query),
+    )
+    assert_test(test_case, [metric])
+
+
+def test_funnel_does_not_include_timeframe(metric, call_node):
+    query = "what was the conversion from a page view to sign up for event time before 2024-01-01?"
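Aside: new planner eval cases follow this fixture pattern. `metric` is module-scoped so the GEval judge is constructed once, while `call_node` compiles a fresh planner graph per test. A minimal sketch of what an additional case could look like, reusing this module's imports; the query, expected plan, and `$geoip_country_code` property are illustrative assumptions, not part of this diff:

```python
from deepeval import assert_test
from deepeval.test_case import LLMTestCase


def test_breakdown_by_country(metric, call_node):
    # Hypothetical case: exercises the metric's breakdown rule with a
    # country-level segmentation of the sign-up funnel.
    query = "Show the sign-up conversion split by country"
    test_case = LLMTestCase(
        input=query,
        expected_output="""
        Sequence:
        1. $pageview
        2. signed_up

        Breakdown by:
        - entity: event
        - property name: $geoip_country_code
        """,
        actual_output=call_node(query),
    )
    assert_test(test_case, [metric])
```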
+ test_case = LLMTestCase( + input=query, + expected_output=""" + Sequence: + 1. $pageview + 2. signed_up + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) diff --git a/ee/hogai/eval/tests/test_eval_router.py b/ee/hogai/eval/tests/test_eval_router.py index c1307e9d40f00..84e5c4c809972 100644 --- a/ee/hogai/eval/tests/test_eval_router.py +++ b/ee/hogai/eval/tests/test_eval_router.py @@ -1,69 +1,80 @@ +from collections.abc import Callable from typing import cast +import pytest from langgraph.graph.state import CompiledStateGraph from ee.hogai.assistant import AssistantGraph -from ee.hogai.eval.utils import EvalBaseTest from ee.hogai.utils.types import AssistantNodeName, AssistantState from posthog.schema import HumanMessage, RouterMessage -class TestEvalRouter(EvalBaseTest): - def _call_node(self, query: str | list): - graph: CompiledStateGraph = ( - AssistantGraph(self.team) - .add_start() - .add_router(path_map={"trends": AssistantNodeName.END, "funnel": AssistantNodeName.END}) - .compile() - ) +@pytest.fixture +def call_node(team, runnable_config) -> Callable[[str | list], str]: + graph: CompiledStateGraph = ( + AssistantGraph(team) + .add_start() + .add_router(path_map={"trends": AssistantNodeName.END, "funnel": AssistantNodeName.END}) + .compile() + ) + + def callable(query: str | list) -> str: messages = [HumanMessage(content=query)] if isinstance(query, str) else query state = graph.invoke( AssistantState(messages=messages), - self._get_config(), + runnable_config, ) return cast(RouterMessage, AssistantState.model_validate(state).messages[-1]).content - def test_outputs_basic_trends_insight(self): - query = "Show the $pageview trend" - res = self._call_node(query) - self.assertEqual(res, "trends") - - def test_outputs_basic_funnel_insight(self): - query = "What is the conversion rate of users who uploaded a file to users who paid for a plan?" - res = self._call_node(query) - self.assertEqual(res, "funnel") - - def test_converts_trends_to_funnel(self): - conversation = [ - HumanMessage(content="Show trends of $pageview and $identify"), - RouterMessage(content="trends"), - HumanMessage(content="Convert this insight to a funnel"), - ] - res = self._call_node(conversation[:1]) - self.assertEqual(res, "trends") - res = self._call_node(conversation) - self.assertEqual(res, "funnel") - - def test_converts_funnel_to_trends(self): - conversation = [ - HumanMessage(content="What is the conversion from a page view to a sign up?"), - RouterMessage(content="funnel"), - HumanMessage(content="Convert this insight to a trends"), - ] - res = self._call_node(conversation[:1]) - self.assertEqual(res, "funnel") - res = self._call_node(conversation) - self.assertEqual(res, "trends") - - def test_outputs_single_trends_insight(self): - """ - Must display a trends insight because it's not possible to build a funnel with a single series. - """ - query = "how many users upgraded their plan to personal pro?" - res = self._call_node(query) - self.assertEqual(res, "trends") - - def test_classifies_funnel_with_single_series(self): - query = "What's our sign-up funnel?" - res = self._call_node(query) - self.assertEqual(res, "funnel") + return callable + + +def test_outputs_basic_trends_insight(call_node): + query = "Show the $pageview trend" + res = call_node(query) + assert res == "trends" + + +def test_outputs_basic_funnel_insight(call_node): + query = "What is the conversion rate of users who uploaded a file to users who paid for a plan?" 
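Aside: because the router's `call_node` fixture accepts either a bare string or a full message list, multi-turn routing can be exercised by replaying conversation history. A sketch of a hypothetical extra case under that assumption; the messages are illustrative, not part of this diff:

```python
from posthog.schema import HumanMessage, RouterMessage


def test_reroutes_on_followup(call_node):
    # Hypothetical case: the router should reclassify once the user asks
    # for a different visualization of the same data.
    conversation = [
        HumanMessage(content="What is our upload-to-download conversion?"),
        RouterMessage(content="funnel"),
        HumanMessage(content="Now show this as a weekly trend instead"),
    ]
    assert call_node(conversation[:1]) == "funnel"
    assert call_node(conversation) == "trends"
```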
+ res = call_node(query) + assert res == "funnel" + + +def test_converts_trends_to_funnel(call_node): + conversation = [ + HumanMessage(content="Show trends of $pageview and $identify"), + RouterMessage(content="trends"), + HumanMessage(content="Convert this insight to a funnel"), + ] + res = call_node(conversation[:1]) + assert res == "trends" + res = call_node(conversation) + assert res == "funnel" + + +def test_converts_funnel_to_trends(call_node): + conversation = [ + HumanMessage(content="What is the conversion from a page view to a sign up?"), + RouterMessage(content="funnel"), + HumanMessage(content="Convert this insight to a trends"), + ] + res = call_node(conversation[:1]) + assert res == "funnel" + res = call_node(conversation) + assert res == "trends" + + +def test_outputs_single_trends_insight(call_node): + """ + Must display a trends insight because it's not possible to build a funnel with a single series. + """ + query = "how many users upgraded their plan to personal pro?" + res = call_node(query) + assert res == "trends" + + +def test_classifies_funnel_with_single_series(call_node): + query = "What's our sign-up funnel?" + res = call_node(query) + assert res == "funnel" diff --git a/ee/hogai/eval/tests/test_eval_trends_generator.py b/ee/hogai/eval/tests/test_eval_trends_generator.py index 496bbf0100b51..c8491957c868f 100644 --- a/ee/hogai/eval/tests/test_eval_trends_generator.py +++ b/ee/hogai/eval/tests/test_eval_trends_generator.py @@ -1,58 +1,65 @@ +from collections.abc import Callable from typing import cast +import pytest from langgraph.graph.state import CompiledStateGraph from ee.hogai.assistant import AssistantGraph -from ee.hogai.eval.utils import EvalBaseTest from ee.hogai.utils.types import AssistantNodeName, AssistantState from posthog.schema import AssistantTrendsQuery, HumanMessage, VisualizationMessage -class TestEvalTrendsGenerator(EvalBaseTest): - def _call_node(self, query: str, plan: str) -> AssistantTrendsQuery: - graph: CompiledStateGraph = ( - AssistantGraph(self.team) - .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_GENERATOR) - .add_trends_generator(AssistantNodeName.END) - .compile() - ) +@pytest.fixture +def call_node(team, runnable_config) -> Callable[[str, str], AssistantTrendsQuery]: + graph: CompiledStateGraph = ( + AssistantGraph(team) + .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_GENERATOR) + .add_trends_generator(AssistantNodeName.END) + .compile() + ) + + def callable(query: str, plan: str) -> AssistantTrendsQuery: state = graph.invoke( AssistantState(messages=[HumanMessage(content=query)], plan=plan), - self._get_config(), + runnable_config, ) return cast(VisualizationMessage, AssistantState.model_validate(state).messages[-1]).answer - def test_node_replaces_equals_with_contains(self): - query = "what is pageview trend for users with name John?" - plan = """Events: - - $pageview - - math operation: total count - - property filter 1 - - person - - name - - equals - - John - """ - actual_output = self._call_node(query, plan).model_dump_json(exclude_none=True) - assert "exact" not in actual_output - assert "icontains" in actual_output - assert "John" not in actual_output - assert "john" in actual_output - - def test_node_leans_towards_line_graph(self): - query = "How often do users download files?" 
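Aside: the substring assertions in these generator tests lean on the fact that `model_dump_json(exclude_none=True)` omits every unset optional field, so only operators and values the model actually produced appear in the serialized query. A minimal illustration; the exact serialized defaults are an assumption about the schema:

```python
from posthog.schema import AssistantTrendsQuery

# With exclude_none=True, unset optional fields are dropped, so checks like
# `"icontains" in actual_output` match only operators the generator emitted.
query = AssistantTrendsQuery(series=[])
print(query.model_dump_json(exclude_none=True))  # e.g. '{"kind":"TrendsQuery","series":[]}'
```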
- # We ideally want to consider both total count of downloads per period, as well as how often a median user downloads - plan = """Events: - - downloaded_file - - math operation: total count - - downloaded_file - - math operation: median count per user - """ - actual_output = self._call_node(query, plan) - assert actual_output.trendsFilter.display == "ActionsLineGraph" - assert actual_output.series[0].kind == "EventsNode" - assert actual_output.series[0].event == "downloaded_file" - assert actual_output.series[0].math == "total" - assert actual_output.series[1].kind == "EventsNode" - assert actual_output.series[1].event == "downloaded_file" - assert actual_output.series[1].math == "median_count_per_actor" + return callable + + +def test_node_replaces_equals_with_contains(call_node): + query = "what is pageview trend for users with name John?" + plan = """Events: + - $pageview + - math operation: total count + - property filter 1 + - person + - name + - equals + - John + """ + actual_output = call_node(query, plan).model_dump_json(exclude_none=True) + assert "exact" not in actual_output + assert "icontains" in actual_output + assert "John" not in actual_output + assert "john" in actual_output + + +def test_node_leans_towards_line_graph(call_node): + query = "How often do users download files?" + # We ideally want to consider both total count of downloads per period, as well as how often a median user downloads + plan = """Events: + - downloaded_file + - math operation: total count + - downloaded_file + - math operation: median count per user + """ + actual_output = call_node(query, plan) + assert actual_output.trendsFilter.display == "ActionsLineGraph" + assert actual_output.series[0].kind == "EventsNode" + assert actual_output.series[0].event == "downloaded_file" + assert actual_output.series[0].math == "total" + assert actual_output.series[1].kind == "EventsNode" + assert actual_output.series[1].event == "downloaded_file" + assert actual_output.series[1].math == "median_count_per_actor" diff --git a/ee/hogai/eval/tests/test_eval_trends_planner.py b/ee/hogai/eval/tests/test_eval_trends_planner.py index d4fbff456a91c..4d4ea4c41dfbf 100644 --- a/ee/hogai/eval/tests/test_eval_trends_planner.py +++ b/ee/hogai/eval/tests/test_eval_trends_planner.py @@ -1,179 +1,196 @@ +from collections.abc import Callable + +import pytest from deepeval import assert_test from deepeval.metrics import GEval from deepeval.test_case import LLMTestCase, LLMTestCaseParams +from langchain_core.runnables.config import RunnableConfig from langgraph.graph.state import CompiledStateGraph from ee.hogai.assistant import AssistantGraph -from ee.hogai.eval.utils import EvalBaseTest from ee.hogai.utils.types import AssistantNodeName, AssistantState from posthog.schema import HumanMessage -class TestEvalTrendsPlanner(EvalBaseTest): - def _get_plan_correctness_metric(self): - return GEval( - name="Trends Plan Correctness", - criteria="You will be given expected and actual generated plans to provide a taxonomy to answer a user's question with a trends insight. Compare the plans to determine whether the taxonomy of the actual plan matches the expected plan. Do not apply general knowledge about trends insights.", - evaluation_steps=[ - "A plan must define at least one event and a math type, but it is not required to define any filters, breakdowns, or formulas.", - "Compare events, properties, math types, and property values of 'expected output' and 'actual output'. 
Do not penalize if the actual output does not include a timeframe.", - "Check if the combination of events, properties, and property values in 'actual output' can answer the user's question according to the 'expected output'.", - # The criteria for aggregations must be more specific because there isn't a way to bypass them. - "Check if the math types in 'actual output' match those in 'expected output'. Math types sometimes are interchangeable, so use your judgement. If the aggregation type is specified by a property, user, or group in 'expected output', the same property, user, or group must be used in 'actual output'.", - "If 'expected output' contains a breakdown, check if 'actual output' contains a similar breakdown, and heavily penalize if the breakdown is not present or different.", - "If 'expected output' contains a formula, check if 'actual output' contains a similar formula, and heavily penalize if the formula is not present or different.", - # We don't want to see in the output unnecessary property filters. The assistant tries to use them all the time. - "Heavily penalize if the 'actual output' contains any excessive output not present in the 'expected output'. For example, the `is set` operator in filters should not be used unless the user explicitly asks for it.", - ], - evaluation_params=[ - LLMTestCaseParams.INPUT, - LLMTestCaseParams.EXPECTED_OUTPUT, - LLMTestCaseParams.ACTUAL_OUTPUT, - ], - threshold=0.7, - ) +@pytest.fixture(scope="module") +def metric(): + return GEval( + name="Trends Plan Correctness", + criteria="You will be given expected and actual generated plans to provide a taxonomy to answer a user's question with a trends insight. Compare the plans to determine whether the taxonomy of the actual plan matches the expected plan. Do not apply general knowledge about trends insights.", + evaluation_steps=[ + "A plan must define at least one event and a math type, but it is not required to define any filters, breakdowns, or formulas.", + "Compare events, properties, math types, and property values of 'expected output' and 'actual output'. Do not penalize if the actual output does not include a timeframe.", + "Check if the combination of events, properties, and property values in 'actual output' can answer the user's question according to the 'expected output'.", + # The criteria for aggregations must be more specific because there isn't a way to bypass them. + "Check if the math types in 'actual output' match those in 'expected output'. Math types sometimes are interchangeable, so use your judgement. If the aggregation type is specified by a property, user, or group in 'expected output', the same property, user, or group must be used in 'actual output'.", + "If 'expected output' contains a breakdown, check if 'actual output' contains a similar breakdown, and heavily penalize if the breakdown is not present or different.", + "If 'expected output' contains a formula, check if 'actual output' contains a similar formula, and heavily penalize if the formula is not present or different.", + # We don't want to see in the output unnecessary property filters. The assistant tries to use them all the time. + "Heavily penalize if the 'actual output' contains any excessive output not present in the 'expected output'. 
For example, the `is set` operator in filters should not be used unless the user explicitly asks for it.", + ], + evaluation_params=[ + LLMTestCaseParams.INPUT, + LLMTestCaseParams.EXPECTED_OUTPUT, + LLMTestCaseParams.ACTUAL_OUTPUT, + ], + threshold=0.7, + ) - def _call_node(self, query): - graph: CompiledStateGraph = ( - AssistantGraph(self.team) - .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_PLANNER) - .add_trends_planner(AssistantNodeName.END) - .compile() - ) + +@pytest.fixture +def call_node(team, runnable_config: RunnableConfig) -> Callable[[str], str]: + graph: CompiledStateGraph = ( + AssistantGraph(team) + .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_PLANNER) + .add_trends_planner(AssistantNodeName.END) + .compile() + ) + + def callable(query: str) -> str: state = graph.invoke( AssistantState(messages=[HumanMessage(content=query)]), - self._get_config(), + runnable_config, ) return AssistantState.model_validate(state).plan or "" - def test_no_excessive_property_filters(self): - query = "Show the $pageview trend" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - $pageview - - math operation: total count - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_no_excessive_property_filters_for_a_defined_math_type(self): - query = "What is the MAU?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - $pageview - - math operation: unique users - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_basic_filtering(self): - query = "can you compare how many Chrome vs Safari users uploaded a file in the last 30d?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - uploaded_file - - math operation: total count - - property filter 1: - - entity: event - - property name: $browser - - property type: String - - operator: equals - - property value: Chrome - - property filter 2: - - entity: event - - property name: $browser - - property type: String - - operator: equals - - property value: Safari - - Breakdown by: - - breakdown 1: + return callable + + +def test_no_excessive_property_filters(metric, call_node): + query = "Show the $pageview trend" + test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - $pageview + - math operation: total count + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_no_excessive_property_filters_for_a_defined_math_type(metric, call_node): + query = "What is the MAU?" + test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - $pageview + - math operation: unique users + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_basic_filtering(metric, call_node): + query = "can you compare how many Chrome vs Safari users uploaded a file in the last 30d?" 
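Aside: `assert_test` fails a case when the judge's score falls below `threshold=0.7`. When tuning `evaluation_steps`, a single case can also be scored directly through deepeval's metric interface. A sketch, assuming an LLM backend is configured:

```python
from deepeval.test_case import LLMTestCase

case = LLMTestCase(
    input="Show the $pageview trend",
    expected_output="Events:\n- $pageview\n- math operation: total count",
    actual_output="Events:\n- $pageview\n- math operation: total count",
)
metric.measure(case)  # runs the LLM-as-judge evaluation once
print(metric.score)   # float in [0, 1], compared against the 0.7 threshold
print(metric.reason)  # the judge's natural-language rationale
```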
+ test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - uploaded_file + - math operation: total count + - property filter 1: - entity: event - property name: $browser - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_formula_mode(self): - query = "i want to see a ratio of identify divided by page views" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - $identify - - math operation: total count - - $pageview - - math operation: total count - - Formula: - `A/B`, where `A` is the total count of `$identify` and `B` is the total count of `$pageview` - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_math_type_by_a_property(self): - query = "what is the average session duration?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - All Events - - math operation: average by `$session_duration` - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_math_type_by_a_user(self): - query = "What is the median page view count for a user?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - $pageview - - math operation: median by users - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_needle_in_a_haystack(self): - query = "How frequently do people pay for a personal-pro plan?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - paid_bill - - math operation: total count - - property filter 1: - - entity: event - - property name: plan - - property type: String - - operator: contains - - property value: personal/pro - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) - - def test_funnel_does_not_include_timeframe(self): - query = "what is the pageview trend for event time before 2024-01-01?" - test_case = LLMTestCase( - input=query, - expected_output=""" - Events: - - $pageview - - math operation: total count - """, - actual_output=self._call_node(query), - ) - assert_test(test_case, [self._get_plan_correctness_metric()]) + - property type: String + - operator: equals + - property value: Chrome + - property filter 2: + - entity: event + - property name: $browser + - property type: String + - operator: equals + - property value: Safari + + Breakdown by: + - breakdown 1: + - entity: event + - property name: $browser + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_formula_mode(metric, call_node): + query = "i want to see a ratio of identify divided by page views" + test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - $identify + - math operation: total count + - $pageview + - math operation: total count + + Formula: + `A/B`, where `A` is the total count of `$identify` and `B` is the total count of `$pageview` + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_math_type_by_a_property(metric, call_node): + query = "what is the average session duration?" 
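Aside: a plan line such as "math operation: average by `$session_duration`" corresponds to a property-based math aggregation once the generator turns the plan into a query. A rough sketch of that mapping; the field values here are assumptions for illustration, not something this test asserts:

```python
from posthog.schema import EventsNode

# "All Events" averaged by `$session_duration` roughly maps to:
series = EventsNode(
    event=None,  # no specific event, i.e. "All Events"
    math="avg",
    math_property="$session_duration",
)
```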
+ test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - All Events + - math operation: average by `$session_duration` + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_math_type_by_a_user(metric, call_node): + query = "What is the median page view count for a user?" + test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - $pageview + - math operation: median by users + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_needle_in_a_haystack(metric, call_node): + query = "How frequently do people pay for a personal-pro plan?" + test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - paid_bill + - math operation: total count + - property filter 1: + - entity: event + - property name: plan + - property type: String + - operator: contains + - property value: personal/pro + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) + + +def test_trends_does_not_include_timeframe(metric, call_node): + query = "what is the pageview trend for event time before 2024-01-01?" + test_case = LLMTestCase( + input=query, + expected_output=""" + Events: + - $pageview + - math operation: total count + """, + actual_output=call_node(query), + ) + assert_test(test_case, [metric]) diff --git a/ee/hogai/eval/utils.py b/ee/hogai/eval/utils.py deleted file mode 100644 index 6e03c4cfafa9f..0000000000000 --- a/ee/hogai/eval/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -import os - -import pytest -from django.test import override_settings -from flaky import flaky -from langchain_core.runnables import RunnableConfig - -from ee.models.assistant import Conversation -from posthog.demo.matrix.manager import MatrixManager -from posthog.tasks.demo_create_data import HedgeboxMatrix -from posthog.test.base import NonAtomicBaseTest - - -@pytest.mark.skipif(os.environ.get("DEEPEVAL") != "YES", reason="Only runs for the assistant evaluation") -@flaky(max_runs=3, min_passes=1) -class EvalBaseTest(NonAtomicBaseTest): - def _get_config(self) -> RunnableConfig: - conversation = Conversation.objects.create(team=self.team, user=self.user) - return { - "configurable": { - "thread_id": conversation.id, - } - } - - @classmethod - def setUpTestData(cls): - super().setUpTestData() - matrix = HedgeboxMatrix( - seed="b1ef3c66-5f43-488a-98be-6b46d92fbcef", # this seed generates all events - days_past=120, - days_future=30, - n_clusters=500, - group_type_index_offset=0, - ) - matrix_manager = MatrixManager(matrix, print_steps=True) - existing_user = cls.team.organization.members.first() - with override_settings(TEST=False): - # Simulation saving should occur in non-test mode, so that Kafka isn't mocked. 
Normally in tests we don't - # want to ingest via Kafka, but simulation saving is specifically designed to use that route for speed - matrix_manager.run_on_team(cls.team, existing_user) diff --git a/ee/migrations/0019_remove_conversationcheckpointblob_unique_checkpoint_blob_and_more.py b/ee/migrations/0019_remove_conversationcheckpointblob_unique_checkpoint_blob_and_more.py new file mode 100644 index 0000000000000..377f85b3d29c2 --- /dev/null +++ b/ee/migrations/0019_remove_conversationcheckpointblob_unique_checkpoint_blob_and_more.py @@ -0,0 +1,38 @@ +# Generated by Django 4.2.15 on 2024-12-19 11:00 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + dependencies = [ + ("ee", "0018_conversation_conversationcheckpoint_and_more"), + ] + + operations = [ + migrations.RemoveConstraint( + model_name="conversationcheckpointblob", + name="unique_checkpoint_blob", + ), + migrations.AddField( + model_name="conversationcheckpointblob", + name="checkpoint_ns", + field=models.TextField( + default="", + help_text='Checkpoint namespace. Denotes the path to the subgraph node the checkpoint originates from, separated by `|` character, e.g. `"child|grandchild"`. Defaults to "" (root graph).', + ), + ), + migrations.AddField( + model_name="conversationcheckpointblob", + name="thread", + field=models.ForeignKey( + null=True, on_delete=django.db.models.deletion.CASCADE, related_name="blobs", to="ee.conversation" + ), + ), + migrations.AddConstraint( + model_name="conversationcheckpointblob", + constraint=models.UniqueConstraint( + fields=("thread_id", "checkpoint_ns", "channel", "version"), name="unique_checkpoint_blob" + ), + ), + ] diff --git a/ee/migrations/max_migration.txt b/ee/migrations/max_migration.txt index fb889f1cc34cf..aec0628d960c8 100644 --- a/ee/migrations/max_migration.txt +++ b/ee/migrations/max_migration.txt @@ -1 +1 @@ -0018_conversation_conversationcheckpoint_and_more +0019_remove_conversationcheckpointblob_unique_checkpoint_blob_and_more diff --git a/ee/models/assistant.py b/ee/models/assistant.py index 390a7ab7a117f..f2a31d938f5d0 100644 --- a/ee/models/assistant.py +++ b/ee/models/assistant.py @@ -46,6 +46,14 @@ def pending_writes(self) -> Iterable["ConversationCheckpointWrite"]: class ConversationCheckpointBlob(UUIDModel): checkpoint = models.ForeignKey(ConversationCheckpoint, on_delete=models.CASCADE, related_name="blobs") + """ + The checkpoint that created the blob. Do not use this field to query blobs. + """ + thread = models.ForeignKey(Conversation, on_delete=models.CASCADE, related_name="blobs", null=True) + checkpoint_ns = models.TextField( + default="", + help_text='Checkpoint namespace. Denotes the path to the subgraph node the checkpoint originates from, separated by `|` character, e.g. `"child|grandchild"`. Defaults to "" (root graph).', + ) channel = models.TextField( help_text="An arbitrary string defining the channel name. For example, it can be a node name or a reserved LangGraph's enum." 
) @@ -56,7 +64,7 @@ class ConversationCheckpointBlob(UUIDModel): class Meta: constraints = [ models.UniqueConstraint( - fields=["checkpoint_id", "channel", "version"], + fields=["thread_id", "checkpoint_ns", "channel", "version"], name="unique_checkpoint_blob", ) ] diff --git a/frontend/__snapshots__/components-command-bar--search--dark.png b/frontend/__snapshots__/components-command-bar--search--dark.png index 599e3c20f7aea..0692091cc3e32 100644 Binary files a/frontend/__snapshots__/components-command-bar--search--dark.png and b/frontend/__snapshots__/components-command-bar--search--dark.png differ diff --git a/frontend/__snapshots__/components-command-bar--search--light.png b/frontend/__snapshots__/components-command-bar--search--light.png index 75bd57cddaff1..8231897e6233f 100644 Binary files a/frontend/__snapshots__/components-command-bar--search--light.png and b/frontend/__snapshots__/components-command-bar--search--light.png differ diff --git a/frontend/__snapshots__/lemon-ui-lemon-input--search--dark.png b/frontend/__snapshots__/lemon-ui-lemon-input--search--dark.png index 2618d982587b5..8dd6e08c842af 100644 Binary files a/frontend/__snapshots__/lemon-ui-lemon-input--search--dark.png and b/frontend/__snapshots__/lemon-ui-lemon-input--search--dark.png differ diff --git a/frontend/__snapshots__/lemon-ui-lemon-input--search--light.png b/frontend/__snapshots__/lemon-ui-lemon-input--search--light.png index 171b0fb4ae3bc..42f6a48d5f2a9 100644 Binary files a/frontend/__snapshots__/lemon-ui-lemon-input--search--light.png and b/frontend/__snapshots__/lemon-ui-lemon-input--search--light.png differ diff --git a/frontend/__snapshots__/replay-player-success--second-recording-in-list--dark.png b/frontend/__snapshots__/replay-player-success--second-recording-in-list--dark.png index 93ad7064fd68b..deadbaa964285 100644 Binary files a/frontend/__snapshots__/replay-player-success--second-recording-in-list--dark.png and b/frontend/__snapshots__/replay-player-success--second-recording-in-list--dark.png differ diff --git a/frontend/__snapshots__/replay-player-success--second-recording-in-list--light.png b/frontend/__snapshots__/replay-player-success--second-recording-in-list--light.png index 537566ccb05ab..f25c112bb904b 100644 Binary files a/frontend/__snapshots__/replay-player-success--second-recording-in-list--light.png and b/frontend/__snapshots__/replay-player-success--second-recording-in-list--light.png differ diff --git a/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png b/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png index 02bc921745ecd..6724b2a2d5179 100644 Binary files a/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png and b/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png differ diff --git a/frontend/src/lib/components/CommandBar/CommandBar.tsx b/frontend/src/lib/components/CommandBar/CommandBar.tsx index fe4b9c2e4555e..5d25486df6862 100644 --- a/frontend/src/lib/components/CommandBar/CommandBar.tsx +++ b/frontend/src/lib/components/CommandBar/CommandBar.tsx @@ -26,7 +26,7 @@ const CommandBarOverlay = forwardRef(fun data-attr="command-bar" className={`w-full ${ barStatus === BarStatus.SHOW_SEARCH && 'h-full' - } bg-bg-3000 rounded overflow-hidden border border-border-bold`} + } w-full bg-bg-3000 rounded overflow-hidden border border-border-bold`} ref={ref} > {children} diff --git 
a/frontend/src/lib/components/CommandBar/SearchBar.tsx b/frontend/src/lib/components/CommandBar/SearchBar.tsx index 3eba8e50e2aad..d9b27c7e806db 100644 --- a/frontend/src/lib/components/CommandBar/SearchBar.tsx +++ b/frontend/src/lib/components/CommandBar/SearchBar.tsx @@ -13,9 +13,10 @@ export const SearchBar = (): JSX.Element => { const inputRef = useRef(null) return ( -
+
-
+ {/* 49px = height of search input, 40rem = height of search results */} +
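Editor's note: the `49px`/`40rem` comment above is the whole sizing contract for the search pane. A minimal sketch of that arithmetic, with hypothetical markup and class names (not PostHog's actual components), where the container height is the sum of the two constants and only the results pane scrolls:

```ts
// Illustrative only: a fixed-height search bar built from the two constants
// named in the comment above. Markup and styling here are assumptions.
const INPUT_HEIGHT_PX = 49 // height of the search input
const RESULTS_HEIGHT_REM = 40 // height of the search results pane

export function SearchLayoutSketch({ results }: { results: JSX.Element }): JSX.Element {
    return (
        // Total height = input + results; overflow is clipped at the container.
        <div
            className="flex flex-col overflow-hidden"
            style={{ height: `calc(${RESULTS_HEIGHT_REM}rem + ${INPUT_HEIGHT_PX}px)` }}
        >
            <input style={{ height: INPUT_HEIGHT_PX }} placeholder="Search..." />
            <div className="overflow-y-auto" style={{ height: `${RESULTS_HEIGHT_REM}rem` }}>
                {results}
            </div>
        </div>
    )
}
```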
diff --git a/frontend/src/lib/components/CommandBar/SearchResult.tsx b/frontend/src/lib/components/CommandBar/SearchResult.tsx index 354470759518e..6bb04fce0693c 100644 --- a/frontend/src/lib/components/CommandBar/SearchResult.tsx +++ b/frontend/src/lib/components/CommandBar/SearchResult.tsx @@ -1,6 +1,8 @@ import { LemonSkeleton } from '@posthog/lemon-ui' import clsx from 'clsx' import { useActions, useValues } from 'kea' +import { TAILWIND_BREAKPOINTS } from 'lib/constants' +import { useWindowSize } from 'lib/hooks/useWindowSize' import { capitalizeFirstLetter } from 'lib/utils' import { useLayoutEffect, useRef } from 'react' import { useSummarizeInsight } from 'scenes/insights/summarizeInsight' @@ -22,10 +24,12 @@ type SearchResultProps = { export const SearchResult = ({ result, resultIndex, focused }: SearchResultProps): JSX.Element => { const { aggregationLabel } = useValues(searchBarLogic) - const { openResult } = useActions(searchBarLogic) + const { setActiveResultIndex, openResult } = useActions(searchBarLogic) const ref = useRef(null) + const { width } = useWindowSize() + useLayoutEffect(() => { if (focused) { // :HACKY: This uses the non-standard scrollIntoViewIfNeeded api @@ -40,27 +44,33 @@ export const SearchResult = ({ result, resultIndex, focused }: SearchResultProps }, [focused]) return ( -
{ - openResult(resultIndex) - }} - ref={ref} - > -
- - {result.type !== 'group' - ? tabToName[result.type] - : `${capitalizeFirstLetter(aggregationLabel(result.extra_fields.group_type_index).plural)}`} - - - - + <> +
{ + if (width && width <= TAILWIND_BREAKPOINTS.md) { + openResult(resultIndex) + } else { + setActiveResultIndex(resultIndex) + } + }} + ref={ref} + > +
+ + {result.type !== 'group' + ? tabToName[result.type] + : `${capitalizeFirstLetter(aggregationLabel(result.extra_fields.group_type_index).plural)}`} + + + + +
-
+ ) } diff --git a/frontend/src/lib/components/CommandBar/SearchResultPreview.tsx b/frontend/src/lib/components/CommandBar/SearchResultPreview.tsx index 498150ebada3a..7fe2a6313bfdd 100644 --- a/frontend/src/lib/components/CommandBar/SearchResultPreview.tsx +++ b/frontend/src/lib/components/CommandBar/SearchResultPreview.tsx @@ -1,11 +1,15 @@ -import { useValues } from 'kea' +import { useActions, useValues } from 'kea' import { ResultDescription, ResultName } from 'lib/components/CommandBar/SearchResult' +import { LemonButton } from 'lib/lemon-ui/LemonButton' + +import { KeyboardShortcut } from '~/layout/navigation-3000/components/KeyboardShortcut' import { tabToName } from './constants' import { searchBarLogic, urlForResult } from './searchBarLogic' export const SearchResultPreview = (): JSX.Element | null => { const { activeResultIndex, combinedSearchResults } = useValues(searchBarLogic) + const { openResult } = useActions(searchBarLogic) if (!combinedSearchResults || combinedSearchResults.length === 0) { return null @@ -14,17 +18,33 @@ export const SearchResultPreview = (): JSX.Element | null => { const result = combinedSearchResults[activeResultIndex] return ( -
-
{tabToName[result.type]}
-
- -
- - {location.host} - {urlForResult(result)} - -
- +
+
+
+
{tabToName[result.type as keyof typeof tabToName]}
+
+ +
+ + {location.host} + {urlForResult(result)} + +
+ +
+
+
+ { + openResult(activeResultIndex) + }} + aria-label="Open search result" + > + Open + +
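Editor's note: the SearchResult `onClick` above routes clicks by viewport width, and SearchResultPreview gains an explicit "Open" button for the desktop path. A sketch of that routing, assuming the `TAILWIND_BREAKPOINTS.md` value is Tailwind's default 768px (an assumption; the constant in `lib/constants` may differ):

```ts
// Sketch of the click routing: narrow viewports open the result immediately,
// wider ones only select it so the preview pane's "Open" button can navigate.
const MD_BREAKPOINT_PX = 768 // assumed value for TAILWIND_BREAKPOINTS.md

interface SearchActions {
    openResult: (index: number) => void
    setActiveResultIndex: (index: number) => void
}

export function handleResultClick(
    actions: SearchActions,
    resultIndex: number,
    windowWidth: number | undefined
): void {
    if (windowWidth !== undefined && windowWidth <= MD_BREAKPOINT_PX) {
        // No room for a side-by-side preview on mobile: open directly.
        actions.openResult(resultIndex)
    } else {
        // Desktop: show the result in the preview pane instead.
        actions.setActiveResultIndex(resultIndex)
    }
}
```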
) diff --git a/frontend/src/lib/components/CommandBar/SearchResults.tsx b/frontend/src/lib/components/CommandBar/SearchResults.tsx index 2dde6f78cbead..3e6abbc35a27d 100644 --- a/frontend/src/lib/components/CommandBar/SearchResults.tsx +++ b/frontend/src/lib/components/CommandBar/SearchResults.tsx @@ -1,6 +1,4 @@ -import clsx from 'clsx' import { useValues } from 'kea' -import { useResizeBreakpoints } from 'lib/hooks/useResizeObserver' import { DetectiveHog } from '../hedgehogs' import { searchBarLogic } from './searchBarLogic' @@ -10,27 +8,17 @@ import { SearchResultPreview } from './SearchResultPreview' export const SearchResults = (): JSX.Element => { const { combinedSearchResults, combinedSearchLoading, activeResultIndex } = useValues(searchBarLogic) - const { ref, size } = useResizeBreakpoints({ - 0: 'small', - 550: 'normal', - }) - return ( -
+ <> {!combinedSearchLoading && combinedSearchResults?.length === 0 ? ( -
+

No results

This doesn't happen often, but we're stumped!

) : ( -
-
+
+
{combinedSearchLoading && ( <> @@ -48,13 +36,11 @@ export const SearchResults = (): JSX.Element => { /> ))}
- {size !== 'small' ? ( -
- -
- ) : null} +
+ +
)} -
+ ) } diff --git a/frontend/src/lib/components/CommandBar/SearchTabs.tsx b/frontend/src/lib/components/CommandBar/SearchTabs.tsx index 37ff41ff30a53..aa3ddb67e8496 100644 --- a/frontend/src/lib/components/CommandBar/SearchTabs.tsx +++ b/frontend/src/lib/components/CommandBar/SearchTabs.tsx @@ -12,11 +12,13 @@ type SearchTabsProps = { export const SearchTabs = ({ inputRef }: SearchTabsProps): JSX.Element | null => { const { tabsGrouped } = useValues(searchBarLogic) return ( -
+
{Object.entries(tabsGrouped).map(([group, tabs]) => (
{group !== 'all' && ( - {groupToName[group]} + + {groupToName[group as keyof typeof groupToName]} + )} {tabs.map((tab) => ( diff --git a/frontend/src/lib/components/CommandBar/index.scss b/frontend/src/lib/components/CommandBar/index.scss index 02aa24cb7a11d..3150d46ed5ada 100644 --- a/frontend/src/lib/components/CommandBar/index.scss +++ b/frontend/src/lib/components/CommandBar/index.scss @@ -16,11 +16,6 @@ } } -.SearchResults { - // offset container height by input - height: calc(100% - 2.875rem); -} - .CommandBar__overlay { position: fixed; top: 0; diff --git a/frontend/src/lib/components/CommandBar/searchBarLogic.ts b/frontend/src/lib/components/CommandBar/searchBarLogic.ts index b3576ac482d10..1b96c64c34b81 100644 --- a/frontend/src/lib/components/CommandBar/searchBarLogic.ts +++ b/frontend/src/lib/components/CommandBar/searchBarLogic.ts @@ -61,6 +61,7 @@ export const searchBarLogic = kea([ onArrowUp: (activeIndex: number, maxIndex: number) => ({ activeIndex, maxIndex }), onArrowDown: (activeIndex: number, maxIndex: number) => ({ activeIndex, maxIndex }), openResult: (index: number) => ({ index }), + setActiveResultIndex: (index: number) => ({ index }), }), loaders(({ values, actions }) => ({ rawSearchResponse: [ @@ -208,6 +209,7 @@ export const searchBarLogic = kea([ openResult: () => 0, onArrowUp: (_, { activeIndex, maxIndex }) => (activeIndex > 0 ? activeIndex - 1 : maxIndex), onArrowDown: (_, { activeIndex, maxIndex }) => (activeIndex < maxIndex ? activeIndex + 1 : 0), + setActiveResultIndex: (_, { index }) => index, }, ], activeTab: [ diff --git a/frontend/src/lib/constants.tsx b/frontend/src/lib/constants.tsx index d41a232518b18..f7168483b3e98 100644 --- a/frontend/src/lib/constants.tsx +++ b/frontend/src/lib/constants.tsx @@ -181,7 +181,6 @@ export const FEATURE_FLAGS = { SQL_EDITOR: 'sql-editor', // owner: @EDsCODE #team-data-warehouse SESSION_REPLAY_DOCTOR: 'session-replay-doctor', // owner: #team-replay SAVED_NOT_PINNED: 'saved-not-pinned', // owner: #team-replay - NEW_EXPERIMENTS_UI: 'new-experiments-ui', // owner: @jurajmajerik #team-feature-success AUDIT_LOGS_ACCESS: 'audit-logs-access', // owner: #team-growth SUBSCRIBE_FROM_PAYGATE: 'subscribe-from-paygate', // owner: #team-growth HEATMAPS_UI: 'heatmaps-ui', // owner: @benjackwhite @@ -236,6 +235,7 @@ export const FEATURE_FLAGS = { EXPERIMENT_STATS_V2: 'experiment-stats-v2', // owner: @danielbachhuber #team-experiments WEB_ANALYTICS_PERIOD_COMPARISON: 'web-analytics-period-comparison', // owner: @rafaeelaudibert #team-web-analytics WEB_ANALYTICS_CONVERSION_GOAL_FILTERS: 'web-analytics-conversion-goal-filters', // owner: @rafaeelaudibert #team-web-analytics + COOKIELESS_SERVER_HASH_MODE_SETTING: 'cookieless-server-hash-mode-setting', // owner: @robbie-c #team-web-analytics } as const export type FeatureFlagKey = (typeof FEATURE_FLAGS)[keyof typeof FEATURE_FLAGS] @@ -314,3 +314,11 @@ export const SESSION_REPLAY_MINIMUM_DURATION_OPTIONS: LemonSelectOptions> & LemonButtonProps +export type MoreProps = Partial> & + LemonButtonProps & { dropdown?: Partial } export function More({ overlay, + dropdown, 'data-attr': dataAttr, placement = 'bottom-end', ...buttonProps @@ -17,11 +19,14 @@ export function More({ aria-label="more" data-attr={dataAttr ?? 
'more-button'} icon={} - dropdown={{ - placement: placement, - actionable: true, - overlay, - }} + dropdown={ + { + placement: placement, + actionable: true, + ...dropdown, + overlay, + } as LemonButtonDropdown + } size="small" {...buttonProps} disabled={!overlay} diff --git a/frontend/src/lib/lemon-ui/LemonInput/LemonInput.scss b/frontend/src/lib/lemon-ui/LemonInput/LemonInput.scss index 9f0ecf27a7c4a..fa2ddaea8d2fd 100644 --- a/frontend/src/lib/lemon-ui/LemonInput/LemonInput.scss +++ b/frontend/src/lib/lemon-ui/LemonInput/LemonInput.scss @@ -97,6 +97,7 @@ &.LemonInput--type-search { // NOTE Design: Search inputs are given a specific small width max-width: 240px; + border-radius: 0; } &.LemonInput--type-number { diff --git a/frontend/src/lib/utils/concurrencyController.ts b/frontend/src/lib/utils/concurrencyController.ts index 941af92f33b74..7326165b623a5 100644 --- a/frontend/src/lib/utils/concurrencyController.ts +++ b/frontend/src/lib/utils/concurrencyController.ts @@ -1,5 +1,8 @@ import FastPriorityQueue from 'fastpriorityqueue' import { promiseResolveReject } from 'lib/utils' + +// Note that this file also exists in the plugin-server, please keep them in sync as the tests only exist for this version + class ConcurrencyControllerItem { _debugTag?: string _runFn: () => Promise @@ -8,7 +11,7 @@ class ConcurrencyControllerItem { constructor( concurrencyController: ConcurrencyController, userFn: () => Promise, - abortController: AbortController, + abortController: AbortController | undefined, priority: number = Infinity, debugTag: string | undefined ) { @@ -17,7 +20,7 @@ class ConcurrencyControllerItem { const { promise, resolve, reject } = promiseResolveReject() this._promise = promise this._runFn = async () => { - if (abortController.signal.aborted) { + if (abortController?.signal.aborted) { reject(new FakeAbortError(abortController.signal.reason || 'AbortError')) return } @@ -32,7 +35,7 @@ class ConcurrencyControllerItem { reject(error) } } - abortController.signal.addEventListener('abort', () => { + abortController?.signal.addEventListener('abort', () => { reject(new FakeAbortError(abortController.signal.reason || 'AbortError')) }) promise @@ -76,7 +79,7 @@ export class ConcurrencyController { }: { fn: () => Promise priority?: number - abortController: AbortController + abortController?: AbortController debugTag?: string }): Promise => { const item = new ConcurrencyControllerItem(this, fn, abortController, priority, debugTag) diff --git a/frontend/src/queries/schema.json b/frontend/src/queries/schema.json index 1d2a4d94012aa..005fb78c94497 100644 --- a/frontend/src/queries/schema.json +++ b/frontend/src/queries/schema.json @@ -2132,6 +2132,9 @@ "significant": { "type": "boolean" }, + "stats_version": { + "type": "integer" + }, "timezone": { "type": "string" }, @@ -4511,6 +4514,9 @@ "significant": { "type": "boolean" }, + "stats_version": { + "type": "integer" + }, "variants": { "items": { "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" @@ -5991,6 +5997,9 @@ }, "response": { "$ref": "#/definitions/ExperimentFunnelsQueryResponse" + }, + "stats_version": { + "type": "integer" } }, "required": ["funnels_query", "kind"], @@ -6041,6 +6050,9 @@ "significant": { "type": "boolean" }, + "stats_version": { + "type": "integer" + }, "variants": { "items": { "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" @@ -9787,6 +9799,9 @@ "significant": { "type": "boolean" }, + "stats_version": { + "type": "integer" + }, "variants": { "items": { "$ref": 
"#/definitions/ExperimentVariantFunnelsBaseStats" @@ -10418,6 +10433,9 @@ "significant": { "type": "boolean" }, + "stats_version": { + "type": "integer" + }, "variants": { "items": { "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" diff --git a/frontend/src/queries/schema.ts b/frontend/src/queries/schema.ts index 5360ae06d99f4..9e4252c989b38 100644 --- a/frontend/src/queries/schema.ts +++ b/frontend/src/queries/schema.ts @@ -2003,6 +2003,7 @@ export interface ExperimentFunnelsQueryResponse { significance_code: ExperimentSignificanceCode expected_loss: number credible_intervals: Record + stats_version?: integer } export type CachedExperimentFunnelsQueryResponse = CachedQueryResponse @@ -2012,6 +2013,7 @@ export interface ExperimentFunnelsQuery extends DataNode { diff --git a/frontend/src/scenes/experiments/ExperimentView/CumulativeExposuresChart.tsx b/frontend/src/scenes/experiments/ExperimentView/CumulativeExposuresChart.tsx index 7f4378a7ec5ef..6efdd992f5988 100644 --- a/frontend/src/scenes/experiments/ExperimentView/CumulativeExposuresChart.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/CumulativeExposuresChart.tsx @@ -10,11 +10,11 @@ import { BaseMathType, ChartDisplayType, InsightType, PropertyFilterType, Proper import { experimentLogic } from '../experimentLogic' export function CumulativeExposuresChart(): JSX.Element { - const { experiment, experimentResults, getMetricType } = useValues(experimentLogic) + const { experiment, metricResults, getMetricType } = useValues(experimentLogic) const metricIdx = 0 const metricType = getMetricType(metricIdx) - + const result = metricResults?.[metricIdx] const variants = experiment.parameters?.feature_flag_variants?.map((variant) => variant.key) || [] if (experiment.holdout) { variants.push(`holdout-${experiment.holdout.id}`) @@ -25,7 +25,7 @@ export function CumulativeExposuresChart(): JSX.Element { if (metricType === InsightType.TRENDS) { query = { kind: NodeKind.InsightVizNode, - source: (experimentResults as CachedExperimentTrendsQueryResponse)?.exposure_query || { + source: (result as CachedExperimentTrendsQueryResponse)?.exposure_query || { kind: NodeKind.TrendsQuery, series: [], interval: 'day', diff --git a/frontend/src/scenes/experiments/ExperimentView/DataCollection.tsx b/frontend/src/scenes/experiments/ExperimentView/DataCollection.tsx index 2463e1dd791a5..b22eb57b35d4b 100644 --- a/frontend/src/scenes/experiments/ExperimentView/DataCollection.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/DataCollection.tsx @@ -32,7 +32,7 @@ export function DataCollection(): JSX.Element { const experimentProgressPercent = metricType === InsightType.FUNNELS - ? (funnelResultsPersonsTotal / recommendedSampleSize) * 100 + ? 
(funnelResultsPersonsTotal(0) / recommendedSampleSize) * 100 : (actualRunningTime / recommendedRunningTime) * 100 const hasHighRunningTime = recommendedRunningTime > 62 @@ -109,7 +109,7 @@ export function DataCollection(): JSX.Element { Saw  - {humanFriendlyNumber(funnelResultsPersonsTotal)} of{' '} + {humanFriendlyNumber(funnelResultsPersonsTotal(0))} of{' '} {humanFriendlyNumber(recommendedSampleSize)}{' '} {' '} {formatUnitByQuantity(recommendedSampleSize, 'participant')} diff --git a/frontend/src/scenes/experiments/ExperimentView/DistributionTable.tsx b/frontend/src/scenes/experiments/ExperimentView/DistributionTable.tsx index b3c2962d95c55..caee718efb726 100644 --- a/frontend/src/scenes/experiments/ExperimentView/DistributionTable.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/DistributionTable.tsx @@ -137,9 +137,11 @@ export function DistributionModal({ experimentId }: { experimentId: Experiment[' export function DistributionTable(): JSX.Element { const { openDistributionModal } = useActions(experimentLogic) - const { experimentId, experiment, experimentResults } = useValues(experimentLogic) + const { experimentId, experiment, metricResults } = useValues(experimentLogic) const { reportExperimentReleaseConditionsViewed } = useActions(experimentLogic) + const result = metricResults?.[0] + const onSelectElement = (variant: string): void => { LemonDialog.open({ title: 'Select a domain', @@ -166,7 +168,7 @@ export function DistributionTable(): JSX.Element { key: 'key', title: 'Variant', render: function Key(_, item): JSX.Element { - if (!experimentResults || !experimentResults.insight) { + if (!result || !result.insight) { return {item.key} } return diff --git a/frontend/src/scenes/experiments/ExperimentView/ExperimentView.tsx b/frontend/src/scenes/experiments/ExperimentView/ExperimentView.tsx index 75aa23d2f6284..9a9ad237665f4 100644 --- a/frontend/src/scenes/experiments/ExperimentView/ExperimentView.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/ExperimentView.tsx @@ -25,9 +25,9 @@ import { Results } from './Results' import { SecondaryMetricsTable } from './SecondaryMetricsTable' const ResultsTab = (): JSX.Element => { - const { experiment, experimentResults, featureFlags } = useValues(experimentLogic) - - const hasResultsInsight = experimentResults && experimentResults.insight + const { experiment, metricResults, featureFlags } = useValues(experimentLogic) + const result = metricResults?.[0] + const hasResultsInsight = result && result.insight return (
@@ -69,12 +69,12 @@ const VariantsTab = (): JSX.Element => { } export function ExperimentView(): JSX.Element { - const { experimentLoading, experimentResultsLoading, experimentId, experimentResults, tabKey, featureFlags } = + const { experimentLoading, metricResultsLoading, experimentId, metricResults, tabKey, featureFlags } = useValues(experimentLogic) const { setTabKey } = useActions(experimentLogic) - - const hasResultsInsight = experimentResults && experimentResults.insight + const result = metricResults?.[0] + const hasResultsInsight = result && result.insight return ( <> @@ -85,7 +85,7 @@ export function ExperimentView(): JSX.Element { ) : ( <> - {experimentResultsLoading ? ( + {metricResultsLoading ? ( ) : ( <> diff --git a/frontend/src/scenes/experiments/ExperimentView/Overview.tsx b/frontend/src/scenes/experiments/ExperimentView/Overview.tsx index 2095309364143..c8c44c8ea5c04 100644 --- a/frontend/src/scenes/experiments/ExperimentView/Overview.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/Overview.tsx @@ -1,17 +1,29 @@ import { useValues } from 'kea' +import { CachedExperimentFunnelsQueryResponse, CachedExperimentTrendsQueryResponse } from '~/queries/schema' + import { experimentLogic } from '../experimentLogic' import { VariantTag } from './components' export function Overview(): JSX.Element { - const { experimentId, experimentResults, getIndexForVariant, getHighestProbabilityVariant, areResultsSignificant } = + const { experimentId, metricResults, getIndexForVariant, getHighestProbabilityVariant, areResultsSignificant } = useValues(experimentLogic) + const result = metricResults?.[0] + if (!result) { + return <> + } + function WinningVariantText(): JSX.Element { - const highestProbabilityVariant = getHighestProbabilityVariant(experimentResults) - const index = getIndexForVariant(experimentResults, highestProbabilityVariant || '') - if (highestProbabilityVariant && index !== null && experimentResults) { - const { probability } = experimentResults + const highestProbabilityVariant = getHighestProbabilityVariant( + result as CachedExperimentFunnelsQueryResponse | CachedExperimentTrendsQueryResponse + ) + const index = getIndexForVariant( + result as CachedExperimentFunnelsQueryResponse | CachedExperimentTrendsQueryResponse, + highestProbabilityVariant || '' + ) + if (highestProbabilityVariant && index !== null && result) { + const { probability } = result return (
@@ -32,7 +44,9 @@ export function Overview(): JSX.Element { return (
Your results are  - {`${areResultsSignificant ? 'significant' : 'not significant'}`}. + + {`${areResultsSignificant(0) ? 'significant' : 'not significant'}`}. +
) } diff --git a/frontend/src/scenes/experiments/ExperimentView/Results.tsx b/frontend/src/scenes/experiments/ExperimentView/Results.tsx index c4e7a4b05ed62..61574de1b3966 100644 --- a/frontend/src/scenes/experiments/ExperimentView/Results.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/Results.tsx @@ -5,13 +5,17 @@ import { ResultsHeader, ResultsQuery } from './components' import { SummaryTable } from './SummaryTable' export function Results(): JSX.Element { - const { experimentResults } = useValues(experimentLogic) + const { metricResults } = useValues(experimentLogic) + const result = metricResults?.[0] + if (!result) { + return <> + } return (
- +
) } diff --git a/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx b/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx index 8369038f00cbb..7f5bcbabfa3a1 100644 --- a/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx @@ -21,7 +21,7 @@ export function SecondaryMetricsTable({ experimentId }: { experimentId: Experime const [modalMetricIdx, setModalMetricIdx] = useState(null) const { - experimentResults, + metricResults, secondaryMetricResultsLoading, experiment, getSecondaryMetricType, @@ -65,7 +65,7 @@ export function SecondaryMetricsTable({ experimentId }: { experimentId: Experime { title:
Variant
, render: function Key(_, item: TabularSecondaryMetricResults): JSX.Element { - if (!experimentResults || !experimentResults.insight) { + if (!metricResults?.[0] || !metricResults?.[0].insight) { return {item.variant} } return ( diff --git a/frontend/src/scenes/experiments/ExperimentView/SummaryTable.tsx b/frontend/src/scenes/experiments/ExperimentView/SummaryTable.tsx index 6150d4e7b7826..adb1bc9d69616 100644 --- a/frontend/src/scenes/experiments/ExperimentView/SummaryTable.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/SummaryTable.tsx @@ -10,7 +10,6 @@ import { urls } from 'scenes/urls' import { FilterLogicalOperator, - FunnelExperimentVariant, InsightType, PropertyFilterType, PropertyOperator, @@ -27,7 +26,7 @@ export function SummaryTable(): JSX.Element { const { experimentId, experiment, - experimentResults, + metricResults, tabularExperimentResults, getMetricType, exposureCountDataForVariant, @@ -38,14 +37,14 @@ export function SummaryTable(): JSX.Element { credibleIntervalForVariant, } = useValues(experimentLogic) const metricType = getMetricType(0) - - if (!experimentResults) { + const result = metricResults?.[0] + if (!result) { return <> } - const winningVariant = getHighestProbabilityVariant(experimentResults) + const winningVariant = getHighestProbabilityVariant(result) - const columns: LemonTableColumns = [ + const columns: LemonTableColumns = [ { key: 'variants', title: 'Variant', @@ -64,14 +63,14 @@ export function SummaryTable(): JSX.Element { key: 'counts', title: (
- {experimentResults.insight?.[0] && 'action' in experimentResults.insight[0] && ( - + {result.insight?.[0] && 'action' in result.insight[0] && ( + )} {experimentMathAggregationForTrends() ? 'metric' : 'count'}
), render: function Key(_, variant): JSX.Element { - const count = countDataForVariant(experimentResults, variant.key) + const count = countDataForVariant(result, variant.key) if (!count) { return <>— } @@ -83,7 +82,7 @@ export function SummaryTable(): JSX.Element { key: 'exposure', title: 'Exposure', render: function Key(_, variant): JSX.Element { - const exposure = exposureCountDataForVariant(experimentResults, variant.key) + const exposure = exposureCountDataForVariant(result, variant.key) if (!exposure) { return <>— } @@ -120,7 +119,7 @@ export function SummaryTable(): JSX.Element { return Baseline } - const controlVariant = (experimentResults.variants as TrendExperimentVariant[]).find( + const controlVariant = (result.variants as TrendExperimentVariant[]).find( ({ key }) => key === 'control' ) as TrendExperimentVariant @@ -161,7 +160,7 @@ export function SummaryTable(): JSX.Element { return Baseline } - const credibleInterval = credibleIntervalForVariant(experimentResults || null, variant.key, metricType) + const credibleInterval = credibleIntervalForVariant(result || null, variant.key, metricType) if (!credibleInterval) { return <>— } @@ -181,7 +180,7 @@ export function SummaryTable(): JSX.Element { key: 'conversionRate', title: 'Conversion rate', render: function Key(_, item): JSX.Element { - const conversionRate = conversionRateForVariant(experimentResults, item.key) + const conversionRate = conversionRateForVariant(result, item.key) if (!conversionRate) { return <>— } @@ -204,8 +203,8 @@ export function SummaryTable(): JSX.Element { return Baseline } - const controlConversionRate = conversionRateForVariant(experimentResults, 'control') - const variantConversionRate = conversionRateForVariant(experimentResults, item.key) + const controlConversionRate = conversionRateForVariant(result, 'control') + const variantConversionRate = conversionRateForVariant(result, item.key) if (!controlConversionRate || !variantConversionRate) { return <>— @@ -235,7 +234,7 @@ export function SummaryTable(): JSX.Element { return Baseline } - const credibleInterval = credibleIntervalForVariant(experimentResults || null, item.key, metricType) + const credibleInterval = credibleIntervalForVariant(result || null, item.key, metricType) if (!credibleInterval) { return <>— } @@ -254,15 +253,13 @@ export function SummaryTable(): JSX.Element { key: 'winProbability', title: 'Win probability', sorter: (a, b) => { - const aPercentage = (experimentResults?.probability?.[a.key] || 0) * 100 - const bPercentage = (experimentResults?.probability?.[b.key] || 0) * 100 + const aPercentage = (result?.probability?.[a.key] || 0) * 100 + const bPercentage = (result?.probability?.[b.key] || 0) * 100 return aPercentage - bPercentage }, render: function Key(_, item): JSX.Element { const variantKey = item.key - const percentage = - experimentResults?.probability?.[variantKey] != undefined && - experimentResults.probability?.[variantKey] * 100 + const percentage = result?.probability?.[variantKey] != undefined && result.probability?.[variantKey] * 100 const isWinning = variantKey === winningVariant return ( @@ -351,7 +348,7 @@ export function SummaryTable(): JSX.Element { return (
- +
) } diff --git a/frontend/src/scenes/experiments/ExperimentView/components.tsx b/frontend/src/scenes/experiments/ExperimentView/components.tsx index 9e5bfb7c2c4b0..87885a3761b79 100644 --- a/frontend/src/scenes/experiments/ExperimentView/components.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/components.tsx @@ -62,7 +62,11 @@ export function VariantTag({ muted?: boolean fontSize?: number }): JSX.Element { - const { experiment, experimentResults, getIndexForVariant } = useValues(experimentLogic({ experimentId })) + const { experiment, getIndexForVariant, metricResults } = useValues(experimentLogic({ experimentId })) + + if (!metricResults) { + return <> + } if (experiment.holdout && variantKey === `holdout-${experiment.holdout_id}`) { return ( @@ -71,7 +75,7 @@ export function VariantTag({ className="w-2 h-2 rounded-full mr-0.5" // eslint-disable-next-line react/forbid-dom-props style={{ - backgroundColor: getExperimentInsightColour(getIndexForVariant(experimentResults, variantKey)), + backgroundColor: getExperimentInsightColour(getIndexForVariant(metricResults[0], variantKey)), }} /> {experiment.holdout.name} @@ -85,7 +89,7 @@ export function VariantTag({ className="w-2 h-2 rounded-full mr-0.5" // eslint-disable-next-line react/forbid-dom-props style={{ - backgroundColor: getExperimentInsightColour(getIndexForVariant(experimentResults, variantKey)), + backgroundColor: getExperimentInsightColour(getIndexForVariant(metricResults[0], variantKey)), }} /> + {result.label} @@ -205,8 +209,15 @@ export function ResultsQuery({ ) } -export function ExploreButton({ icon = }: { icon?: JSX.Element }): JSX.Element { - const { experimentResults, experiment, featureFlags } = useValues(experimentLogic) +export function ExploreButton({ + icon = , + metricIndex = 0, +}: { + icon?: JSX.Element + metricIndex?: number +}): JSX.Element { + const { metricResults, experiment, featureFlags } = useValues(experimentLogic) + const result = metricResults?.[metricIndex] // keep in sync with https://github.com/PostHog/posthog/blob/master/ee/clickhouse/queries/experiments/funnel_experiment_result.py#L71 // :TRICKY: In the case of no results, we still want users to explore the query, so they can debug further. @@ -223,7 +234,7 @@ export function ExploreButton({ icon = }: { icon?: JSX.Element let query: InsightVizNode if (featureFlags[FEATURE_FLAGS.EXPERIMENTS_HOGQL]) { - const newQueryResults = experimentResults as unknown as + const newQueryResults = result as unknown as | CachedExperimentTrendsQueryResponse | CachedExperimentFunnelsQueryResponse @@ -237,7 +248,7 @@ export function ExploreButton({ icon = }: { icon?: JSX.Element source: source as InsightQueryNode, } } else { - const oldQueryResults = experimentResults as ExperimentResults['result'] + const oldQueryResults = result as unknown as ExperimentResults['result'] if (!oldQueryResults?.filters) { return <> @@ -272,7 +283,9 @@ export function ExploreButton({ icon = }: { icon?: JSX.Element } export function ResultsHeader(): JSX.Element { - const { experimentResults } = useValues(experimentLogic) + const { metricResults } = useValues(experimentLogic) + + const result = metricResults?.[0] return (
@@ -284,16 +297,17 @@ export function ResultsHeader(): JSX.Element {
-
{experimentResults && }
+
{result && }
) } -export function NoResultsEmptyState(): JSX.Element { +export function NoResultsEmptyState({ metricIndex = 0 }: { metricIndex?: number }): JSX.Element { type ErrorCode = 'no-events' | 'no-flag-info' | 'no-control-variant' | 'no-test-variant' - const { experimentResultsLoading, experimentResultCalculationError } = useValues(experimentLogic) + const { metricResultsLoading, primaryMetricsResultErrors } = useValues(experimentLogic) + const metricError = primaryMetricsResultErrors?.[metricIndex] function ChecklistItem({ errorCode, value }: { errorCode: ErrorCode; value: boolean }): JSX.Element { const failureText = { @@ -327,28 +341,25 @@ export function NoResultsEmptyState(): JSX.Element { ) } - if (experimentResultsLoading) { + if (metricResultsLoading) { return <> } // Validation errors return 400 and are rendered as a checklist - if (experimentResultCalculationError?.statusCode === 400) { - let parsedDetail: Record - try { - parsedDetail = JSON.parse(experimentResultCalculationError.detail) - } catch (error) { + if (metricError?.statusCode === 400) { + if (!metricError.hasDiagnostics) { return (
Experiment results could not be calculated
-
{experimentResultCalculationError.detail}
+
{metricError.detail}
) } const checklistItems = [] - for (const [errorCode, value] of Object.entries(parsedDetail)) { + for (const [errorCode, value] of Object.entries(metricError.detail as Record)) { checklistItems.push() } @@ -377,14 +388,14 @@ export function NoResultsEmptyState(): JSX.Element { ) } - if (experimentResultCalculationError?.statusCode === 504) { + if (metricError?.statusCode === 504) { return (

Experiment results timed out

- {!!experimentResultCalculationError && ( + {!!metricError && (
This may occur when the experiment has a large amount of data or is particularly complex. We are actively working on fixing this. In the meantime, please try refreshing @@ -404,11 +415,7 @@ export function NoResultsEmptyState(): JSX.Element {

Experiment results could not be calculated

- {!!experimentResultCalculationError && ( -
- {experimentResultCalculationError.detail} -
- )} + {!!metricError &&
{metricError.detail}
}
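Editor's note: `NoResultsEmptyState` now reads per-metric errors from `primaryMetricsResultErrors`. A sketch of the triage it performs, with the error shape inferred from this diff (a `statusCode`, a `hasDiagnostics` flag, and a `detail` that is either a human-readable message or a map of diagnostic flags):

```ts
// Inferred shape: 400 + hasDiagnostics carries a machine-readable checklist
// in `detail`; 504 is a timeout; anything else is a plain message.
interface MetricResultError {
    statusCode: number
    hasDiagnostics?: boolean
    detail: string | Record<string, boolean>
}

type RenderPlan =
    | { kind: 'checklist'; items: [string, boolean][] }
    | { kind: 'timeout' }
    | { kind: 'message'; message: string }

export function planErrorRender(error: MetricResultError): RenderPlan {
    if (error.statusCode === 400 && error.hasDiagnostics && typeof error.detail !== 'string') {
        // Validation errors render as a checklist, one row per diagnostic flag.
        return { kind: 'checklist', items: Object.entries(error.detail) }
    }
    if (error.statusCode === 504) {
        return { kind: 'timeout' }
    }
    return { kind: 'message', message: String(error.detail) }
}
```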
@@ -464,7 +471,7 @@ export function PageHeaderCustom(): JSX.Element { launchExperiment, endExperiment, archiveExperiment, - loadExperimentResults, + loadMetricResults, loadSecondaryMetricResults, createExposureCohort, openShipVariantModal, @@ -505,7 +512,7 @@ export function PageHeaderCustom(): JSX.Element { {exposureCohortId ? 'View' : 'Create'} exposure cohort loadExperimentResults(true)} + onClick={() => loadMetricResults(true)} fullWidth data-attr="refresh-experiment" > @@ -588,7 +595,7 @@ export function PageHeaderCustom(): JSX.Element {
)} {featureFlags[FEATURE_FLAGS.EXPERIMENT_MAKE_DECISION] && - areResultsSignificant && + areResultsSignificant(0) && !isSingleVariantShipped && ( <> @@ -615,7 +622,7 @@ export function ShipVariantModal({ experimentId }: { experimentId: Experiment['i const { aggregationLabel } = useValues(groupsModel) const [selectedVariantKey, setSelectedVariantKey] = useState() - useEffect(() => setSelectedVariantKey(sortedWinProbabilities[0]?.key), [sortedWinProbabilities]) + useEffect(() => setSelectedVariantKey(sortedWinProbabilities(0)[0]?.key), [sortedWinProbabilities(0)]) const aggregationTargetName = experiment.filters.aggregation_group_type_index != null @@ -656,12 +663,12 @@ export function ShipVariantModal({ experimentId }: { experimentId: Experiment['i data-attr="metrics-selector" value={selectedVariantKey} onChange={(variantKey) => setSelectedVariantKey(variantKey)} - options={sortedWinProbabilities.map(({ key }) => ({ + options={sortedWinProbabilities(0).map(({ key }) => ({ value: key, label: (
- {key === sortedWinProbabilities[0]?.key && ( + {key === sortedWinProbabilities(0)[0]?.key && ( Winning @@ -693,9 +700,9 @@ export function ActionBanner(): JSX.Element { const { experiment, getMetricType, - experimentResults, + metricResults, experimentLoading, - experimentResultsLoading, + metricResultsLoading, isExperimentRunning, areResultsSignificant, isExperimentStopped, @@ -706,6 +713,7 @@ export function ActionBanner(): JSX.Element { featureFlags, } = useValues(experimentLogic) + const result = metricResults?.[0] const { archiveExperiment } = useActions(experimentLogic) const { aggregationLabel } = useValues(groupsModel) @@ -720,7 +728,7 @@ export function ActionBanner(): JSX.Element { const recommendedRunningTime = experiment?.parameters?.recommended_running_time || 1 const recommendedSampleSize = experiment?.parameters?.recommended_sample_size || 100 - if (!experiment || experimentLoading || experimentResultsLoading) { + if (!experiment || experimentLoading || metricResultsLoading) { return <> } @@ -766,12 +774,12 @@ export function ActionBanner(): JSX.Element { } // Running, results present, not significant - if (isExperimentRunning && experimentResults && !isExperimentStopped && !areResultsSignificant) { + if (isExperimentRunning && result && !isExperimentStopped && !areResultsSignificant(0)) { // Results insignificant, but a large enough sample/running time has been achieved // Further collection unlikely to change the result -> recommmend cutting the losses if ( metricType === InsightType.FUNNELS && - funnelResultsPersonsTotal > Math.max(recommendedSampleSize, 500) && + funnelResultsPersonsTotal(0) > Math.max(recommendedSampleSize, 500) && dayjs().diff(experiment.start_date, 'day') > 2 // at least 2 days running ) { return ( @@ -800,9 +808,9 @@ export function ActionBanner(): JSX.Element { } // Running, results significant - if (isExperimentRunning && !isExperimentStopped && areResultsSignificant && experimentResults) { - const { probability } = experimentResults - const winningVariant = getHighestProbabilityVariant(experimentResults) + if (isExperimentRunning && !isExperimentStopped && areResultsSignificant(0) && result) { + const { probability } = result + const winningVariant = getHighestProbabilityVariant(result) if (!winningVariant) { return <> } @@ -812,7 +820,7 @@ export function ActionBanner(): JSX.Element { // Win probability only slightly over 0.9 and the recommended sample/time just met -> proceed with caution if ( metricType === InsightType.FUNNELS && - funnelResultsPersonsTotal < recommendedSampleSize + 50 && + funnelResultsPersonsTotal(0) < recommendedSampleSize + 50 && winProbability < 0.93 ) { return ( @@ -848,7 +856,7 @@ export function ActionBanner(): JSX.Element { } // Stopped, results significant - if (isExperimentStopped && areResultsSignificant) { + if (isExperimentStopped && areResultsSignificant(0)) { return ( You have stopped this experiment, and it is no longer collecting data. With significant results in hand, @@ -866,7 +874,7 @@ export function ActionBanner(): JSX.Element { } // Stopped, results not significant - if (isExperimentStopped && experimentResults && !areResultsSignificant) { + if (isExperimentStopped && result && !areResultsSignificant(0)) { return ( You have stopped this experiment, and it is no longer collecting data. 
Because your results are not diff --git a/frontend/src/scenes/experiments/experimentLogic.tsx b/frontend/src/scenes/experiments/experimentLogic.tsx index 698c4182dc84d..42f6b2cd37652 100644 --- a/frontend/src/scenes/experiments/experimentLogic.tsx +++ b/frontend/src/scenes/experiments/experimentLogic.tsx @@ -1,4 +1,3 @@ -import { IconInfo } from '@posthog/icons' import { actions, connect, kea, key, listeners, path, props, reducers, selectors } from 'kea' import { forms } from 'kea-forms' import { loaders } from 'kea-loaders' @@ -8,11 +7,9 @@ import { EXPERIMENT_DEFAULT_DURATION, FunnelLayout } from 'lib/constants' import { FEATURE_FLAGS } from 'lib/constants' import { dayjs } from 'lib/dayjs' import { lemonToast } from 'lib/lemon-ui/LemonToast/LemonToast' -import { Tooltip } from 'lib/lemon-ui/Tooltip' import { featureFlagLogic } from 'lib/logic/featureFlagLogic' import { hasFormErrors, toParams } from 'lib/utils' import { eventUsageLogic } from 'lib/utils/eventUsageLogic' -import { ReactElement } from 'react' import { validateFeatureFlagKey } from 'scenes/feature-flags/featureFlagLogic' import { funnelDataLogic } from 'scenes/funnels/funnelDataLogic' import { insightDataLogic } from 'scenes/insights/insightDataLogic' @@ -31,6 +28,7 @@ import { CachedExperimentFunnelsQueryResponse, CachedExperimentTrendsQueryResponse, ExperimentFunnelsQuery, + ExperimentSignificanceCode, ExperimentTrendsQuery, NodeKind, } from '~/queries/schema' @@ -55,7 +53,6 @@ import { ProductKey, PropertyMathType, SecondaryMetricResults, - SignificanceCode, TrendExperimentVariant, TrendResult, TrendsFilterType, @@ -177,7 +174,6 @@ export const experimentLogic = kea([ setExperimentType: (type?: string) => ({ type }), removeExperimentGroup: (idx: number) => ({ idx }), setEditExperiment: (editing: boolean) => ({ editing }), - setExperimentResultCalculationError: (error: ExperimentResultCalculationError) => ({ error }), setFlagImplementationWarning: (warning: boolean) => ({ warning }), setExposureAndSampleSize: (exposure: number, sampleSize: number) => ({ exposure, sampleSize }), updateExperimentGoal: (filters: Partial) => ({ filters }), @@ -420,12 +416,6 @@ export const experimentLogic = kea([ setEditExperiment: (_, { editing }) => editing, }, ], - experimentResultCalculationError: [ - null as ExperimentResultCalculationError | null, - { - setExperimentResultCalculationError: (_, { error }) => error, - }, - ], flagImplementationWarning: [ false as boolean, { @@ -597,10 +587,7 @@ export const experimentLogic = kea([ experiment && actions.reportExperimentViewed(experiment) if (experiment?.start_date) { - actions.loadExperimentResults() - if (values.featureFlags[FEATURE_FLAGS.EXPERIMENTS_MULTIPLE_METRICS]) { - actions.loadMetricResults() - } + actions.loadMetricResults() actions.loadSecondaryMetricResults() } }, @@ -618,7 +605,7 @@ export const experimentLogic = kea([ actions.updateExperiment({ end_date: endDate.toISOString() }) const duration = endDate.diff(values.experiment?.start_date, 'second') values.experiment && - actions.reportExperimentCompleted(values.experiment, endDate, duration, values.areResultsSignificant) + actions.reportExperimentCompleted(values.experiment, endDate, duration, values.areResultsSignificant(0)) }, archiveExperiment: async () => { actions.updateExperiment({ archived: true }) @@ -683,13 +670,11 @@ export const experimentLogic = kea([ resetRunningExperiment: async () => { actions.updateExperiment({ start_date: null, end_date: null, archived: false }) values.experiment && 
actions.reportExperimentReset(values.experiment) - - actions.loadExperimentResultsSuccess(null) actions.loadSecondaryMetricResultsSuccess([]) }, updateExperimentSuccess: async ({ experiment }) => { actions.updateExperiments(experiment) - actions.loadExperimentResults() + actions.loadMetricResults() actions.loadSecondaryMetricResults() }, setExperiment: async ({ experiment }) => { @@ -838,69 +823,6 @@ export const experimentLogic = kea([ return response }, }, - experimentResults: [ - null as - | ExperimentResults['result'] - | CachedExperimentTrendsQueryResponse - | CachedExperimentFunnelsQueryResponse - | null, - { - loadExperimentResults: async ( - refresh?: boolean - ): Promise< - | ExperimentResults['result'] - | CachedExperimentTrendsQueryResponse - | CachedExperimentFunnelsQueryResponse - | null - > => { - try { - // :FLAG: CLEAN UP AFTER MIGRATION - if (values.featureFlags[FEATURE_FLAGS.EXPERIMENTS_HOGQL]) { - // Queries are shareable, so we need to set the experiment_id for the backend to correctly associate the query with the experiment - const queryWithExperimentId = { - ...values.experiment.metrics[0], - experiment_id: values.experimentId, - } - if ( - queryWithExperimentId.kind === NodeKind.ExperimentTrendsQuery && - values.featureFlags[FEATURE_FLAGS.EXPERIMENT_STATS_V2] - ) { - queryWithExperimentId.stats_version = 2 - } - - const response = await performQuery(queryWithExperimentId, undefined, refresh) - - return { - ...response, - fakeInsightId: Math.random().toString(36).substring(2, 15), - } as unknown as CachedExperimentTrendsQueryResponse | CachedExperimentFunnelsQueryResponse - } - - const refreshParam = refresh ? '?refresh=true' : '' - const response: ExperimentResults = await api.get( - `api/projects/${values.currentProjectId}/experiments/${values.experimentId}/results${refreshParam}` - ) - return { - ...response.result, - fakeInsightId: Math.random().toString(36).substring(2, 15), - last_refresh: response.last_refresh, - } - } catch (error: any) { - let errorDetail = error.detail - // :HANDLE FLAG: CLEAN UP AFTER MIGRATION - if (values.featureFlags[FEATURE_FLAGS.EXPERIMENTS_HOGQL]) { - const errorDetailMatch = error.detail.match(/\{.*\}/) - errorDetail = errorDetailMatch ? errorDetailMatch[0] : error.detail - } - actions.setExperimentResultCalculationError({ detail: errorDetail, statusCode: error.status }) - if (error.status === 504) { - actions.reportExperimentResultsLoadingTimeout(values.experimentId) - } - return null - } - }, - }, - ], metricResults: [ null as (CachedExperimentTrendsQueryResponse | CachedExperimentFunnelsQueryResponse | null)[] | null, { @@ -1187,83 +1109,40 @@ export const experimentLogic = kea([ }, ], areResultsSignificant: [ - (s) => [s.experimentResults], - (experimentResults): boolean => { - return experimentResults?.significant || false - }, - ], - // TODO: remove with the old UI - significanceBannerDetails: [ - (s) => [s.experimentResults], - (experimentResults): string | ReactElement => { - if (experimentResults?.significance_code === SignificanceCode.HighLoss) { - return ( - <> - This is because the expected loss in conversion is greater than 1% - Current value is {((experimentResults?.expected_loss || 0) * 100)?.toFixed(2)}% - } - > - - - . - - ) - } - - if (experimentResults?.significance_code === SignificanceCode.HighPValue) { - return ( - <> - This is because the p value is greater than 0.05 - Current value is {experimentResults?.p_value?.toFixed(3) || 1}.} - > - - - . 
- - ) } - if (experimentResults?.significance_code === SignificanceCode.LowWinProbability) { - return 'This is because the win probability of all test variants combined is less than 90%.' - } - - if (experimentResults?.significance_code === SignificanceCode.NotEnoughExposure) { - return 'This is because we need at least 100 people per variant to declare significance.' - } - - return '' - }, + (s) => [s.metricResults], + (metricResults: (CachedExperimentFunnelsQueryResponse | CachedExperimentTrendsQueryResponse | null)[]) => + (metricIndex: number = 0): boolean => { + return metricResults?.[metricIndex]?.significant || false + }, ], significanceDetails: [ - (s) => [s.experimentResults], - (experimentResults): string => { - if (experimentResults?.significance_code === SignificanceCode.HighLoss) { - return `This is because the expected loss in conversion is greater than 1% (current value is ${( - (experimentResults?.expected_loss || 0) * 100 - )?.toFixed(2)}%).` - } + (s) => [s.metricResults], + (metricResults: (CachedExperimentFunnelsQueryResponse | CachedExperimentTrendsQueryResponse | null)[]) => + (metricIndex: number = 0): string => { + const results = metricResults?.[metricIndex] + + if (results?.significance_code === ExperimentSignificanceCode.HighLoss) { + return `This is because the expected loss in conversion is greater than 1% (current value is ${( + ((results as CachedExperimentFunnelsQueryResponse)?.expected_loss || 0) * 100 + )?.toFixed(2)}%).` + } - if (experimentResults?.significance_code === SignificanceCode.HighPValue) { - return `This is because the p value is greater than 0.05 (current value is ${ - experimentResults?.p_value?.toFixed(3) || 1 - }).` - } + if (results?.significance_code === ExperimentSignificanceCode.HighPValue) { + return `This is because the p value is greater than 0.05 (current value is ${ + (results as CachedExperimentTrendsQueryResponse)?.p_value?.toFixed(3) || 1 + }).` + } - if (experimentResults?.significance_code === SignificanceCode.LowWinProbability) { - return 'This is because the win probability of all test variants combined is less than 90%.' - } + if (results?.significance_code === ExperimentSignificanceCode.LowWinProbability) { + return 'This is because the win probability of all test variants combined is less than 90%.' + } - if (experimentResults?.significance_code === SignificanceCode.NotEnoughExposure) { - return 'This is because we need at least 100 people per variant to declare significance.' - } + if (results?.significance_code === ExperimentSignificanceCode.NotEnoughExposure) { + return 'This is because we need at least 100 people per variant to declare significance.'
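Editor's note: the selector migration above follows one pattern throughout: the single `experimentResults` response becomes a `metricResults` array, and selectors such as `areResultsSignificant` now return a function of a metric index, so call sites ask about a specific metric (`areResultsSignificant(0)` for the primary one). A simplified sketch of the pattern with the kea wiring omitted:

```ts
// The selector's value is itself a function: callers pick the metric by index.
// Result shape reduced to the one field this example needs.
interface MetricResultLike {
    significant?: boolean
}

export function makeAreResultsSignificant(
    metricResults: (MetricResultLike | null)[] | null
): (metricIndex?: number) => boolean {
    return (metricIndex = 0) => metricResults?.[metricIndex]?.significant || false
}

// Usage mirrors the call sites in this diff, e.g. `areResultsSignificant(0)`.
const areResultsSignificant = makeAreResultsSignificant([{ significant: true }, null])
console.log(areResultsSignificant(0)) // true: primary metric is significant
console.log(areResultsSignificant(1)) // false: no result for that metric
```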
+ } - return '' - }, + return '' + }, ], recommendedSampleSize: [ (s) => [s.conversionMetrics, s.minimumSampleSizePerVariant, s.variants], @@ -1353,17 +1232,17 @@ export const experimentLogic = kea([ () => [], () => ( - experimentResults: + metricResult: | Partial | CachedExperimentFunnelsQueryResponse | CachedExperimentTrendsQueryResponse | null, variantKey: string ): number | null => { - if (!experimentResults || !experimentResults.insight) { + if (!metricResult || !metricResult.insight) { return null } - const variantResults = (experimentResults.insight as FunnelStep[][]).find( + const variantResults = (metricResult.insight as FunnelStep[][]).find( (variantFunnel: FunnelStep[]) => { const breakdownValue = variantFunnel[0]?.breakdown_value return Array.isArray(breakdownValue) && breakdownValue[0] === variantKey @@ -1380,7 +1259,7 @@ export const experimentLogic = kea([ () => [], () => ( - experimentResults: + metricResult: | Partial | CachedSecondaryMetricExperimentFunnelsQueryResponse | CachedSecondaryMetricExperimentTrendsQueryResponse @@ -1388,13 +1267,13 @@ export const experimentLogic = kea([ variantKey: string, metricType: InsightType ): [number, number] | null => { - const credibleInterval = experimentResults?.credible_intervals?.[variantKey] + const credibleInterval = metricResult?.credible_intervals?.[variantKey] if (!credibleInterval) { return null } if (metricType === InsightType.FUNNELS) { - const controlVariant = (experimentResults.variants as FunnelExperimentVariant[]).find( + const controlVariant = (metricResult.variants as FunnelExperimentVariant[]).find( ({ key }) => key === 'control' ) as FunnelExperimentVariant const controlConversionRate = @@ -1411,7 +1290,7 @@ export const experimentLogic = kea([ return [lowerBound, upperBound] } - const controlVariant = (experimentResults.variants as TrendExperimentVariant[]).find( + const controlVariant = (metricResult.variants as TrendExperimentVariant[]).find( ({ key }) => key === 'control' ) as TrendExperimentVariant @@ -1428,7 +1307,7 @@ export const experimentLogic = kea([ (s) => [s.getMetricType], (getMetricType) => ( - experimentResults: + metricResult: | Partial | CachedExperimentTrendsQueryResponse | CachedExperimentFunnelsQueryResponse @@ -1437,14 +1316,14 @@ export const experimentLogic = kea([ ): number | null => { // Ensures we get the right index from results, so the UI can // display the right colour for the variant - if (!experimentResults || !experimentResults.insight) { + if (!metricResult || !metricResult.insight) { return null } let index = -1 if (getMetricType(0) === InsightType.FUNNELS) { // Funnel Insight is displayed in order of decreasing count - index = (Array.isArray(experimentResults.insight) ? [...experimentResults.insight] : []) + index = (Array.isArray(metricResult.insight) ? 
[...metricResult.insight] : []) .sort((a, b) => { const aCount = (a && Array.isArray(a) && a[0]?.count) || 0 const bCount = (b && Array.isArray(b) && b[0]?.count) || 0 @@ -1458,7 +1337,7 @@ export const experimentLogic = kea([ return Array.isArray(breakdownValue) && breakdownValue[0] === variant }) } else { - index = (experimentResults.insight as TrendResult[]).findIndex( + index = (metricResult.insight as TrendResult[]).findIndex( (variantTrend: TrendResult) => variantTrend.breakdown_value === variant ) } @@ -1474,7 +1353,7 @@ export const experimentLogic = kea([ (s) => [s.experimentMathAggregationForTrends], (experimentMathAggregationForTrends) => ( - experimentResults: + metricResult: | Partial | CachedExperimentTrendsQueryResponse | CachedExperimentFunnelsQueryResponse @@ -1483,10 +1362,10 @@ export const experimentLogic = kea([ type: 'primary' | 'secondary' = 'primary' ): number | null => { const usingMathAggregationType = type === 'primary' ? experimentMathAggregationForTrends() : false - if (!experimentResults || !experimentResults.insight) { + if (!metricResult || !metricResult.insight) { return null } - const variantResults = (experimentResults.insight as TrendResult[]).find( + const variantResults = (metricResult.insight as TrendResult[]).find( (variantTrend: TrendResult) => variantTrend.breakdown_value === variant ) if (!variantResults) { @@ -1524,17 +1403,17 @@ export const experimentLogic = kea([ () => [], () => ( - experimentResults: + metricResult: | Partial | CachedExperimentTrendsQueryResponse | CachedExperimentFunnelsQueryResponse | null, variant: string ): number | null => { - if (!experimentResults || !experimentResults.variants) { + if (!metricResult || !metricResult.variants) { return null } - const variantResults = (experimentResults.variants as TrendExperimentVariant[]).find( + const variantResults = (metricResult.variants as TrendExperimentVariant[]).find( (variantTrend: TrendExperimentVariant) => variantTrend.key === variant ) if (!variantResults || !variantResults.absolute_exposure) { @@ -1564,58 +1443,50 @@ export const experimentLogic = kea([ } }, ], - sortedExperimentResultVariants: [ - (s) => [s.experimentResults, s.experiment], - (experimentResults, experiment): string[] => { - if (experimentResults) { - const sortedResults = Object.keys(experimentResults.probability).sort( - (a, b) => experimentResults.probability[b] - experimentResults.probability[a] - ) - - experiment?.parameters?.feature_flag_variants?.forEach((variant) => { - if (!sortedResults.includes(variant.key)) { - sortedResults.push(variant.key) - } - }) - return sortedResults - } - return [] - }, - ], tabularExperimentResults: [ - (s) => [s.experiment, s.experimentResults, s.getMetricType], - (experiment, experimentResults, getMetricType): any => { - const tabularResults = [] - const metricType = getMetricType(0) - - if (experimentResults) { - for (const variantObj of experimentResults.variants) { - if (metricType === InsightType.FUNNELS) { - const { key, success_count, failure_count } = variantObj as FunnelExperimentVariant - tabularResults.push({ key, success_count, failure_count }) - } else if (metricType === InsightType.TRENDS) { - const { key, count, exposure, absolute_exposure } = variantObj as TrendExperimentVariant - tabularResults.push({ key, count, exposure, absolute_exposure }) + (s) => [s.experiment, s.metricResults, s.getMetricType], + ( + experiment, + metricResults: ( + | CachedExperimentFunnelsQueryResponse + | CachedExperimentTrendsQueryResponse + | null + )[], + getMetricType + 
) => + (metricIndex: number = 0): any[] => { + const tabularResults = [] + const metricType = getMetricType(metricIndex) + const result = metricResults?.[metricIndex] + + if (result) { + for (const variantObj of result.variants) { + if (metricType === InsightType.FUNNELS) { + const { key, success_count, failure_count } = variantObj as FunnelExperimentVariant + tabularResults.push({ key, success_count, failure_count }) + } else if (metricType === InsightType.TRENDS) { + const { key, count, exposure, absolute_exposure } = variantObj as TrendExperimentVariant + tabularResults.push({ key, count, exposure, absolute_exposure }) + } } } - } - if (experiment.feature_flag?.filters.multivariate?.variants) { - for (const { key } of experiment.feature_flag.filters.multivariate.variants) { - if (tabularResults.find((variantObj) => variantObj.key === key)) { - continue - } + if (experiment.feature_flag?.filters.multivariate?.variants) { + for (const { key } of experiment.feature_flag.filters.multivariate.variants) { + if (tabularResults.find((variantObj) => variantObj.key === key)) { + continue + } - if (metricType === InsightType.FUNNELS) { - tabularResults.push({ key, success_count: null, failure_count: null }) - } else if (metricType === InsightType.TRENDS) { - tabularResults.push({ key, count: null, exposure: null, absolute_exposure: null }) + if (metricType === InsightType.FUNNELS) { + tabularResults.push({ key, success_count: null, failure_count: null }) + } else if (metricType === InsightType.TRENDS) { + tabularResults.push({ key, count: null, exposure: null, absolute_exposure: null }) + } } } - } - return tabularResults - }, + return tabularResults + }, ], tabularSecondaryMetricResults: [ (s) => [s.experiment, s.secondaryMetricResults, s.conversionRateForVariant, s.countDataForVariant], @@ -1655,39 +1526,56 @@ export const experimentLogic = kea([ }, ], sortedWinProbabilities: [ - (s) => [s.experimentResults, s.conversionRateForVariant], + (s) => [s.metricResults, s.conversionRateForVariant], ( - experimentResults, - conversionRateForVariant - ): { key: string; winProbability: number; conversionRate: number | null }[] => { - if (!experimentResults) { - return [] - } + metricResults: ( + | CachedExperimentFunnelsQueryResponse + | CachedExperimentTrendsQueryResponse + | null + )[], + conversionRateForVariant + ) => + (metricIndex: number = 0) => { + const result = metricResults?.[metricIndex] + + if (!result || !result.probability) { + return [] + } - return Object.keys(experimentResults.probability) - .map((key) => ({ - key, - winProbability: experimentResults.probability[key], - conversionRate: conversionRateForVariant(experimentResults, key), - })) - .sort((a, b) => b.winProbability - a.winProbability) - }, + return Object.keys(result.probability) + .map((key) => ({ + key, + winProbability: result.probability[key], + conversionRate: conversionRateForVariant(result, key), + })) + .sort((a, b) => b.winProbability - a.winProbability) + }, ], funnelResultsPersonsTotal: [ - (s) => [s.experimentResults, s.getMetricType], - (experimentResults, getMetricType): number => { - if (getMetricType(0) !== InsightType.FUNNELS || !experimentResults?.insight) { - return 0 - } + (s) => [s.metricResults, s.getMetricType], + ( + metricResults: ( + | CachedExperimentFunnelsQueryResponse + | CachedExperimentTrendsQueryResponse + | null + )[], + getMetricType + ) => + (metricIndex: number = 0): number => { + const result = metricResults?.[metricIndex] - let sum = 0 - experimentResults.insight.forEach((variantResult) => 
{ - if (variantResult[0]?.count) { - sum += variantResult[0].count + if (getMetricType(metricIndex) !== InsightType.FUNNELS || !result?.insight) { + return 0 } - }) - return sum - }, + + let sum = 0 + result.insight.forEach((variantResult) => { + if (variantResult[0]?.count) { + sum += variantResult[0].count + } + }) + return sum + }, ], actualRunningTime: [ (s) => [s.experiment], diff --git a/frontend/src/scenes/insights/insightDataLogic.tsx b/frontend/src/scenes/insights/insightDataLogic.tsx index 1de631587cf35..b5036267488b0 100644 --- a/frontend/src/scenes/insights/insightDataLogic.tsx +++ b/frontend/src/scenes/insights/insightDataLogic.tsx @@ -219,6 +219,7 @@ export const insightDataLogic = kea([ if (isQueryTooLarge(query)) { localStorage.removeItem(`draft-query-${values.currentTeamId}`) } + localStorage.setItem( `draft-query-${values.currentTeamId}`, crushDraftQueryForLocalStorage(query, Date.now()) diff --git a/frontend/src/scenes/insights/utils.tsx b/frontend/src/scenes/insights/utils.tsx index 5a1b4d56ec7d9..04e55519ab05b 100644 --- a/frontend/src/scenes/insights/utils.tsx +++ b/frontend/src/scenes/insights/utils.tsx @@ -1,4 +1,3 @@ -import JSONCrush from 'jsoncrush' import api from 'lib/api' import { dayjs } from 'lib/dayjs' import { CORE_FILTER_DEFINITIONS_BY_GROUP } from 'lib/taxonomy' @@ -445,40 +444,27 @@ export function isQueryTooLarge(query: Node>): boolean { export function parseDraftQueryFromLocalStorage( query: string ): { query: Node>; timestamp: number } | null { - // First try to uncrush the query if it's a JSONCrush query else fall back to parsing it as a JSON try { - const uncrushedQuery = JSONCrush.uncrush(query) - return JSON.parse(uncrushedQuery) + return JSON.parse(query) } catch (e) { - console.error('Error parsing uncrushed query', e) - try { - return JSON.parse(query) - } catch (e) { - console.error('Error parsing query', e) - return null - } + console.error('Error parsing query', e) + return null } } export function crushDraftQueryForLocalStorage(query: Node>, timestamp: number): string { - return JSONCrush.crush(JSON.stringify({ query, timestamp })) + return JSON.stringify({ query, timestamp }) } export function parseDraftQueryFromURL(query: string): Node> | null { try { - const uncrushedQuery = JSONCrush.uncrush(query) - return JSON.parse(uncrushedQuery) + return JSON.parse(query) } catch (e) { - console.error('Error parsing uncrushed query', e) - try { - return JSON.parse(query) - } catch (e) { - console.error('Error parsing query', e) - return null - } + console.error('Error parsing query', e) + return null } } export function crushDraftQueryForURL(query: Node>): string { - return JSONCrush.crush(JSON.stringify(query)) + return JSON.stringify(query) } diff --git a/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx b/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx index e7bc3a324202c..afde3f1836415 100644 --- a/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx +++ b/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx @@ -18,7 +18,7 @@ import { INTEGER_REGEX_MATCH_GROUPS } from './utils' const Component = ({ attributes }: NotebookNodeProps): JSX.Element => { const { id } = attributes - const { experiment, experimentLoading, experimentMissing, isExperimentRunning, experimentResults } = useValues( + const { experiment, experimentLoading, experimentMissing, isExperimentRunning, metricResults } = useValues( experimentLogic({ experimentId: id }) ) const { loadExperiment } = useActions(experimentLogic({ 
diff --git a/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx b/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx
index e7bc3a324202c..afde3f1836415 100644
--- a/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx
+++ b/frontend/src/scenes/notebooks/Nodes/NotebookNodeExperiment.tsx
@@ -18,7 +18,7 @@ import { INTEGER_REGEX_MATCH_GROUPS } from './utils'
 const Component = ({ attributes }: NotebookNodeProps): JSX.Element => {
     const { id } = attributes
-    const { experiment, experimentLoading, experimentMissing, isExperimentRunning, experimentResults } = useValues(
+    const { experiment, experimentLoading, experimentMissing, isExperimentRunning, metricResults } = useValues(
         experimentLogic({ experimentId: id })
     )
     const { loadExperiment } = useActions(experimentLogic({ experimentId: id }))
@@ -41,6 +41,10 @@ const Component = ({ attributes }: NotebookNodeProps
     }
 
+    if (!metricResults) {
+        return <>
+    }
+
     return (
@@ -78,7 +82,7 @@ const Component = ({ attributes }: NotebookNodeProps
- +
)} diff --git a/frontend/src/scenes/pipeline/hogfunctions/HogFunctionTest.tsx b/frontend/src/scenes/pipeline/hogfunctions/HogFunctionTest.tsx index 1861b06f369ed..787d46245c4cb 100644 --- a/frontend/src/scenes/pipeline/hogfunctions/HogFunctionTest.tsx +++ b/frontend/src/scenes/pipeline/hogfunctions/HogFunctionTest.tsx @@ -1,9 +1,19 @@ import { TZLabel } from '@posthog/apps-common' import { IconInfo, IconX } from '@posthog/icons' -import { LemonButton, LemonLabel, LemonSwitch, LemonTable, LemonTag, Tooltip } from '@posthog/lemon-ui' +import { + LemonButton, + LemonDivider, + LemonLabel, + LemonSwitch, + LemonTable, + LemonTag, + Spinner, + Tooltip, +} from '@posthog/lemon-ui' import clsx from 'clsx' import { useActions, useValues } from 'kea' import { Form } from 'kea-forms' +import { More } from 'lib/lemon-ui/LemonButton/More' import { LemonField } from 'lib/lemon-ui/LemonField' import { CodeEditorResizeable } from 'lib/monaco/CodeEditorResizable' @@ -62,11 +72,25 @@ export function HogFunctionTestPlaceholder({ } export function HogFunctionTest(props: HogFunctionTestLogicProps): JSX.Element { - const { isTestInvocationSubmitting, testResult, expanded, sampleGlobalsLoading, sampleGlobalsError, type } = - useValues(hogFunctionTestLogic(props)) - const { submitTestInvocation, setTestResult, toggleExpanded, loadSampleGlobals } = useActions( - hogFunctionTestLogic(props) - ) + const { + isTestInvocationSubmitting, + testResult, + expanded, + sampleGlobalsLoading, + sampleGlobalsError, + type, + savedGlobals, + testInvocation, + } = useValues(hogFunctionTestLogic(props)) + const { + submitTestInvocation, + setTestResult, + toggleExpanded, + loadSampleGlobals, + deleteSavedGlobals, + setSampleGlobals, + saveGlobals, + } = useActions(hogFunctionTestLogic(props)) return (
@@ -75,7 +99,10 @@ export function HogFunctionTest(props: HogFunctionTestLogicProps): JSX.Element { >
-

Testing

+

+ Testing + {sampleGlobalsLoading ? : null} +

{!expanded && (type === 'email' ? (

Click here to test the provider with a sample e-mail

@@ -87,7 +114,7 @@ export function HogFunctionTest(props: HogFunctionTestLogicProps): JSX.Element {
{!expanded ? ( - toggleExpanded()}> + toggleExpanded()}> Start testing ) : ( @@ -97,46 +124,100 @@ export function HogFunctionTest(props: HogFunctionTestLogicProps): JSX.Element { type="primary" onClick={() => setTestResult(null)} loading={isTestInvocationSubmitting} + data-attr="clear-hog-test-result" > Clear test result ) : ( <> - - Refresh globals - - - {({ value, onChange }) => ( - - When selected, async functions such as `fetch` will not - actually be called but instead will be mocked out with - the fetch content logged instead - - } + + + {({ value, onChange }) => ( + onChange(!v)} + checked={!value} + data-attr="toggle-hog-test-mocking" + className="px-2 py-1" + label={ + + When disabled, async functions such as + `fetch` will not be called. Instead they + will be mocked out and logged. + + } + > + + Make real HTTP requests + + + + } + /> + )} + + + + Fetch new event + + + {savedGlobals.map(({ name, globals }, index) => ( +
+ setSampleGlobals(globals)} + fullWidth + className="flex-1" + > + {name} + + } + onClick={() => deleteSavedGlobals(index)} + tooltip="Delete saved test data" + /> +
+ ))} + {testInvocation.globals && ( + { + const name = prompt('Name this test data') + if (name) { + saveGlobals(name, JSON.parse(testInvocation.globals)) + } + }} + disabledReason={(() => { + try { + JSON.parse(testInvocation.globals) + } catch (e) { + return 'Invalid globals JSON' + } + return undefined + })()} > - - Mock out HTTP requests - - - - } - /> - )} -
+ Save test data + + )} + + } + /> @@ -145,7 +226,12 @@ export function HogFunctionTest(props: HogFunctionTestLogicProps): JSX.Element { )} - } onClick={() => toggleExpanded()} tooltip="Hide testing" /> + } + onClick={() => toggleExpanded()} + tooltip="Hide testing" + /> )}
diff --git a/frontend/src/scenes/pipeline/hogfunctions/hogFunctionConfigurationLogic.tsx b/frontend/src/scenes/pipeline/hogfunctions/hogFunctionConfigurationLogic.tsx index d6f7c98884a7c..d38b39ce21c59 100644 --- a/frontend/src/scenes/pipeline/hogfunctions/hogFunctionConfigurationLogic.tsx +++ b/frontend/src/scenes/pipeline/hogfunctions/hogFunctionConfigurationLogic.tsx @@ -1,6 +1,6 @@ import { lemonToast } from '@posthog/lemon-ui' import equal from 'fast-deep-equal' -import { actions, afterMount, connect, kea, key, listeners, path, props, reducers, selectors } from 'kea' +import { actions, afterMount, connect, isBreakpoint, kea, key, listeners, path, props, reducers, selectors } from 'kea' import { forms } from 'kea-forms' import { loaders } from 'kea-loaders' import { beforeUnload, router } from 'kea-router' @@ -231,8 +231,15 @@ export const hogFunctionConfigurationLogic = kea ({ configuration }), persistForUnload: true, setSampleGlobalsError: (error) => ({ error }), + setSampleGlobals: (sampleGlobals: HogFunctionInvocationGlobals | null) => ({ sampleGlobals }), }), reducers(({ props }) => ({ + sampleGlobals: [ + null as HogFunctionInvocationGlobals | null, + { + setSampleGlobals: (_, { sampleGlobals }) => sampleGlobals, + }, + ], showSource: [ // Show source by default for blank templates when creating a new function !!(!props.id && props.templateId?.startsWith('template-blank-')), @@ -440,7 +447,9 @@ export const hogFunctionConfigurationLogic = kea([ ], actions: [ hogFunctionConfigurationLogic({ id: props.id }), - ['touchConfigurationField', 'loadSampleGlobalsSuccess', 'loadSampleGlobals'], + ['touchConfigurationField', 'loadSampleGlobalsSuccess', 'loadSampleGlobals', 'setSampleGlobals'], ], })), actions({ setTestResult: (result: HogFunctionTestInvocationResult | null) => ({ result }), toggleExpanded: (expanded?: boolean) => ({ expanded }), + saveGlobals: (name: string, globals: HogFunctionInvocationGlobals) => ({ name, globals }), + deleteSavedGlobals: (index: number) => ({ index }), }), reducers({ expanded: [ false as boolean, { - toggleExpanded: (_, { expanded }) => (expanded === undefined ? !_ : expanded), + toggleExpanded: (state, { expanded }) => (expanded === undefined ? 
!state : expanded), }, ], @@ -66,11 +69,23 @@ export const hogFunctionTestLogic = kea([ setTestResult: (_, { result }) => result, }, ], + + savedGlobals: [ + [] as { name: string; globals: HogFunctionInvocationGlobals }[], + { persist: true, prefix: `${getCurrentTeamId()}__` }, + { + saveGlobals: (state, { name, globals }) => [...state, { name, globals }], + deleteSavedGlobals: (state, { index }) => state.filter((_, i) => i !== index), + }, + ], }), listeners(({ values, actions }) => ({ loadSampleGlobalsSuccess: () => { actions.setTestInvocationValue('globals', JSON.stringify(values.sampleGlobals, null, 2)) }, + setSampleGlobals: ({ sampleGlobals }) => { + actions.setTestInvocationValue('globals', JSON.stringify(sampleGlobals, null, 2)) + }, })), forms(({ props, actions, values }) => ({ testInvocation: { diff --git a/frontend/src/scenes/session-recordings/player/PlayerMeta.tsx b/frontend/src/scenes/session-recordings/player/PlayerMeta.tsx index 5c811872d4134..6e4e69a038f79 100644 --- a/frontend/src/scenes/session-recordings/player/PlayerMeta.tsx +++ b/frontend/src/scenes/session-recordings/player/PlayerMeta.tsx @@ -7,7 +7,7 @@ import { CopyToClipboardInline } from 'lib/components/CopyToClipboard' import { useResizeBreakpoints } from 'lib/hooks/useResizeObserver' import { LemonSkeleton } from 'lib/lemon-ui/LemonSkeleton' import { Tooltip } from 'lib/lemon-ui/Tooltip' -import { percentage } from 'lib/utils' +import { isObject, percentage } from 'lib/utils' import { DraggableToNotebook } from 'scenes/notebooks/AddToNotebook/DraggableToNotebook' import { IconWindow } from 'scenes/session-recordings/player/icons' import { PlayerMetaLinks } from 'scenes/session-recordings/player/PlayerMetaLinks' @@ -20,6 +20,12 @@ import { Logo } from '~/toolbar/assets/Logo' import { sessionRecordingPlayerLogic, SessionRecordingPlayerMode } from './sessionRecordingPlayerLogic' function URLOrScreen({ lastUrl }: { lastUrl: string | undefined }): JSX.Element | null { + if (isObject(lastUrl) && 'href' in lastUrl) { + // regression protection, we saw a user whose site was sometimes sending the string-ified location object + // this is a best-effort attempt to show the href in that case + lastUrl = lastUrl['href'] as string | undefined + } + if (!lastUrl) { return null } diff --git a/frontend/src/scenes/settings/SettingsMap.tsx b/frontend/src/scenes/settings/SettingsMap.tsx index 0a2e3e432a2fb..b7ca139fa3056 100644 --- a/frontend/src/scenes/settings/SettingsMap.tsx +++ b/frontend/src/scenes/settings/SettingsMap.tsx @@ -1,4 +1,5 @@ import { BounceRatePageViewModeSetting } from 'scenes/settings/environment/BounceRatePageViewMode' +import { CookielessServerHashModeSetting } from 'scenes/settings/environment/CookielessServerHashMode' import { CustomChannelTypes } from 'scenes/settings/environment/CustomChannelTypes' import { DeadClicksAutocaptureSettings } from 'scenes/settings/environment/DeadClicksAutocaptureSettings' import { PersonsJoinMode } from 'scenes/settings/environment/PersonsJoinMode' @@ -218,6 +219,12 @@ export const SETTINGS_MAP: SettingSection[] = [ title: 'Custom channel type', component: , }, + { + id: 'cookieless-server-hash-mode', + title: 'Cookieless server hash mode', + component: , + flag: 'COOKIELESS_SERVER_HASH_MODE_SETTING', + }, ], }, diff --git a/frontend/src/scenes/settings/environment/CookielessServerHashMode.tsx b/frontend/src/scenes/settings/environment/CookielessServerHashMode.tsx new file mode 100644 index 0000000000000..57cd21e0ff709 --- /dev/null +++ 
b/frontend/src/scenes/settings/environment/CookielessServerHashMode.tsx @@ -0,0 +1,65 @@ +import { useActions, useValues } from 'kea' +import { LemonButton } from 'lib/lemon-ui/LemonButton' +import { LemonRadio, LemonRadioOption } from 'lib/lemon-ui/LemonRadio' +import { useState } from 'react' +import { teamLogic } from 'scenes/teamLogic' + +import { CookielessServerHashMode } from '~/types' + +const options: LemonRadioOption[] = [ + { + value: CookielessServerHashMode.Stateful, + label: ( + <> +
Stateful
+ + ), + }, + { + value: CookielessServerHashMode.Stateless, + label: ( + <> +
Stateless
+ + ), + }, + { + value: CookielessServerHashMode.Disabled, + label: ( + <> +
Disabled
+ + ), + }, +] + +export function CookielessServerHashModeSetting(): JSX.Element { + const { updateCurrentTeam } = useActions(teamLogic) + const { currentTeam } = useValues(teamLogic) + + const savedSetting = currentTeam?.cookieless_server_hash_mode ?? CookielessServerHashMode.Disabled + const [setting, setSetting] = useState(savedSetting) + + const handleChange = (newSetting: CookielessServerHashMode): void => { + updateCurrentTeam({ cookieless_server_hash_mode: newSetting }) + } + + return ( + <> +

+ Use a cookieless server-side hash mode to hash user data. This is an experimental feature preview and + may result in dropped events. +

+ +
+ handleChange(setting)} + disabledReason={setting === savedSetting ? 'No changes to save' : undefined} + > + Save + +
+
+    )
+}
diff --git a/frontend/src/scenes/settings/types.ts b/frontend/src/scenes/settings/types.ts
index 56db33d95d3cf..4281935f54190 100644
--- a/frontend/src/scenes/settings/types.ts
+++ b/frontend/src/scenes/settings/types.ts
@@ -104,6 +104,7 @@ export type SettingId =
     | 'web-vitals-autocapture'
     | 'dead-clicks-autocapture'
     | 'channel-type'
+    | 'cookieless-server-hash-mode'
 
 type FeatureFlagKey = keyof typeof FEATURE_FLAGS
diff --git a/frontend/src/scenes/surveys/surveyViewViz.tsx b/frontend/src/scenes/surveys/surveyViewViz.tsx
index 8e19c575fef10..7384fec294c94 100644
--- a/frontend/src/scenes/surveys/surveyViewViz.tsx
+++ b/frontend/src/scenes/surveys/surveyViewViz.tsx
@@ -207,6 +207,7 @@ export function RatingQuestionBarChart({
     }
     useEffect(() => {
         loadSurveyRatingResults({ questionIndex, iteration })
+        // eslint-disable-next-line react-hooks/exhaustive-deps
     }, [questionIndex])
 
     return (
@@ -301,6 +302,7 @@ export function NPSSurveyResultsBarChart({
 
     useEffect(() => {
         loadSurveyRecurringNPSResults({ questionIndex })
+        // eslint-disable-next-line react-hooks/exhaustive-deps
     }, [questionIndex])
 
     return (
@@ -397,6 +399,7 @@ export function SingleChoiceQuestionPieChart({
 
     useEffect(() => {
         loadSurveySingleChoiceResults({ questionIndex })
+        // eslint-disable-next-line react-hooks/exhaustive-deps
     }, [questionIndex])
 
     return (
@@ -499,12 +502,15 @@ export function MultipleChoiceQuestionBarChart({
 
     useEffect(() => {
         loadSurveyMultipleChoiceResults({ questionIndex })
+        // eslint-disable-next-line react-hooks/exhaustive-deps
     }, [questionIndex])
 
     useEffect(() => {
         if (surveyMultipleChoiceResults?.[questionIndex]?.data?.length) {
             setChartHeight(100 + 20 * surveyMultipleChoiceResults[questionIndex].data.length)
         }
+        // TODO this one maybe should have questionIndex as a dependency
+        // eslint-disable-next-line react-hooks/exhaustive-deps
     }, [surveyMultipleChoiceResults])
 
     return (
@@ -581,6 +587,7 @@ export function OpenTextViz({
 
     useEffect(() => {
         loadSurveyOpenTextResults({ questionIndex })
+        // eslint-disable-next-line react-hooks/exhaustive-deps
     }, [questionIndex])
 
     return (
@@ -736,9 +743,9 @@ function ResponseSummaryFeedback({ surveyId }: { surveyId: string }): JSX.Elemen
             return // Already rated
         }
         setRating(newRating)
-        posthog.capture('survey_resonse_rated', {
+        posthog.capture('ai_survey_summary_rated', {
             survey_id: surveyId,
-            answer_rating: rating,
+            answer_rating: newRating,
         })
     }
diff --git a/frontend/src/scenes/teamActivityDescriber.tsx b/frontend/src/scenes/teamActivityDescriber.tsx
index 4bde2cf4d8e50..cc20a5a02c6db 100644
--- a/frontend/src/scenes/teamActivityDescriber.tsx
+++ b/frontend/src/scenes/teamActivityDescriber.tsx
@@ -365,6 +365,7 @@ const teamActionsMapping: Record<
     user_access_level: () => null,
     live_events_token: () => null,
     product_intents: () => null,
+    cookieless_server_hash_mode: () => null,
 }
 
 function nameAndLink(logItem?: ActivityLogItem): JSX.Element {
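The ResponseSummaryFeedback change above is a stale-closure fix as much as an event rename: inside the click handler, `rating` still holds the value from the previous render, so the captured property has to come from the `newRating` argument. A stripped-down sketch of the pattern (names simplified, not the actual component; `capture` stands in for `posthog.capture`):

```ts
import { useState } from 'react'

function useRatingHandler(capture: (event: string, props: Record<string, any>) => void) {
    const [rating, setRating] = useState<number | null>(null)

    return (newRating: number): void => {
        if (rating !== null) {
            return // already rated
        }
        setRating(newRating)
        // `rating` here is still the value captured at render time (null on the
        // first click), so report the argument rather than the state:
        capture('ai_survey_summary_rated', { answer_rating: newRating })
    }
}
```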
diff --git a/frontend/src/scenes/urls.ts b/frontend/src/scenes/urls.ts
index 293481f598e3b..477064de6497e 100644
--- a/frontend/src/scenes/urls.ts
+++ b/frontend/src/scenes/urls.ts
@@ -1,4 +1,3 @@
-import JSONCrush from 'jsoncrush'
 import { combineUrl } from 'kea-router'
 import { AlertType } from 'lib/components/Alerts/types'
 import { getCurrentTeamId } from 'lib/utils/getAppContext'
@@ -75,8 +74,7 @@ export const urls = {
     insightNew: (type?: InsightType, dashboardId?: DashboardType['id'] | null, query?: Node): string =>
         combineUrl('/insights/new', dashboardId ? { dashboard: dashboardId } : {}, {
             ...(type ? { insight: type } : {}),
-            // have to use JSONCrush directly rather than the util to avoid circular dep
-            ...(query ? { q: typeof query === 'string' ? query : JSONCrush.crush(JSON.stringify(query)) } : {}),
+            ...(query ? { q: typeof query === 'string' ? query : JSON.stringify(query) } : {}),
         }).url,
     insightNewHogQL: (query: string, filters?: HogQLFilters): string =>
         combineUrl(
diff --git a/frontend/src/types.ts b/frontend/src/types.ts
index d8e85d4cdc027..f587d1b4b8398 100644
--- a/frontend/src/types.ts
+++ b/frontend/src/types.ts
@@ -543,6 +543,7 @@ export interface TeamType extends TeamBasicType {
     primary_dashboard: number // Dashboard shown on the project homepage
     live_events_columns: string[] | null // Custom columns shown on the Live Events page
     live_events_token: string
+    cookieless_server_hash_mode?: CookielessServerHashMode
 
     /** Effective access level of the user in this specific team. Null if user has no access. */
     effective_membership_level: OrganizationMembershipLevel | null
@@ -4829,6 +4830,12 @@ export type ReplayTemplateVariableType = {
     noTouch?: boolean
 }
 
+export enum CookielessServerHashMode {
+    Disabled = 0,
+    Stateless = 1,
+    Stateful = 2,
+}
+
 /**
  * Assistant Conversation
  */
diff --git a/jest.config.ts b/jest.config.ts
index 39ab8232a5592..53e5df5943413 100644
--- a/jest.config.ts
+++ b/jest.config.ts
@@ -8,7 +8,7 @@ process.env.TZ = process.env.TZ || 'UTC'
  * https://jestjs.io/docs/en/configuration.html
  */
 
-const esmModules = ['query-selector-shadow-dom', 'react-syntax-highlighter', '@react-hook', '@medv', 'monaco-editor', 'jsoncrush']
+const esmModules = ['query-selector-shadow-dom', 'react-syntax-highlighter', '@react-hook', '@medv', 'monaco-editor']
 const eeFolderExists = fs.existsSync('ee/frontend/exports.ts')
 
 function rootDirectories() {
     const rootDirectories = ['/frontend/src']
diff --git a/package.json b/package.json
index 6944c5b335e23..2b5b0eb027050 100644
--- a/package.json
+++ b/package.json
@@ -140,7 +140,6 @@
         "hls.js": "^1.5.15",
         "husky": "^7.0.4",
         "image-blob-reduce": "^4.1.0",
-        "jsoncrush": "^1.1.8",
         "kea": "^3.1.5",
         "kea-forms": "^3.2.0",
         "kea-loaders": "^3.0.0",
@@ -162,7 +161,7 @@
         "pmtiles": "^2.11.0",
         "postcss": "^8.4.31",
         "postcss-preset-env": "^9.3.0",
-        "posthog-js": "1.202.2",
+        "posthog-js": "1.203.0",
         "posthog-js-lite": "3.0.0",
         "prettier": "^2.8.8",
         "prop-types": "^15.7.2",
diff --git a/plugin-server/package.json b/plugin-server/package.json
index 11df155e0757c..9014d19be548b 100644
--- a/plugin-server/package.json
+++ b/plugin-server/package.json
@@ -69,16 +69,17 @@
         "express": "^4.18.2",
         "faker": "^5.5.3",
         "fast-deep-equal": "^3.1.3",
+        "fastpriorityqueue": "^0.7.5",
         "fernet-nodejs": "^1.0.6",
         "generic-pool": "^3.7.1",
         "graphile-worker": "0.13.0",
         "ioredis": "^4.27.6",
         "ipaddr.js": "^2.1.0",
         "kafkajs": "^2.2.0",
-        "lz4-kafkajs": "1.0.0",
         "kafkajs-snappy": "^1.1.0",
         "lru-cache": "^6.0.0",
         "luxon": "^3.4.4",
+        "lz4-kafkajs": "1.0.0",
         "node-fetch": "^2.6.1",
         "node-rdkafka": "^2.17.0",
         "node-schedule": "^2.1.0",
diff --git a/plugin-server/pnpm-lock.yaml b/plugin-server/pnpm-lock.yaml
index c297462845d8e..f187191553102 100644
--- a/plugin-server/pnpm-lock.yaml
+++ b/plugin-server/pnpm-lock.yaml
@@ -91,6 +91,9 @@ dependencies:
   fast-deep-equal:
     specifier: ^3.1.3
     version: 3.1.3
+  fastpriorityqueue:
+    specifier: ^0.7.5
+    version: 0.7.5
   fernet-nodejs:
     specifier: ^1.0.6
     version: 1.0.6
@@ -6276,6 +6279,10 @@ packages:
       strnum: 1.0.5
     dev: false
 
+  /fastpriorityqueue@0.7.5:
+    resolution: {integrity: sha512-3Pa0n9gwy8yIbEsT3m2j/E9DXgWvvjfiZjjqcJ+AdNKTAlVMIuFYrYG5Y3RHEM8O6cwv9hOpOWY/NaMfywoQVA==}
+    dev: false
+
   /fastq@1.15.0:
     resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==}
     dependencies:
diff --git a/plugin-server/src/cdp/cdp-api.ts b/plugin-server/src/cdp/cdp-api.ts
index ed4e60976b1e9..a5c3f84e4f276 100644
--- a/plugin-server/src/cdp/cdp-api.ts
+++ b/plugin-server/src/cdp/cdp-api.ts
@@ -136,6 +136,7 @@ export class CdpApi {
                     id: team.id,
                     name: team.name,
                     url: `${this.hub.SITE_URL ?? 'http://localhost:8000'}/project/${team.id}`,
+                    ...globals.project,
                 },
             },
             compoundConfiguration,
diff --git a/plugin-server/src/cdp/hog-executor.ts b/plugin-server/src/cdp/hog-executor.ts
index 631f8c8438444..1b843ca04c513 100644
--- a/plugin-server/src/cdp/hog-executor.ts
+++ b/plugin-server/src/cdp/hog-executor.ts
@@ -10,6 +10,7 @@ import { status } from '../utils/status'
 import { HogFunctionManager } from './hog-function-manager'
 import {
     CyclotronFetchFailureInfo,
+    HogFunctionInputType,
     HogFunctionInvocation,
     HogFunctionInvocationGlobals,
     HogFunctionInvocationGlobalsWithInputs,
@@ -103,6 +104,16 @@ const sanitizeLogMessage = (args: any[], sensitiveValues?: string[]): string =>
     return message
 }
 
+const orderInputsByDependency = (hogFunction: HogFunctionType): [string, HogFunctionInputType][] => {
+    const allInputs: HogFunctionType['inputs'] = {
+        ...hogFunction.inputs,
+        ...hogFunction.encrypted_inputs,
+    }
+    return Object.entries(allInputs).sort(([_, input1], [__, input2]) => {
+        return (input1.order ?? -1) - (input2.order ?? -1)
+    })
+}
+
 export class HogExecutor {
     private telemetryMatcher: ValueMatcher
 
@@ -529,30 +540,23 @@ export class HogExecutor {
     }
 
     buildHogFunctionGlobals(invocation: HogFunctionInvocation): HogFunctionInvocationGlobalsWithInputs {
-        const builtInputs: Record = {}
+        const newGlobals: HogFunctionInvocationGlobalsWithInputs = {
+            ...invocation.globals,
+            inputs: {},
+        }
 
-        Object.entries(invocation.hogFunction.inputs ?? {}).forEach(([key, item]) => {
-            builtInputs[key] = item.value
+        const orderedInputs = orderInputsByDependency(invocation.hogFunction)
 
-            if (item.bytecode) {
-                // Use the bytecode to compile the field
-                builtInputs[key] = formatInput(item.bytecode, invocation.globals, key)
-            }
-        })
-
-        Object.entries(invocation.hogFunction.encrypted_inputs ?? {}).forEach(([key, item]) => {
-            builtInputs[key] = item.value
+        for (const [key, input] of orderedInputs) {
+            newGlobals.inputs[key] = input.value
 
-            if (item.bytecode) {
+            if (input.bytecode) {
                 // Use the bytecode to compile the field
-                builtInputs[key] = formatInput(item.bytecode, invocation.globals, key)
+                newGlobals.inputs[key] = formatInput(input.bytecode, newGlobals, key)
             }
-        })
-
-        return {
-            ...invocation.globals,
-            inputs: builtInputs,
         }
+
+        return newGlobals
     }
 
     getSensitiveValues(hogFunction: HogFunctionType, inputs: Record): string[] {
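Hog function inputs are now formatted in explicit `order`, and each formatted input is written into `newGlobals` before the next one is compiled, which is what lets a later input's template reference an earlier one. A small sketch of just the ordering step, with simplified types and invented sample inputs (a `url` templated on a `hook` input):

```ts
// Simplified HogFunctionInputType: only the fields the sort cares about
type Input = { value: any; bytecode?: object; order?: number }

// Mirrors orderInputsByDependency above: a missing `order` defaults to -1 and sorts first
function orderInputs(inputs: Record<string, Input>): [string, Input][] {
    return Object.entries(inputs).sort(([, a], [, b]) => (a.order ?? -1) - (b.order ?? -1))
}

const inputs: Record<string, Input> = {
    url: { value: 'https://example.com/{inputs.hook}', order: 2 },
    debug: { value: {}, order: 1 },
    hook: { value: 'hooks/standard/1234/abcd', order: 0 },
}

// `hook` is resolved first, so by the time the `url` template is formatted,
// inputs.hook is already present in the globals it is compiled against
console.log(orderInputs(inputs).map(([key]) => key)) // [ 'hook', 'debug', 'url' ]
```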
diff --git a/plugin-server/src/cdp/types.ts b/plugin-server/src/cdp/types.ts
index e9d506a7a7823..dfe0464a1f9ec 100644
--- a/plugin-server/src/cdp/types.ts
+++ b/plugin-server/src/cdp/types.ts
@@ -297,6 +297,7 @@ export type HogFunctionInputType = {
     value: any
     secret?: boolean
     bytecode?: HogBytecode | object
+    order?: number
 }
 
 export type IntegrationType = {
diff --git a/plugin-server/src/types.ts b/plugin-server/src/types.ts
index 47ac3764a3528..1263a896d04a3 100644
--- a/plugin-server/src/types.ts
+++ b/plugin-server/src/types.ts
@@ -528,6 +528,12 @@ export enum PluginLogLevel {
     Critical = 4, // only error type and system source
 }
 
+export enum CookielessServerHashMode {
+    Disabled = 0,
+    Stateless = 1,
+    Stateful = 2,
+}
+
 export interface PluginLogEntry {
     id: string
     team_id: number
@@ -633,13 +639,15 @@ export interface Team {
     api_token: string
     slack_incoming_webhook: string | null
     session_recording_opt_in: boolean
-    person_processing_opt_out?: boolean
+    person_processing_opt_out: boolean | null
     heatmaps_opt_in: boolean | null
     ingested_event: boolean
     person_display_name_properties: string[] | null
     test_account_filters:
         | (EventPropertyFilter | PersonPropertyFilter | ElementPropertyFilter | CohortPropertyFilter)[]
         | null
+    cookieless_server_hash_mode: CookielessServerHashMode | null
+    timezone: string
 }
 
 /** Properties shared by RawEventMessage and EventMessage. */
diff --git a/plugin-server/src/utils/concurrencyController.ts b/plugin-server/src/utils/concurrencyController.ts
new file mode 100644
index 0000000000000..ac84d439fd507
--- /dev/null
+++ b/plugin-server/src/utils/concurrencyController.ts
@@ -0,0 +1,133 @@
+import FastPriorityQueue from 'fastpriorityqueue'
+
+export function promiseResolveReject(): {
+    resolve: (value: T) => void
+    reject: (reason?: any) => void
+    promise: Promise
+} {
+    let resolve: (value: T) => void
+    let reject: (reason?: any) => void
+    const promise = new Promise((innerResolve, innerReject) => {
+        resolve = innerResolve
+        reject = innerReject
+    })
+    return { resolve: resolve!, reject: reject!, promise }
+}
+
+// Note that this file also exists in the frontend code. Please keep the two copies in sync, as the tests only exist in the other version.
+class ConcurrencyControllerItem {
+    _debugTag?: string
+    _runFn: () => Promise
+    _priority: number = Infinity
+    _promise: Promise
+    constructor(
+        concurrencyController: ConcurrencyController,
+        userFn: () => Promise,
+        abortController: AbortController | undefined,
+        priority: number = Infinity,
+        debugTag: string | undefined
+    ) {
+        this._debugTag = debugTag
+        this._priority = priority
+        const { promise, resolve, reject } = promiseResolveReject()
+        this._promise = promise
+        this._runFn = async () => {
+            if (abortController?.signal.aborted) {
+                reject(new FakeAbortError(abortController.signal.reason || 'AbortError'))
+                return
+            }
+            if (concurrencyController._current.length >= concurrencyController._concurrencyLimit) {
+                throw new Error('Developer Error: ConcurrencyControllerItem: _runFn called while already running')
+            }
+            try {
+                concurrencyController._current.push(this)
+                const result = await userFn()
+                resolve(result)
+            } catch (error) {
+                reject(error)
+            }
+        }
+        abortController?.signal.addEventListener('abort', () => {
+            reject(new FakeAbortError(abortController.signal.reason || 'AbortError'))
+        })
+        promise
+            .catch(() => {
+                // ignore
+            })
+            .finally(() => {
+                if (concurrencyController._current.includes(this)) {
+                    concurrencyController._current = concurrencyController._current.filter((item) => item !== this)
+                    concurrencyController._runNext()
+                }
+            })
+    }
+}
+
+export class ConcurrencyController {
+    _concurrencyLimit: number
+
+    _current: ConcurrencyControllerItem[] = []
+    private _queue: FastPriorityQueue> = new FastPriorityQueue(
+        (a, b) => a._priority < b._priority
+    )
+
+    constructor(concurrencyLimit: number) {
+        this._concurrencyLimit = concurrencyLimit
+    }
+
+    /**
+     * Run a function under a concurrency limit. If the limit is already reached, the function is queued and run
+     * once a slot becomes available.
+     * @param fn The function to run
+     * @param priority The priority of the function. Lower numbers will be run first. Defaults to Infinity.
+     * @param abortController An AbortController that, if aborted, will reject the promise and immediately start the next item in the queue.
+     * @param debugTag An optional label to aid debugging
+     */
+    run = ({
+        fn,
+        priority,
+        abortController,
+        debugTag,
+    }: {
+        fn: () => Promise
+        priority?: number
+        abortController?: AbortController
+        debugTag?: string
+    }): Promise => {
+        const item = new ConcurrencyControllerItem(this, fn, abortController, priority, debugTag)
+
+        this._queue.add(item)
+
+        this._tryRunNext()
+
+        return item._promise
+    }
+
+    _runNext(): void {
+        const next = this._queue.poll()
+        if (next) {
+            next._runFn()
+                .catch(() => {
+                    // ignore
+                })
+                .finally(() => {
+                    this._tryRunNext()
+                })
+        }
+    }
+
+    _tryRunNext(): void {
+        if (this._current.length < this._concurrencyLimit) {
+            this._runNext()
+        }
+    }
+
+    setConcurrencyLimit = (limit: number): void => {
+        this._concurrencyLimit = limit
+    }
+}
+
+// Create a fake AbortError that allows us to use e.name === 'AbortError' to check if an error is an AbortError
+class FakeAbortError extends Error {
+    name = 'AbortError'
+}
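A short usage sketch for the new ConcurrencyController (the import path, task bodies, and delays are invented for illustration; `run` resolves with the wrapped function's result, and lower `priority` values leave the queue first):

```ts
import { ConcurrencyController } from './concurrencyController' // assumed relative path

const controller = new ConcurrencyController(2) // at most two tasks in flight

const sleep = (ms: number): Promise<void> => new Promise((resolve) => setTimeout(resolve, ms))

async function main(): Promise<void> {
    const results = await Promise.all(
        [4, 3, 2, 1].map((n) =>
            controller.run({
                fn: async () => {
                    await sleep(10 * n)
                    return n
                },
                priority: n, // once both slots are busy, queued items run lowest-number-first
                debugTag: `task-${n}`,
            })
        )
    )
    console.log(results) // [4, 3, 2, 1]: Promise.all preserves input order
}

// Cancellation: passing an AbortController and calling .abort() rejects a queued
// item with an error whose .name === 'AbortError' (see FakeAbortError above).
void main()
```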
diff --git a/plugin-server/src/worker/ingestion/team-manager.ts b/plugin-server/src/worker/ingestion/team-manager.ts
index f70d96a5799a5..d787c50c6c948 100644
--- a/plugin-server/src/worker/ingestion/team-manager.ts
+++ b/plugin-server/src/worker/ingestion/team-manager.ts
@@ -170,7 +170,9 @@ export async function fetchTeam(client: PostgresRouter, teamId: Team['id']): Pro
                 heatmaps_opt_in,
                 ingested_event,
                 person_display_name_properties,
-                test_account_filters
+                test_account_filters,
+                cookieless_server_hash_mode,
+                timezone
             FROM posthog_team
             WHERE id = $1
             `,
@@ -203,7 +205,10 @@ export async function fetchTeamByToken(client: PostgresRouter, token: string): P
                 person_processing_opt_out,
                 heatmaps_opt_in,
                 ingested_event,
-                test_account_filters
+                person_display_name_properties,
+                test_account_filters,
+                cookieless_server_hash_mode,
+                timezone
             FROM posthog_team
             WHERE api_token = $1
             LIMIT 1
diff --git a/plugin-server/tests/main/db.test.ts b/plugin-server/tests/main/db.test.ts
index 10e514d8323c9..cf47cb7ed0167 100644
--- a/plugin-server/tests/main/db.test.ts
+++ b/plugin-server/tests/main/db.test.ts
@@ -855,7 +855,7 @@ describe('DB', () => {
             anonymize_ips: false,
             api_token: 'token1',
             id: teamId,
-            project_id: teamId,
+            project_id: teamId as Team['project_id'],
             ingested_event: true,
             name: 'TEST PROJECT',
             organization_id: organizationId,
@@ -866,6 +866,8 @@ describe('DB', () => {
             uuid: expect.any(String),
             person_display_name_properties: [],
             test_account_filters: {} as any, // NOTE: Test insertion data gets set as an object weirdly
+            cookieless_server_hash_mode: null,
+            timezone: 'UTC',
         } as Team)
     })
 
@@ -885,17 +887,20 @@ describe('DB', () => {
             anonymize_ips: false,
             api_token: 'token2',
             id: teamId,
-            project_id: teamId,
+            project_id: teamId as Team['project_id'],
             ingested_event: true,
             name: 'TEST PROJECT',
             organization_id: organizationId,
             session_recording_opt_in: true,
             person_processing_opt_out: null,
+            person_display_name_properties: [],
             heatmaps_opt_in: null,
             slack_incoming_webhook: null,
             uuid: expect.any(String),
             test_account_filters: {} as any, // NOTE: Test insertion data gets set as an object weirdly
-        })
+            cookieless_server_hash_mode: null,
+            timezone: 'UTC',
+        } as Team)
     })
 
     it('returns null if the team does not exist', async () => {
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index f383926dd1807..2b2c29abd1164 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -241,9 +241,6 @@ dependencies:
   image-blob-reduce:
     specifier: ^4.1.0
     version: 4.1.0
-  jsoncrush:
-    specifier: ^1.1.8
-    version: 1.1.8
   kea:
     specifier: ^3.1.5
     version: 3.1.5(react@18.2.0)
@@ -308,8 +305,8 @@ dependencies:
     specifier: ^9.3.0
     version: 9.3.0(postcss@8.4.31)
   posthog-js:
-    specifier: 1.202.2
-    version: 1.202.2
+    specifier: 1.203.0
+    version: 1.203.0
   posthog-js-lite:
     specifier: 3.0.0
     version: 3.0.0
@@ -13319,7 +13316,7 @@ packages:
       gopd: 1.2.0
       has-symbols: 1.1.0
       hasown: 2.0.2
-      math-intrinsics: 1.0.0
+      math-intrinsics: 1.1.0
    dev: true
 
   /get-nonce@1.0.1:
@@ -14076,7 +14073,7 @@ packages:
       hogan.js: 3.0.2
       htm: 3.1.1
       instantsearch-ui-components: 0.3.0
-      preact: 10.25.2
+      preact: 10.25.3
       qs: 6.9.7
       search-insights: 2.13.0
     dev: false
@@ -15415,10 +15412,6 @@ packages:
     resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==}
     dev: true
 
-  /jsoncrush@1.1.8:
-    resolution: {integrity: sha512-lvIMGzMUA0fjuqwNcxlTNRq2bibPZ9auqT/LyGdlR5hvydJtA/BasSgkx4qclqTKVeTidrJvsS/oVjlTCPQ4Nw==}
-    dev: false
-
   /jsonfile@6.1.0:
     resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==}
     dependencies:
@@ -16003,8 +15996,8 @@ packages:
     resolution: {integrity: sha512-6qE4B9deFBIa9YSpOc9O0Sgc43zTeVYbgDT5veRKSlB2+ZuHNoVVxA1L/ckMUayV9Ay9y7Z/SZCLcGteW9i7bg==}
     dev: false
 
-  /math-intrinsics@1.0.0:
-    resolution: {integrity: sha512-4MqMiKP90ybymYvsut0CH2g4XWbfLtmlCkXmtmdcDCxNB+mQcu1w/1+L/VD7vi/PSv7X2JYV7SCcR+jiPXnQtA==}
+  /math-intrinsics@1.1.0:
+    resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
     engines: {node: '>= 0.4'}
     dev: true
 
@@ -17909,12 +17902,12 @@ packages:
     resolution: {integrity: sha512-dyajjnfzZD1tht4N7p7iwf7nBnR1MjVaVu+MKr+7gBgA39bn28wizCIJZztZPtHy4PY0YwtSGgwfBCuG/hnHgA==}
     dev: false
 
-  /posthog-js@1.202.2:
-    resolution: {integrity: sha512-9p7dAWuCfoM0WrasubGwtC8i38HU3iMqK3gd0mhyAoTrEVMVozTQq64Toc2VEv8H69NGNn6ikk5t2LclHT9XFA==}
+  /posthog-js@1.203.0:
+    resolution: {integrity: sha512-f8yvxZDVH30cjZZY8D9D6YyOqFh1S0aFUqi9k94PAQkN6szJTD/84lyjiFlCfJePr8M8C0iedZPev3A68mWbng==}
     dependencies:
       core-js: 3.39.0
       fflate: 0.4.8
-      preact: 10.25.2
+      preact: 10.25.3
       web-vitals: 4.2.4
     dev: false
 
@@ -17922,8 +17915,8 @@ packages:
     resolution: {integrity: sha512-Q+/tYsFU9r7xoOJ+y/ZTtdVQwTWfzjbiXBDMM/JKUux3+QPP02iUuIoeBQ+Ot6oEDlC+/PGjB/5A3K7KKb7hcw==}
     dev: false
 
-  /preact@10.25.2:
-    resolution: {integrity: sha512-GEts1EH3oMnqdOIeXhlbBSddZ9nrINd070WBOiPO2ous1orrKGUM4SMDbwyjSWD1iMS2dBvaDjAa5qUhz3TXqw==}
+  /preact@10.25.3:
+    resolution: {integrity: sha512-dzQmIFtM970z+fP9ziQ3yG4e3ULIbwZzJ734vaMVUTaKQ2+Ru1Ou/gjshOYVHCcd1rpAelC6ngjvjDXph98unQ==}
     dev: false
 
 /prelude-ls@1.2.1:
diff --git a/posthog/api/hog_function.py b/posthog/api/hog_function.py
index 3f50d710acc96..338f7b8500a6c 100644
--- a/posthog/api/hog_function.py
+++ b/posthog/api/hog_function.py
@@ -79,7 +79,7 @@ class HogFunctionMaskingSerializer(serializers.Serializer):
     bytecode = serializers.JSONField(required=False, allow_null=True)
 
     def validate(self, attrs):
-        attrs["bytecode"] = generate_template_bytecode(attrs["hash"])
+        attrs["bytecode"] = generate_template_bytecode(attrs["hash"], input_collector=set())
 
         return super().validate(attrs)
 
@@ -360,13 +360,13 @@ def invocations(self, request: Request, *args, **kwargs):
         # Remove the team from the config
         configuration.pop("team")
 
-        globals = serializer.validated_data["globals"]
+        hog_globals = serializer.validated_data["globals"]
         mock_async_functions = serializer.validated_data["mock_async_functions"]
 
         res = create_hog_invocation_test(
             team_id=hog_function.team_id,
             hog_function_id=hog_function.id,
-            globals=globals,
+            globals=hog_globals,
configuration=configuration, mock_async_functions=mock_async_functions, ) diff --git a/posthog/api/team.py b/posthog/api/team.py index d2b9ca018dbdf..4a9e956faf084 100644 --- a/posthog/api/team.py +++ b/posthog/api/team.py @@ -201,6 +201,7 @@ class Meta: "primary_dashboard", "live_events_columns", "recording_domains", + "cookieless_server_hash_mode", "person_on_events_querying_enabled", "inject_web_apps", "extra_settings", diff --git a/posthog/api/test/__snapshots__/test_action.ambr b/posthog/api/test/__snapshots__/test_action.ambr index 8ac1823a033c1..668ee08364e5a 100644 --- a/posthog/api/test/__snapshots__/test_action.ambr +++ b/posthog/api/test/__snapshots__/test_action.ambr @@ -82,6 +82,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -385,6 +386,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -896,6 +898,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_annotation.ambr b/posthog/api/test/__snapshots__/test_annotation.ambr index 9340e03a2a4d8..4637cf6f73ac1 100644 --- a/posthog/api/test/__snapshots__/test_annotation.ambr +++ b/posthog/api/test/__snapshots__/test_annotation.ambr @@ -82,6 +82,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -380,6 +381,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -824,6 +826,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_decide.ambr b/posthog/api/test/__snapshots__/test_decide.ambr index 277d209486401..94fa3c6b59986 100644 --- a/posthog/api/test/__snapshots__/test_decide.ambr +++ b/posthog/api/test/__snapshots__/test_decide.ambr @@ -317,6 +317,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -388,6 +389,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ 
-472,6 +474,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -688,6 +691,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -770,6 +774,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -857,6 +862,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1225,6 +1231,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1296,6 +1303,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1382,6 +1390,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1457,6 +1466,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1657,6 +1667,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1866,6 +1877,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2066,6 +2078,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2172,6 +2185,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2263,6 +2277,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2334,6 +2349,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2420,6 +2436,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2495,6 +2512,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2695,6 +2713,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2892,6 +2911,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3092,6 +3112,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3198,6 +3219,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3268,6 +3290,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3391,6 +3414,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3465,6 +3489,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3540,6 +3565,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3756,6 +3782,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3931,6 +3958,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4002,6 +4030,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4077,6 +4106,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4523,6 +4553,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4594,6 +4625,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4678,6 +4710,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4894,6 +4927,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4976,6 +5010,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5063,6 +5098,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5177,6 +5213,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5393,6 +5430,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5536,6 +5574,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5752,6 +5791,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6080,6 +6120,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6151,6 +6192,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6237,6 +6279,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6312,6 +6355,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6512,6 +6556,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6598,6 +6643,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6810,6 +6856,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6937,6 +6984,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7100,6 +7148,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7207,6 +7256,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7554,6 +7604,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7660,6 +7711,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7751,6 +7803,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7822,6 +7875,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7908,6 +7962,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7983,6 +8038,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8183,6 +8239,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8269,6 +8326,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8481,6 +8539,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8604,6 +8663,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8767,6 +8827,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8874,6 +8935,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9213,6 +9275,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9319,6 +9382,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_early_access_feature.ambr b/posthog/api/test/__snapshots__/test_early_access_feature.ambr index 874a79147df2c..dc83f7f4c961a 100644 --- a/posthog/api/test/__snapshots__/test_early_access_feature.ambr +++ b/posthog/api/test/__snapshots__/test_early_access_feature.ambr @@ -50,6 +50,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -143,6 +144,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -218,6 +220,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -418,6 +421,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -524,6 +528,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -619,6 +624,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -789,6 +795,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -989,6 +996,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1086,6 +1094,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1156,6 +1165,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1338,6 +1348,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1413,6 +1424,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1613,6 +1625,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1687,6 +1700,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1844,6 +1858,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1919,6 +1934,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2119,6 +2135,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_element.ambr b/posthog/api/test/__snapshots__/test_element.ambr index e3ce7d60cebca..3d3f1b90bab1a 100644 --- a/posthog/api/test/__snapshots__/test_element.ambr +++ b/posthog/api/test/__snapshots__/test_element.ambr @@ -82,6 +82,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_feature_flag.ambr b/posthog/api/test/__snapshots__/test_feature_flag.ambr index a00efc8ba764b..8db7aa3b58aef 100644 --- a/posthog/api/test/__snapshots__/test_feature_flag.ambr +++ b/posthog/api/test/__snapshots__/test_feature_flag.ambr @@ -1361,6 +1361,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1528,6 +1529,7 @@ "posthog_team"."person_display_name_properties", 
"posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_insight.ambr b/posthog/api/test/__snapshots__/test_insight.ambr index 01390b5f4b341..f4c9641c28c3a 100644 --- a/posthog/api/test/__snapshots__/test_insight.ambr +++ b/posthog/api/test/__snapshots__/test_insight.ambr @@ -721,6 +721,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -784,6 +785,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -854,6 +856,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -924,6 +927,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1064,6 +1068,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1318,6 +1323,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1602,6 +1608,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1747,6 +1754,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1871,6 +1879,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2044,6 +2053,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2142,6 +2152,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", 
+ "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr b/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr index 55bc1f7121eb1..2a2f570ca27b0 100644 --- a/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr +++ b/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr @@ -131,6 +131,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -206,6 +207,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -406,6 +408,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -480,6 +483,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -607,6 +611,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -705,6 +710,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -775,6 +781,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -915,6 +922,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1009,6 +1017,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1107,6 +1116,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1177,6 +1187,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + 
"posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1317,6 +1328,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1410,6 +1422,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1511,6 +1524,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1711,6 +1725,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1785,6 +1800,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1930,6 +1946,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2730,6 +2747,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_preflight.ambr b/posthog/api/test/__snapshots__/test_preflight.ambr index bbb5b5662471e..9c2a535c45915 100644 --- a/posthog/api/test/__snapshots__/test_preflight.ambr +++ b/posthog/api/test/__snapshots__/test_preflight.ambr @@ -82,6 +82,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/__snapshots__/test_survey.ambr b/posthog/api/test/__snapshots__/test_survey.ambr index aa3b526b3c9ee..85ee6dedec403 100644 --- a/posthog/api/test/__snapshots__/test_survey.ambr +++ b/posthog/api/test/__snapshots__/test_survey.ambr @@ -86,6 +86,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -149,6 +150,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", 
"posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -224,6 +226,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -449,6 +452,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -546,6 +550,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -633,6 +638,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -858,6 +864,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -955,6 +962,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1030,6 +1038,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1100,6 +1109,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1325,6 +1335,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1411,6 +1422,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1486,6 +1498,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1720,6 +1733,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", 
"posthog_team"."modifiers", @@ -1826,6 +1840,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2144,6 +2159,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr b/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr index dfd916657a89b..7a5e77c6be6c6 100644 --- a/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr +++ b/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr @@ -82,6 +82,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -270,6 +271,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -590,6 +592,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -886,6 +889,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1359,6 +1363,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1429,6 +1434,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1557,6 +1563,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1620,6 +1627,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1690,6 +1698,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", 
"posthog_team"."modifiers", @@ -1760,6 +1769,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1900,6 +1910,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2022,6 +2033,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2236,6 +2248,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2688,6 +2701,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2786,6 +2800,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2916,6 +2931,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2979,6 +2995,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3073,6 +3090,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3195,6 +3213,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3397,6 +3416,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3597,6 +3617,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3695,6 
+3716,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3832,6 +3854,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3974,6 +3997,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4044,6 +4068,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4114,6 +4139,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4254,6 +4280,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4376,6 +4403,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4572,6 +4600,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4890,6 +4919,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5186,6 +5216,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5392,6 +5423,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5807,6 +5839,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5930,6 +5963,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6112,6 +6146,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6383,6 +6418,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6481,6 +6517,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6579,6 +6616,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6681,6 +6719,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6751,6 +6790,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6891,6 +6931,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6961,6 +7002,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7031,6 +7073,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7182,6 +7225,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7304,6 +7348,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7518,6 +7563,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7718,6 +7764,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7816,6 +7863,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7926,6 +7974,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7996,6 +8045,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8066,6 +8116,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8206,6 +8257,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8328,6 +8380,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8530,6 +8583,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8737,6 +8791,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8835,6 +8890,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8933,6 +8989,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9003,6 +9060,7 @@ 
"posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9073,6 +9131,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9266,6 +9325,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9552,6 +9612,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9738,6 +9799,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9918,6 +9980,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -9981,6 +10044,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -10104,6 +10168,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -10286,6 +10351,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -10565,6 +10631,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -10688,6 +10755,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -10870,6 +10938,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git 
a/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr b/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr index f585776717839..14c0b2cda0953 100644 --- a/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr +++ b/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr @@ -82,6 +82,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -235,6 +236,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -466,6 +468,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -864,6 +867,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/api/test/test_hog_function.py b/posthog/api/test/test_hog_function.py index 414f5f19aa51f..169b37e796765 100644 --- a/posthog/api/test/test_hog_function.py +++ b/posthog/api/test/test_hog_function.py @@ -479,6 +479,7 @@ def test_secret_inputs_not_returned(self, *args): "I AM SECRET", ], "value": "I AM SECRET", + "order": 0, }, } @@ -486,7 +487,7 @@ def test_secret_inputs_not_returned(self, *args): assert ( raw_encrypted_inputs - == "gAAAAABlkgC8AAAAAAAAAAAAAAAAAAAAAKvzDjuLG689YjjVhmmbXAtZSRoucXuT8VtokVrCotIx3ttPcVufoVt76dyr2phbuotMldKMVv_Y6uzMDZFjX1WLE6eeZEhBJqFv8fQacoHXhDbDh5fvL7DTr1sc2R_DmTwvPQDiSss790vZ6d_vm1Q=" + == "gAAAAABlkgC8AAAAAAAAAAAAAAAAAAAAAKvzDjuLG689YjjVhmmbXAtZSRoucXuT8VtokVrCotIx3ttPcVufoVt76dyr2phbuotMldKMVv_Y6uzMDZFjX1Uvej4GHsYRbsTN_txcQHNnU7zvLee83DhHIrThEjceoq8i7hbfKrvqjEi7GCGc_k_Gi3V5KFxDOfLKnke4KM4s" ) def test_secret_inputs_not_updated_if_not_changed(self, *args): @@ -642,6 +643,7 @@ def test_generates_inputs_bytecode(self, *args): 32, "http://localhost:2080/0e02d917-563f-4050-9725-aad881b69937", ], + "order": 0, }, "payload": { "value": { @@ -651,6 +653,7 @@ def test_generates_inputs_bytecode(self, *args): "person": "{person}", "event_url": "{f'{event.url}-test'}", }, + "order": 1, "bytecode": { "event": ["_H", HOGQL_BYTECODE_VERSION, 32, "event", 1, 1], "groups": ["_H", HOGQL_BYTECODE_VERSION, 32, "groups", 1, 1], @@ -673,7 +676,7 @@ def test_generates_inputs_bytecode(self, *args): ], }, }, - "method": {"value": "POST"}, + "method": {"value": "POST", "order": 2}, "headers": { "value": {"version": "v={event.properties.$lib_version}"}, "bytecode": { @@ -695,6 +698,7 @@ def test_generates_inputs_bytecode(self, *args): 2, ] }, + "order": 3, }, } @@ -1164,6 +1168,7 @@ def test_create_typescript_destination_with_inputs(self): inputs["message"]["transpiled"]["stl"].sort() assert result["inputs"] == { "message": { + "order": 0, "transpiled": { "code": 'concat("Hello, TypeScript ", arrayMap(__lambda((a) => a), [1, 2, 3]), "!")', "lang": "ts", diff --git a/posthog/cdp/site_functions.py b/posthog/cdp/site_functions.py 
index fa77c20a8f881..e02895f39d99a 100644 --- a/posthog/cdp/site_functions.py +++ b/posthog/cdp/site_functions.py @@ -20,8 +20,9 @@ def get_transpiled_function(hog_function: HogFunction) -> str: compiler = JavaScriptCompiler() - # TODO: reorder inputs to make dependencies work - for key, input in (hog_function.inputs or {}).items(): + all_inputs = hog_function.inputs or {} + all_inputs = sorted(all_inputs.items(), key=lambda x: x[1].get("order", -1)) + for key, input in all_inputs: value = input.get("value") key_string = json.dumps(str(key) or "") if (isinstance(value, str) and "{" in value) or isinstance(value, dict) or isinstance(value, list): diff --git a/posthog/cdp/test/test_site_functions.py b/posthog/cdp/test/test_site_functions.py index 658b16ba41be0..c9821435d848c 100644 --- a/posthog/cdp/test/test_site_functions.py +++ b/posthog/cdp/test/test_site_functions.py @@ -46,6 +46,12 @@ def compile_and_run(self): return result + def _execute_javascript(self, js) -> str: + with tempfile.NamedTemporaryFile(delete=False) as f: + f.write(js.encode("utf-8")) + f.flush() + return subprocess.check_output(["node", f.name]).decode("utf-8") + def test_get_transpiled_function_basic(self): result = self.compile_and_run() assert isinstance(result, str) @@ -343,8 +349,51 @@ def test_run_function_onevent(self): ) assert "Loaded" == response.strip() - def _execute_javascript(self, js) -> str: - with tempfile.NamedTemporaryFile(delete=False) as f: - f.write(js.encode("utf-8")) - f.flush() - return subprocess.check_output(["node", f.name]).decode("utf-8") + def test_get_transpiled_function_with_ordered_inputs(self): + self.hog_function.hog = "export function onLoad() { console.log(inputs); }" + self.hog_function.inputs = { + "first": {"value": "I am first", "order": 0}, + "second": {"value": "{person.properties.name}", "order": 1}, + "third": {"value": "{event.properties.url}", "order": 2}, + } + + result = self.compile_and_run() + + assert '"first": "I am first"' in result + idx_first = result.index('"first": "I am first"') + idx_second = result.index('inputs["second"] = getInputsKey("second");') + idx_third = result.index('inputs["third"] = getInputsKey("third");') + + assert idx_first < idx_second < idx_third + + def test_get_transpiled_function_without_order(self): + self.hog_function.hog = "export function onLoad() { console.log(inputs); }" + self.hog_function.inputs = { + "noOrder": {"value": "I have no order"}, + "alsoNoOrder": {"value": "{person.properties.name}"}, + "withOrder": {"value": "{event.properties.url}", "order": 10}, + } + + result = self.compile_and_run() + + idx_noOrder = result.index('"noOrder": "I have no order"') + idx_alsoNoOrder = result.index('inputs["alsoNoOrder"] = getInputsKey("alsoNoOrder");') + idx_withOrder = result.index('inputs["withOrder"] = getInputsKey("withOrder");') + + assert idx_noOrder < idx_alsoNoOrder < idx_withOrder + + def test_get_transpiled_function_with_duplicate_orders(self): + self.hog_function.hog = "export function onLoad() { console.log(inputs); }" + self.hog_function.inputs = { + "alpha": {"value": "{person.properties.alpha}", "order": 1}, + "beta": {"value": "{person.properties.beta}", "order": 1}, + "gamma": {"value": "Just gamma", "order": 1}, + } + + result = self.compile_and_run() + + idx_alpha = result.index('inputs["alpha"] = getInputsKey("alpha");') + idx_beta = result.index('inputs["beta"] = getInputsKey("beta");') + idx_gamma = result.index('"gamma": "Just gamma"') + + assert idx_alpha is not None and idx_beta is not None and idx_gamma is 
not None diff --git a/posthog/cdp/test/test_validation.py b/posthog/cdp/test/test_validation.py index 90a41f8cca653..15f6dbb879cbd 100644 --- a/posthog/cdp/test/test_validation.py +++ b/posthog/cdp/test/test_validation.py @@ -85,6 +85,7 @@ def test_validate_inputs(self): 32, "http://localhost:2080/0e02d917-563f-4050-9725-aad881b69937", ], + "order": 0, # Now that we have ordering, url should have some order assigned }, "payload": { "value": { @@ -115,8 +116,12 @@ def test_validate_inputs(self): 2, ], }, + "order": 1, + }, + "method": { + "value": "POST", + "order": 2, }, - "method": {"value": "POST"}, "headers": { "value": {"version": "v={event.properties.$lib_version}"}, "bytecode": { @@ -138,6 +143,7 @@ def test_validate_inputs(self): 2, ] }, + "order": 3, }, } ) @@ -180,6 +186,109 @@ def test_validate_inputs_creates_bytecode_for_html(self): 3, ], "value": '\n\n\n\n\n\n

Hi {person.properties.email}

\n\n', + "order": 0, }, } ) + + # New tests for ordering + def test_validate_inputs_with_dependencies_simple_chain(self): + # Schema: A->B->C + # A has no deps, B uses A, C uses B + inputs_schema = [ + {"key": "A", "type": "string", "required": True}, + {"key": "C", "type": "string", "required": True}, + {"key": "B", "type": "string", "required": True}, + ] + # Values: B depends on A, C depends on B + # We'll use templates referencing inputs.A, inputs.B + inputs = { + "A": {"value": "A value"}, + "C": {"value": "{inputs.B} + C value"}, + "B": {"value": "{inputs.A} + B value"}, + } + + validated = validate_inputs(inputs_schema, inputs) + # Order should be A=0, B=1, C=2 + assert validated["A"]["order"] == 0 + assert validated["B"]["order"] == 1 + assert validated["C"]["order"] == 2 + + def test_validate_inputs_with_multiple_dependencies(self): + # Schema: W, X, Y, Z + # Z depends on W and Y + # Y depends on X + # X depends on W + # So order: W=0, X=1, Y=2, Z=3 + inputs_schema = [ + {"key": "X", "type": "string", "required": True}, + {"key": "W", "type": "string", "required": True}, + {"key": "Z", "type": "string", "required": True}, + {"key": "Y", "type": "string", "required": True}, + ] + inputs = { + "X": {"value": "{inputs.W}_x"}, + "W": {"value": "w"}, + "Z": {"value": "{inputs.W}{inputs.Y}_z"}, # depends on W and Y + "Y": {"value": "{inputs.X}_y"}, + } + + validated = validate_inputs(inputs_schema, inputs) + assert validated["W"]["order"] == 0 + assert validated["X"]["order"] == 1 + assert validated["Y"]["order"] == 2 + assert validated["Z"]["order"] == 3 + + def test_validate_inputs_with_no_dependencies(self): + # All inputs have no references. Any order is fine but all should start from 0 and increment. + inputs_schema = [ + {"key": "one", "type": "string", "required": True}, + {"key": "two", "type": "string", "required": True}, + {"key": "three", "type": "string", "required": True}, + ] + inputs = { + "one": {"value": "1"}, + "two": {"value": "2"}, + "three": {"value": "3"}, + } + + validated = validate_inputs(inputs_schema, inputs) + # Should just assign order in any stable manner (likely alphabetical since no deps): + # Typically: one=0, two=1, three=2 + # The actual order might depend on dictionary ordering, but given code, it should be alphabetical keys since we topologically sort by dependencies. + assert validated["one"]["order"] == 0 + assert validated["two"]["order"] == 1 + assert validated["three"]["order"] == 2 + + def test_validate_inputs_with_circular_dependencies(self): + # A depends on B, B depends on A -> should fail + inputs_schema = [ + {"key": "A", "type": "string", "required": True}, + {"key": "B", "type": "string", "required": True}, + ] + + inputs = { + "A": {"value": "{inputs.B} + A"}, + "B": {"value": "{inputs.A} + B"}, + } + + try: + validate_inputs(inputs_schema, inputs) + raise AssertionError("Expected circular dependency error") + except Exception as e: + assert "Circular dependency" in str(e) + + def test_validate_inputs_with_extraneous_dependencies(self): + # A depends on a non-existing input X + # This should ignore X since it's not defined. + # So no error, but A has no real dependencies that matter. 
+ inputs_schema = [ + {"key": "A", "type": "string", "required": True}, + ] + inputs = { + "A": {"value": "{inputs.X} + A"}, + } + + validated = validate_inputs(inputs_schema, inputs) + # Only A is present, so A=0 + assert validated["A"]["order"] == 0 diff --git a/posthog/cdp/validation.py b/posthog/cdp/validation.py index ac7f19405cfd5..1d4ebf12e2f0b 100644 --- a/posthog/cdp/validation.py +++ b/posthog/cdp/validation.py @@ -6,22 +6,46 @@ from posthog.hogql.compiler.bytecode import create_bytecode from posthog.hogql.compiler.javascript import JavaScriptCompiler from posthog.hogql.parser import parse_program, parse_string_template +from posthog.hogql.visitor import TraversingVisitor from posthog.models.hog_functions.hog_function import TYPES_WITH_JAVASCRIPT_SOURCE +from posthog.hogql import ast logger = logging.getLogger(__name__) -def generate_template_bytecode(obj: Any) -> Any: +class InputCollector(TraversingVisitor): + inputs: set[str] + + def __init__(self): + super().__init__() + self.inputs = set() + + def visit_field(self, node: ast.Field): + super().visit_field(node) + if node.chain[0] == "inputs": + if len(node.chain) > 1: + self.inputs.add(str(node.chain[1])) + + +def collect_inputs(node: ast.Expr) -> set[str]: + input_collector = InputCollector() + input_collector.visit(node) + return input_collector.inputs + + +def generate_template_bytecode(obj: Any, input_collector: set[str]) -> Any: """ Clones an object, compiling any string values to bytecode templates """ if isinstance(obj, dict): - return {key: generate_template_bytecode(value) for key, value in obj.items()} + return {key: generate_template_bytecode(value, input_collector) for key, value in obj.items()} elif isinstance(obj, list): - return [generate_template_bytecode(item) for item in obj] + return [generate_template_bytecode(item, input_collector) for item in obj] elif isinstance(obj, str): - return create_bytecode(parse_string_template(obj)).bytecode + node = parse_string_template(obj) + input_collector.update(collect_inputs(node)) + return create_bytecode(node).bytecode else: return obj @@ -81,6 +105,9 @@ def to_representation(self, value): class InputsItemSerializer(serializers.Serializer): value = AnyInputField(required=False) bytecode = serializers.ListField(required=False, read_only=True) + # input_deps = serializers.ListField(required=False) + order = serializers.IntegerField(required=False) + transpiled = serializers.JSONField(required=False) def validate(self, attrs): schema = self.context["schema"] @@ -112,9 +139,9 @@ def validate(self, attrs): elif item_type == "email": if not isinstance(value, dict): raise serializers.ValidationError({"inputs": {name: f"Value must be an Integration ID."}}) - for key in ["from", "to", "subject"]: - if not value.get(key): - raise serializers.ValidationError({"inputs": {name: f"Missing value for '{key}'."}}) + for key_ in ["from", "to", "subject"]: + if not value.get(key_): + raise serializers.ValidationError({"inputs": {name: f"Missing value for '{key_}'."}}) if not value.get("text") and not value.get("html"): raise serializers.ValidationError({"inputs": {name: f"Either 'text' or 'html' is required."}}) @@ -133,7 +160,9 @@ def validate(self, attrs): if "bytecode" in attrs: del attrs["bytecode"] else: - attrs["bytecode"] = generate_template_bytecode(value) + input_collector: set[str] = set() + attrs["bytecode"] = generate_template_bytecode(value, input_collector) + attrs["input_deps"] = list(input_collector) if "transpiled" in attrs: del attrs["transpiled"] except Exception as 
e: @@ -154,6 +183,41 @@ def validate_inputs_schema(value: list) -> list: return serializer.validated_data or [] +def topological_sort(nodes: list[str], edges: dict[str, list[str]]) -> list[str]: + """ + Perform a topological sort on the given graph. + nodes: list of all node identifiers + edges: adjacency list where edges[node] = list of nodes that `node` depends on + Returns: A list of nodes in topologically sorted order (no cycles). + Raises an error if a cycle is detected. + """ + # Build in-degree + in_degree = {node: 0 for node in nodes} + for node, deps in edges.items(): + for dep in deps: + if dep in in_degree: + in_degree[node] = in_degree[node] + 1 + + # Find all nodes with in_degree 0 + queue = [n for n, d in in_degree.items() if d == 0] + sorted_list = [] + + while queue: + current = queue.pop(0) + sorted_list.append(current) + # Decrease in-degree of dependent nodes + for node, deps in edges.items(): + if current in deps: + in_degree[node] -= 1 + if in_degree[node] == 0: + queue.append(node) + + if len(sorted_list) != len(nodes): + raise serializers.ValidationError("Circular dependency detected in input_deps.") + + return sorted_list + + def validate_inputs( inputs_schema: list, inputs: dict, @@ -162,10 +226,13 @@ def validate_inputs( ) -> dict: """ Tricky: We want to allow overriding the secret inputs, but not return them. - If we have a given input then we use it, otherwise we pull it from the existing secrets + If we have a given input then we use it, otherwise we pull it from the existing secrets. + Then we do topological sorting based on input_deps to assign order. """ + validated_inputs = {} + # Validate each input against the schema for schema in inputs_schema: value = inputs.get(schema["key"]) or {} @@ -180,9 +247,35 @@ def validate_inputs( if not serializer.is_valid(): raise serializers.ValidationError(serializer.errors) - validated_inputs[schema["key"]] = serializer.validated_data + validated_data = serializer.validated_data + + # If it's a secret input, not required, and no value was provided, don't add it + if schema.get("secret", False) and not schema.get("required", False) and "value" not in validated_data: + # Skip adding this input entirely + continue + + validated_inputs[schema["key"]] = validated_data + + # We'll topologically sort keys based on their input_deps. 
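+ # A quick usage sketch of topological_sort above, with hypothetical keys: if B's template references {inputs.A} + # and C's references {inputs.B}, then topological_sort(["A", "B", "C"], {"A": [], "B": ["A"], "C": ["B"]}) + # returns ["A", "B", "C"] — dependencies before dependents — so the enumeration below assigns order A=0, B=1, C=2.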
+ edges = {} + all_keys = list(validated_inputs.keys()) + for k, v in validated_inputs.items(): + deps = v.get("input_deps", []) + deps = [d for d in deps if d in validated_inputs] + edges[k] = deps + + sorted_keys = topological_sort(all_keys, edges) + + # Assign order according to topological sort + for i, key in enumerate(sorted_keys): + validated_inputs[key]["order"] = i + if "input_deps" in validated_inputs[key]: + del validated_inputs[key]["input_deps"] + + # Rebuild in sorted order + sorted_validated_inputs = {key: validated_inputs[key] for key in sorted_keys} - return validated_inputs + return sorted_validated_inputs def compile_hog(hog: str, hog_type: str, in_repl: Optional[bool] = False) -> list[Any]: diff --git a/posthog/conftest.py b/posthog/conftest.py index c27dbec43955e..e9804d25eff42 100644 --- a/posthog/conftest.py +++ b/posthog/conftest.py @@ -14,6 +14,7 @@ def create_clickhouse_tables(num_tables: int): CREATE_DATA_QUERIES, CREATE_DICTIONARY_QUERIES, CREATE_DISTRIBUTED_TABLE_QUERIES, + CREATE_KAFKA_TABLE_QUERIES, CREATE_MERGETREE_TABLE_QUERIES, CREATE_MV_TABLE_QUERIES, CREATE_VIEW_QUERIES, @@ -28,10 +29,18 @@ def create_clickhouse_tables(num_tables: int): + len(CREATE_DICTIONARY_QUERIES) ) + # Evaluation tests use Kafka for faster data ingestion. + if settings.IN_EVAL_TESTING: + total_tables += len(CREATE_KAFKA_TABLE_QUERIES) + # Check if all the tables have already been created. Views, materialized views, and dictionaries also count if num_tables == total_tables: return + if settings.IN_EVAL_TESTING: + kafka_table_queries = list(map(build_query, CREATE_KAFKA_TABLE_QUERIES)) + run_clickhouse_statement_in_parallel(kafka_table_queries) + table_queries = list(map(build_query, CREATE_MERGETREE_TABLE_QUERIES + CREATE_DISTRIBUTED_TABLE_QUERIES)) run_clickhouse_statement_in_parallel(table_queries) @@ -62,7 +71,7 @@ def reset_clickhouse_tables(): from posthog.models.channel_type.sql import TRUNCATE_CHANNEL_DEFINITION_TABLE_SQL from posthog.models.cohort.sql import TRUNCATE_COHORTPEOPLE_TABLE_SQL from posthog.models.error_tracking.sql import TRUNCATE_ERROR_TRACKING_ISSUE_FINGERPRINT_OVERRIDES_TABLE_SQL - from posthog.models.event.sql import TRUNCATE_EVENTS_TABLE_SQL, TRUNCATE_EVENTS_RECENT_TABLE_SQL + from posthog.models.event.sql import TRUNCATE_EVENTS_RECENT_TABLE_SQL, TRUNCATE_EVENTS_TABLE_SQL from posthog.models.group.sql import TRUNCATE_GROUPS_TABLE_SQL from posthog.models.performance.sql import TRUNCATE_PERFORMANCE_EVENTS_TABLE_SQL from posthog.models.person.sql import ( @@ -100,6 +109,18 @@ def reset_clickhouse_tables(): TRUNCATE_HEATMAPS_TABLE_SQL(), ] + # Drop the created Kafka tables, because some tests don't expect them to exist. + if settings.IN_EVAL_TESTING: + kafka_tables = sync_execute( + f""" + SELECT name + FROM system.tables + WHERE database = '{settings.CLICKHOUSE_DATABASE}' AND name LIKE 'kafka_%' + """, + ) + # Using `ON CLUSTER` makes dropping the tables ~20x slower: https://github.com/ClickHouse/ClickHouse/issues/15473.
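+ # For illustration (table names hypothetical): if the query above returns rows like ("kafka_events",) and ("kafka_groups",), + # the comprehension below emits plain "DROP TABLE kafka_events" / "DROP TABLE kafka_groups" statements, deliberately without `ON CLUSTER`.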
+        TABLES_TO_CREATE_DROP += [f"DROP TABLE {table[0]}" for table in kafka_tables]
+
     run_clickhouse_statement_in_parallel(TABLES_TO_CREATE_DROP)
 
     from posthog.clickhouse.schema import (
diff --git a/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py b/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py
index d9d874d7b6e6b..363ee06bc78be 100644
--- a/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py
+++ b/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py
@@ -7,6 +7,11 @@
     calculate_credible_intervals,
     calculate_probabilities,
 )
+from posthog.hogql_queries.experiments.funnels_statistics_v2 import (
+    are_results_significant_v2,
+    calculate_credible_intervals_v2,
+    calculate_probabilities_v2,
+)
 from posthog.hogql_queries.query_runner import QueryRunner
 from posthog.models.experiment import Experiment
 from ..insights.funnels.funnels_query_runner import FunnelsQueryRunner
@@ -45,6 +50,8 @@ def __init__(self, *args, **kwargs):
         if self.experiment.holdout:
             self.variants.append(f"holdout-{self.experiment.holdout.id}")
 
+        self.stats_version = self.query.stats_version or 1
+
         self.prepared_funnels_query = self._prepare_funnel_query()
         self.funnels_query_runner = FunnelsQueryRunner(
             query=self.prepared_funnels_query, team=self.team, timings=self.timings, limit_context=self.limit_context
@@ -63,9 +70,14 @@ def calculate(self) -> ExperimentFunnelsQueryResponse:
             # Statistical analysis
             control_variant, test_variants = self._get_variants_with_base_stats(funnels_result)
-            probabilities = calculate_probabilities(control_variant, test_variants)
-            significance_code, loss = are_results_significant(control_variant, test_variants, probabilities)
-            credible_intervals = calculate_credible_intervals([control_variant, *test_variants])
+            if self.stats_version == 2:
+                probabilities = calculate_probabilities_v2(control_variant, test_variants)
+                significance_code, loss = are_results_significant_v2(control_variant, test_variants, probabilities)
+                credible_intervals = calculate_credible_intervals_v2([control_variant, *test_variants])
+            else:
+                probabilities = calculate_probabilities(control_variant, test_variants)
+                significance_code, loss = are_results_significant(control_variant, test_variants, probabilities)
+                credible_intervals = calculate_credible_intervals([control_variant, *test_variants])
         except Exception as e:
             raise ValueError(f"Error calculating experiment funnel results: {str(e)}") from e
@@ -80,6 +92,7 @@ def calculate(self) -> ExperimentFunnelsQueryResponse:
             },
             significant=significance_code == ExperimentSignificanceCode.SIGNIFICANT,
             significance_code=significance_code,
+            stats_version=self.stats_version,
             expected_loss=loss,
             credible_intervals=credible_intervals,
         )
diff --git a/posthog/hogql_queries/experiments/funnels_statistics_v2.py b/posthog/hogql_queries/experiments/funnels_statistics_v2.py
new file mode 100644
index 0000000000000..02f18d2f70740
--- /dev/null
+++ b/posthog/hogql_queries/experiments/funnels_statistics_v2.py
@@ -0,0 +1,216 @@
+import numpy as np
+from scipy import stats
+from posthog.schema import ExperimentVariantFunnelsBaseStats, ExperimentSignificanceCode
+from posthog.hogql_queries.experiments import (
+    FF_DISTRIBUTION_THRESHOLD,
+    MIN_PROBABILITY_FOR_SIGNIFICANCE,
+    EXPECTED_LOSS_SIGNIFICANCE_LEVEL,
+)
+from scipy.stats import betabinom
+
+ALPHA_PRIOR = 1
+BETA_PRIOR = 1
+SAMPLE_SIZE = 10000
+
+
+def calculate_probabilities_v2(
+    control: ExperimentVariantFunnelsBaseStats, variants: list[ExperimentVariantFunnelsBaseStats]
+) -> list[float]:
+    """
+    Calculate the win probabilities for each variant in an experiment using Bayesian analysis
+    for funnel conversion rates.
+
+    This function computes the probability that each variant is the best (i.e., has the highest
+    conversion rate) compared to all other variants, including the control. It uses samples
+    drawn from the posterior Beta distributions of each variant's conversion rate.
+
+    Parameters:
+    -----------
+    control : ExperimentVariantFunnelsBaseStats
+        Statistics for the control group, including success and failure counts
+    variants : list[ExperimentVariantFunnelsBaseStats]
+        List of statistics for test variants to compare against the control
+
+    Returns:
+    --------
+    list[float]
+        A list of probabilities where:
+        - The first element is the probability that the control variant is the best
+        - Subsequent elements are the probabilities that each test variant is the best
+
+    Notes:
+    ------
+    - Uses a Bayesian approach with Beta distributions as the posterior
+    - Uses Beta(1,1) as the prior, which is uniform over [0,1]
+    - Draws 10,000 samples from each variant's posterior distribution
+    """
+    all_variants = [control, *variants]
+
+    # Use Beta distribution for conversion rates
+    samples: list[np.ndarray] = []
+    for variant in all_variants:
+        # Add the Beta prior to both successes and failures
+        alpha = ALPHA_PRIOR + variant.success_count
+        beta = BETA_PRIOR + variant.failure_count
+        # Generate samples from Beta distribution
+        variant_samples = np.random.beta(alpha, beta, SAMPLE_SIZE)
+        samples.append(variant_samples)
+
+    samples_array = np.array(samples)
+    # Calculate probability of each variant being the best
+    probabilities = []
+    for i in range(len(all_variants)):
+        probability = (samples_array[i] == np.max(samples_array, axis=0)).mean()
+        probabilities.append(float(probability))
+
+    return probabilities
+
+
+def calculate_expected_loss_v2(
+    target_variant: ExperimentVariantFunnelsBaseStats, variants: list[ExperimentVariantFunnelsBaseStats]
+) -> float:
+    """
+    Calculates expected loss in conversion rate using a Beta-Binomial conjugate prior.
+
+    This implementation uses a Bayesian approach with a Beta-Binomial model
+    to estimate the expected loss when choosing the target variant over others.
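+
+    Loss is computed as max(0, best_other - target) averaged over the posterior
+    draws, so it is 0 when the target variant wins in every sample.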
+
+    Parameters:
+    -----------
+    target_variant : ExperimentVariantFunnelsBaseStats
+        The variant being evaluated for loss
+    variants : list[ExperimentVariantFunnelsBaseStats]
+        List of other variants to compare against
+
+    Returns:
+    --------
+    float
+        Expected loss in conversion rate if choosing the target variant
+    """
+    # Calculate posterior parameters for target variant
+    target_alpha = int(ALPHA_PRIOR + target_variant.success_count)
+    target_beta = int(BETA_PRIOR + target_variant.failure_count)
+    target_n = int(target_variant.success_count + target_variant.failure_count)
+
+    # Get samples from target variant's Beta-Binomial
+    target_samples = betabinom.rvs(target_n, target_alpha, target_beta, size=SAMPLE_SIZE) / target_n
+
+    # Get samples from each comparison variant
+    variant_samples = []
+    for variant in variants:
+        n = int(variant.success_count + variant.failure_count)
+        alpha = int(ALPHA_PRIOR + variant.success_count)
+        beta = int(BETA_PRIOR + variant.failure_count)
+        samples = betabinom.rvs(n, alpha, beta, size=SAMPLE_SIZE) / n
+        variant_samples.append(samples)
+
+    # Calculate loss
+    variant_max = np.maximum.reduce(variant_samples)
+    losses = np.maximum(0, variant_max - target_samples)
+    expected_loss = float(np.mean(losses))
+
+    return expected_loss
+
+
+def are_results_significant_v2(
+    control: ExperimentVariantFunnelsBaseStats,
+    variants: list[ExperimentVariantFunnelsBaseStats],
+    probabilities: list[float],
+) -> tuple[ExperimentSignificanceCode, float]:
+    """
+    Determine if the experiment results are statistically significant using Bayesian analysis
+    for funnel conversion rates.
+
+    This function evaluates whether there is strong evidence that any variant is better
+    than the others by considering both winning probabilities and expected loss. It checks
+    if the sample size is sufficient and evaluates the risk of choosing the winning variant.
+
+    Parameters:
+    -----------
+    control : ExperimentVariantFunnelsBaseStats
+        Statistics for the control group, including success and failure counts
+    variants : list[ExperimentVariantFunnelsBaseStats]
+        List of statistics for test variants to compare against the control
+    probabilities : list[float]
+        List of probabilities from calculate_probabilities_v2
+
+    Returns:
+    --------
+    tuple[ExperimentSignificanceCode, float]
+        A tuple containing:
+        - Significance code indicating the result (significant, not enough exposure, high loss, etc.)
+        - Expected loss value for significant results, 1.0 for non-significant results
+
+    Notes:
+    ------
+    - Requires minimum exposure threshold per variant for reliable results
+    - Uses probability threshold from MIN_PROBABILITY_FOR_SIGNIFICANCE
+    - Calculates expected loss for the best-performing variant
+    - Returns HIGH_LOSS if expected loss exceeds significance threshold
+    - Returns NOT_ENOUGH_EXPOSURE if sample size requirements are not met
+    """
+    # Check minimum exposure
+    if control.success_count + control.failure_count < FF_DISTRIBUTION_THRESHOLD or any(
+        v.success_count + v.failure_count < FF_DISTRIBUTION_THRESHOLD for v in variants
+    ):
+        return ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE, 1.0
+
+    # Check if any variant has high enough probability
+    max_probability = max(probabilities)
+    if max_probability >= MIN_PROBABILITY_FOR_SIGNIFICANCE:
+        # Find best performing variant
+        all_variants = [control, *variants]
+        conversion_rates = [v.success_count / (v.success_count + v.failure_count) for v in all_variants]
+        best_idx = np.argmax(conversion_rates)
+        best_variant = all_variants[best_idx]
+        other_variants = all_variants[:best_idx] + all_variants[best_idx + 1 :]
+        expected_loss = calculate_expected_loss_v2(best_variant, other_variants)
+
+        if expected_loss >= EXPECTED_LOSS_SIGNIFICANCE_LEVEL:
+            return ExperimentSignificanceCode.HIGH_LOSS, expected_loss
+
+        return ExperimentSignificanceCode.SIGNIFICANT, expected_loss
+
+    return ExperimentSignificanceCode.LOW_WIN_PROBABILITY, 1.0
+
+
+def calculate_credible_intervals_v2(variants: list[ExperimentVariantFunnelsBaseStats]) -> dict[str, list[float]]:
+    """
+    Calculate Bayesian credible intervals for conversion rates of each variant.
+
+    This function computes the 95% credible intervals for the true conversion rate
+    of each variant, representing the range where we believe the true rate lies
+    with 95% probability.
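+
+    For example, a variant with 6 successes and 4 failures has a Beta(7, 5)
+    posterior, giving a 95% credible interval of roughly [0.31, 0.83].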
+
+    Parameters:
+    -----------
+    variants : list[ExperimentVariantFunnelsBaseStats]
+        List of all variants including control, containing success and failure counts
+
+    Returns:
+    --------
+    dict[str, list[float]]
+        Dictionary mapping variant keys to [lower, upper] credible intervals, where:
+        - lower is the 2.5th percentile of the posterior distribution
+        - upper is the 97.5th percentile of the posterior distribution
+
+    Notes:
+    ------
+    - Uses Beta distribution as the posterior
+    - Uses Beta(1,1) as the prior, which is uniform over [0,1]
+    - Returns 95% credible intervals
+    - Intervals become narrower with larger sample sizes
+    """
+    intervals = {}
+
+    for variant in variants:
+        # Add the Beta prior to both successes and failures
+        alpha = ALPHA_PRIOR + variant.success_count
+        beta = BETA_PRIOR + variant.failure_count
+
+        # Calculate 95% credible interval
+        lower, upper = stats.beta.ppf([0.025, 0.975], alpha, beta)
+
+        intervals[variant.key] = [float(lower), float(upper)]
+
+    return intervals
diff --git a/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py b/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py
index 3f3127fcb1b55..3dc11499b1393 100644
--- a/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py
+++ b/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py
@@ -113,6 +113,8 @@ def test_query_runner(self):
         )
         result = query_runner.calculate()
 
+        self.assertEqual(result.stats_version, 1)
+
         self.assertEqual(len(result.variants), 2)
 
         control_variant = next(variant for variant in result.variants if variant.key == "control")
@@ -123,11 +125,88 @@ def test_query_runner(self):
         self.assertEqual(test_variant.success_count, 8)
         self.assertEqual(test_variant.failure_count, 2)
 
-        self.assertIn("control", result.probability)
-        self.assertIn("test", result.probability)
+        self.assertAlmostEqual(result.probability["control"], 0.2, delta=0.1)
+        self.assertAlmostEqual(result.probability["test"], 0.8, delta=0.1)
 
-        self.assertIn("control", result.credible_intervals)
-        self.assertIn("test", result.credible_intervals)
+        self.assertEqual(result.significant, False)
+        self.assertEqual(result.significance_code, ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE)
+        self.assertEqual(result.expected_loss, 1.0)
+
+        self.assertAlmostEqual(result.credible_intervals["control"][0], 0.3, delta=0.1)
+        self.assertAlmostEqual(result.credible_intervals["control"][1], 0.8, delta=0.1)
+        self.assertAlmostEqual(result.credible_intervals["test"][0], 0.5, delta=0.1)
+        self.assertAlmostEqual(result.credible_intervals["test"][1], 0.9, delta=0.1)
+
+    @freeze_time("2020-01-01T12:00:00Z")
+    def test_query_runner_v2(self):
+        feature_flag = self.create_feature_flag()
+        experiment = self.create_experiment(feature_flag=feature_flag)
+
+        feature_flag_property = f"$feature/{feature_flag.key}"
+
+        funnels_query = FunnelsQuery(
+            series=[EventsNode(event="$pageview"), EventsNode(event="purchase")],
+            dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"},
+        )
+        experiment_query = ExperimentFunnelsQuery(
+            experiment_id=experiment.id,
+            kind="ExperimentFunnelsQuery",
+            funnels_query=funnels_query,
+            stats_version=2,
+        )
+
+        experiment.metrics = [{"type": "primary", "query": experiment_query.model_dump()}]
+        experiment.save()
+
+        for variant, purchase_count in [("control", 6), ("test", 8)]:
+            for i in range(10):
+                _create_person(distinct_ids=[f"user_{variant}_{i}"], team_id=self.team.pk)
+                _create_event(
+                    team=self.team,
+                    event="$pageview",
+                    distinct_id=f"user_{variant}_{i}",
+                    timestamp="2020-01-02T12:00:00Z",
+                    properties={feature_flag_property: variant},
+                )
+                if i < purchase_count:
+                    _create_event(
+                        team=self.team,
+                        event="purchase",
+                        distinct_id=f"user_{variant}_{i}",
+                        timestamp="2020-01-02T12:01:00Z",
+                        properties={feature_flag_property: variant},
+                    )
+
+        flush_persons_and_events()
+
+        query_runner = ExperimentFunnelsQueryRunner(
+            query=ExperimentFunnelsQuery(**experiment.metrics[0]["query"]), team=self.team
+        )
+        result = query_runner.calculate()
+
+        self.assertEqual(result.stats_version, 2)
+
+        self.assertEqual(len(result.variants), 2)
+
+        control_variant = next(variant for variant in result.variants if variant.key == "control")
+        test_variant = next(variant for variant in result.variants if variant.key == "test")
+
+        self.assertEqual(control_variant.success_count, 6)
+        self.assertEqual(control_variant.failure_count, 4)
+        self.assertEqual(test_variant.success_count, 8)
+        self.assertEqual(test_variant.failure_count, 2)
+
+        self.assertAlmostEqual(result.probability["control"], 0.2, delta=0.1)
+        self.assertAlmostEqual(result.probability["test"], 0.8, delta=0.1)
+
+        self.assertEqual(result.significant, False)
+        self.assertEqual(result.significance_code, ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE)
+        self.assertEqual(result.expected_loss, 1.0)
+
+        self.assertAlmostEqual(result.credible_intervals["control"][0], 0.3, delta=0.1)
+        self.assertAlmostEqual(result.credible_intervals["control"][1], 0.8, delta=0.1)
+        self.assertAlmostEqual(result.credible_intervals["test"][0], 0.5, delta=0.1)
+        self.assertAlmostEqual(result.credible_intervals["test"][1], 0.9, delta=0.1)
 
     @flaky(max_runs=10, min_passes=1)
     @freeze_time("2020-01-01T12:00:00Z")
diff --git a/posthog/hogql_queries/experiments/test/test_funnels_statistics.py b/posthog/hogql_queries/experiments/test/test_funnels_statistics.py
new file mode 100644
index 0000000000000..2206ff92b9305
--- /dev/null
+++ b/posthog/hogql_queries/experiments/test/test_funnels_statistics.py
@@ -0,0 +1,243 @@
+from posthog.hogql_queries.experiments import MIN_PROBABILITY_FOR_SIGNIFICANCE
+from posthog.schema import ExperimentVariantFunnelsBaseStats, ExperimentSignificanceCode
+from posthog.hogql_queries.experiments.funnels_statistics_v2 import (
+    calculate_probabilities_v2,
+    are_results_significant_v2,
+    calculate_credible_intervals_v2,
+)
+from posthog.hogql_queries.experiments.funnels_statistics import (
+    calculate_probabilities,
+    are_results_significant,
+    calculate_credible_intervals,
+)
+from posthog.test.base import APIBaseTest
+
+
+def create_variant(
+    key: str,
+    success_count: int,
+    failure_count: int,
+) -> ExperimentVariantFunnelsBaseStats:
+    return ExperimentVariantFunnelsBaseStats(
+        key=key,
+        success_count=success_count,
+        failure_count=failure_count,
+    )
+
+
+class TestExperimentFunnelStatistics(APIBaseTest):
+    def run_test_for_both_implementations(self, test_fn):
+        """Run the same test for both implementations"""
+        self.stats_version = 1
+        # Run for original implementation
+        test_fn(
+            stats_version=1,
+            calculate_probabilities=calculate_probabilities,
+            are_results_significant=are_results_significant,
+            calculate_credible_intervals=calculate_credible_intervals,
+        )
+        self.stats_version = 2
+        # Run for v2 implementation
+        test_fn(
+            stats_version=2,
+            calculate_probabilities=calculate_probabilities_v2,
+            are_results_significant=are_results_significant_v2,
+            calculate_credible_intervals=calculate_credible_intervals_v2,
+        )
+
+    def test_small_sample_two_variants_not_significant(self):
+        """Test with small sample size, two variants, no clear winner"""
+
+        def run_test(stats_version, calculate_probabilities, are_results_significant, calculate_credible_intervals):
+            control = create_variant("control", success_count=10, failure_count=90)
+            test = create_variant("test", success_count=15, failure_count=85)
+
+            probabilities = calculate_probabilities(control, [test])
+            significance, p_value = are_results_significant(control, [test], probabilities)
+            intervals = calculate_credible_intervals([control, test])
+
+            self.assertEqual(len(probabilities), 2)
+            if stats_version == 2:
+                self.assertAlmostEqual(probabilities[0], 0.15, delta=0.1)
+                self.assertAlmostEqual(probabilities[1], 0.85, delta=0.1)
+                self.assertEqual(significance, ExperimentSignificanceCode.LOW_WIN_PROBABILITY)
+                self.assertEqual(p_value, 1)
+
+                # Check credible intervals
+                self.assertAlmostEqual(intervals["control"][0], 0.05, delta=0.05)
+                self.assertAlmostEqual(intervals["control"][1], 0.20, delta=0.05)
+                self.assertAlmostEqual(intervals["test"][0], 0.08, delta=0.05)
+                self.assertAlmostEqual(intervals["test"][1], 0.25, delta=0.05)
+            else:
+                # Original implementation behavior
+                self.assertTrue(0.1 < probabilities[0] < 0.5)
+                self.assertTrue(0.5 < probabilities[1] < 0.9)
+                self.assertEqual(significance, ExperimentSignificanceCode.LOW_WIN_PROBABILITY)
+                self.assertEqual(p_value, 1)
+
+                # Original implementation intervals
+                self.assertAlmostEqual(intervals["control"][0], 0.05, delta=0.05)
+                self.assertAlmostEqual(intervals["control"][1], 0.20, delta=0.05)
+                self.assertAlmostEqual(intervals["test"][0], 0.08, delta=0.05)
+                self.assertAlmostEqual(intervals["test"][1], 0.25, delta=0.05)
+
+        self.run_test_for_both_implementations(run_test)
+
+    def test_large_sample_two_variants_significant(self):
+        """Test with large sample size, two variants, clear winner"""
+
+        def run_test(stats_version, calculate_probabilities, are_results_significant, calculate_credible_intervals):
+            control = create_variant("control", success_count=1000, failure_count=9000)
+            test = create_variant("test", success_count=1500, failure_count=8500)
+
+            probabilities = calculate_probabilities(control, [test])
+            significance, p_value = are_results_significant(control, [test], probabilities)
+            intervals = calculate_credible_intervals([control, test])
+
+            self.assertEqual(len(probabilities), 2)
+            if stats_version == 2:
+                self.assertAlmostEqual(probabilities[1], 1.0, delta=0.05)
+                self.assertAlmostEqual(probabilities[0], 0.0, delta=0.05)
+                self.assertEqual(significance, ExperimentSignificanceCode.SIGNIFICANT)
+                self.assertEqual(p_value, 0)
+
+                # Check credible intervals
+                self.assertAlmostEqual(intervals["control"][0], 0.095, delta=0.01)
+                self.assertAlmostEqual(intervals["control"][1], 0.105, delta=0.01)
+                self.assertAlmostEqual(intervals["test"][0], 0.145, delta=0.01)
+                self.assertAlmostEqual(intervals["test"][1], 0.155, delta=0.01)
+            else:
+                # Original implementation behavior
+                self.assertTrue(probabilities[1] > 0.5)  # Test variant winning
+                self.assertTrue(probabilities[0] < 0.5)  # Control variant losing
+                self.assertEqual(significance, ExperimentSignificanceCode.SIGNIFICANT)
+                self.assertLess(p_value, 0.05)
+
+                # Original implementation intervals
+                self.assertAlmostEqual(intervals["control"][0], 0.095, delta=0.01)
+                self.assertAlmostEqual(intervals["control"][1], 0.105, delta=0.01)
+                self.assertAlmostEqual(intervals["test"][0], 0.145, delta=0.01)
+                self.assertAlmostEqual(intervals["test"][1], 0.155, delta=0.01)
+
+        self.run_test_for_both_implementations(run_test)
+
+    def test_many_variants_not_significant(self):
+        """Test with multiple variants, no clear winner"""
+
+        def run_test(stats_version, calculate_probabilities, are_results_significant, calculate_credible_intervals):
+            control = create_variant("control", success_count=100, failure_count=900)
+            test_a = create_variant("test_a", success_count=98, failure_count=902)
+            test_b = create_variant("test_b", success_count=102, failure_count=898)
+            test_c = create_variant("test_c", success_count=101, failure_count=899)
+
+            probabilities = calculate_probabilities(control, [test_a, test_b, test_c])
+            significance, p_value = are_results_significant(control, [test_a, test_b, test_c], probabilities)
+            intervals = calculate_credible_intervals([control, test_a, test_b, test_c])
+
+            self.assertEqual(len(probabilities), 4)
+            if stats_version == 2:
+                self.assertTrue(all(p < MIN_PROBABILITY_FOR_SIGNIFICANCE for p in probabilities))
+                self.assertEqual(significance, ExperimentSignificanceCode.LOW_WIN_PROBABILITY)
+                self.assertEqual(p_value, 1)
+
+                # Check credible intervals overlap
+                # Check credible intervals for control and all test variants
+                self.assertAlmostEqual(intervals["control"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["control"][1], 0.12, delta=0.02)
+                self.assertAlmostEqual(intervals["test_a"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["test_a"][1], 0.12, delta=0.02)
+                self.assertAlmostEqual(intervals["test_b"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["test_b"][1], 0.12, delta=0.02)
+                self.assertAlmostEqual(intervals["test_c"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["test_c"][1], 0.12, delta=0.02)
+            else:
+                # Original implementation behavior
+                self.assertTrue(all(0.1 < p < 0.9 for p in probabilities))
+                self.assertEqual(significance, ExperimentSignificanceCode.LOW_WIN_PROBABILITY)
+                self.assertEqual(p_value, 1)
+
+                # Check credible intervals overlap
+                # Check credible intervals for control and all test variants
+                self.assertAlmostEqual(intervals["control"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["control"][1], 0.12, delta=0.02)
+                self.assertAlmostEqual(intervals["test_a"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["test_a"][1], 0.12, delta=0.02)
+                self.assertAlmostEqual(intervals["test_b"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["test_b"][1], 0.12, delta=0.02)
+                self.assertAlmostEqual(intervals["test_c"][0], 0.09, delta=0.02)
+                self.assertAlmostEqual(intervals["test_c"][1], 0.12, delta=0.02)
+
+        self.run_test_for_both_implementations(run_test)
+
+    def test_insufficient_sample_size(self):
+        """Test with sample size below threshold"""
+
+        def run_test(stats_version, calculate_probabilities, are_results_significant, calculate_credible_intervals):
+            control = create_variant("control", success_count=5, failure_count=45)
+            test = create_variant("test", success_count=8, failure_count=42)
+
+            probabilities = calculate_probabilities(control, [test])
+            significance, p_value = are_results_significant(control, [test], probabilities)
+            intervals = calculate_credible_intervals([control, test])
+
+            self.assertEqual(len(probabilities), 2)
+            if stats_version == 2:
+                self.assertEqual(significance, ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE)
+                self.assertEqual(p_value, 1.0)
+
+                # Check wide credible intervals due to small sample
+                self.assertTrue(intervals["control"][1] - intervals["control"][0] > 0.15)
+                self.assertTrue(intervals["test"][1] - intervals["test"][0] > 0.15)
+            else:
+                # Original implementation behavior
+                self.assertEqual(significance, ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE)
+                self.assertEqual(p_value, 1.0)
+
+                # Check wide credible intervals
+                self.assertTrue(intervals["control"][1] - intervals["control"][0] > 0.15)
+                self.assertTrue(intervals["test"][1] - intervals["test"][0] > 0.15)
+
+        self.run_test_for_both_implementations(run_test)
+
+    def test_expected_loss_minimal_difference(self):
+        """Test expected loss when variants have very similar performance"""
+
+        def run_test(stats_version, calculate_probabilities, are_results_significant, calculate_credible_intervals):
+            control = create_variant("control", success_count=1000, failure_count=9000)  # 10% conversion
+            test = create_variant("test", success_count=1050, failure_count=8800)  # ~10.7% conversion
+
+            probabilities = calculate_probabilities(control, [test])
+            significance, expected_loss = are_results_significant(control, [test], probabilities)
+
+            if stats_version == 2:
+                self.assertEqual(significance, ExperimentSignificanceCode.SIGNIFICANT)
+                # Expected loss should still be relatively small
+                self.assertLess(expected_loss, 0.03)  # Less than 3% expected loss
+                self.assertGreater(expected_loss, 0)
+            else:
+                # Original implementation behavior
+                self.assertEqual(significance, ExperimentSignificanceCode.SIGNIFICANT)
+                self.assertLess(expected_loss, 0.03)  # Less than 3% expected loss
+                self.assertGreater(expected_loss, 0)
+
+        self.run_test_for_both_implementations(run_test)
+
+    def test_expected_loss_test_variant_clear_winner(self):
+        """Test expected loss when one variant is clearly better"""
+
+        def run_test(stats_version, calculate_probabilities, are_results_significant, calculate_credible_intervals):
+            control = create_variant("control", success_count=1000, failure_count=9000)  # 10% conversion
+            test = create_variant("test", success_count=2000, failure_count=8000)  # 20% conversion
+
+            probabilities = calculate_probabilities(control, [test])
+            significance, expected_loss = are_results_significant(control, [test], probabilities)
+
+            if stats_version == 2:
+                self.assertEqual(significance, ExperimentSignificanceCode.SIGNIFICANT)
+                self.assertEqual(expected_loss, 0.0)
+            else:
+                # Original implementation behavior
+                self.assertEqual(significance, ExperimentSignificanceCode.SIGNIFICANT)
+                self.assertEqual(expected_loss, 0.0)
+
+        self.run_test_for_both_implementations(run_test)
diff --git a/posthog/management/commands/generate_experiment_data.py b/posthog/management/commands/generate_experiment_data.py
new file mode 100644
index 0000000000000..5116d786be76e
--- /dev/null
+++ b/posthog/management/commands/generate_experiment_data.py
@@ -0,0 +1,195 @@
+from datetime import datetime, timedelta
+import logging
+import random
+import time
+import uuid
+import json
+
+from django.conf import settings
+from django.core.management.base import BaseCommand
+import posthoganalytics
+from pydantic import BaseModel, ValidationError
+
+
+class ActionConfig(BaseModel):
+    event: str
+    probability: float
+    count: int = 1
+    required_for_next: bool = False
+
+    def model_post_init(self, __context) -> None:
+        if self.required_for_next and self.count > 1:
+            raise ValueError("'required_for_next' cannot be used with 'count' greater than 1")
+
+
+class VariantConfig(BaseModel):
+    weight: float
+    actions: list[ActionConfig]
+
+
+class ExperimentConfig(BaseModel):
+    number_of_users: int
+    start_timestamp: datetime
+    end_timestamp: datetime
+    variants: dict[str, VariantConfig]
+
+
+def get_default_funnel_experiment_config() -> ExperimentConfig:
+    return ExperimentConfig(
+        number_of_users=2000,
+        start_timestamp=datetime.now() - timedelta(days=7),
+        end_timestamp=datetime.now(),
+        variants={
+            "control": VariantConfig(
+                weight=0.5,
+                actions=[
+                    ActionConfig(event="signup started", probability=1, required_for_next=True),
+                    ActionConfig(event="signup completed", probability=0.25, required_for_next=True),
+                ],
+            ),
+            "test": VariantConfig(
+                weight=0.5,
+                actions=[
+                    ActionConfig(event="signup started", probability=1, required_for_next=True),
+                    ActionConfig(event="signup completed", probability=0.35, required_for_next=True),
+                ],
+            ),
+        },
+    )
+
+
+def get_default_trend_experiment_config() -> ExperimentConfig:
+    return ExperimentConfig(
+        number_of_users=2000,
+        start_timestamp=datetime.now() - timedelta(days=7),
+        end_timestamp=datetime.now(),
+        variants={
+            "control": VariantConfig(
+                weight=0.5,
+                actions=[ActionConfig(event="$pageview", count=5, probability=0.25)],
+            ),
+            "test": VariantConfig(
+                weight=0.5,
+                actions=[ActionConfig(event="$pageview", count=5, probability=0.35)],
+            ),
+        },
+    )
+
+
+def get_default_config(type) -> ExperimentConfig:
+    match type:
+        case "funnel":
+            return get_default_funnel_experiment_config()
+        case "trend":
+            return get_default_trend_experiment_config()
+        case _:
+            raise ValueError(f"Invalid experiment type: {type}")
+
+
+class Command(BaseCommand):
+    help = "Generate experiment test data"
+
+    def add_arguments(self, parser):
+        parser.add_argument(
+            "--type",
+            type=str,
+            choices=["trend", "funnel"],
+            default="trend",
+            help="Type of experiment data to generate or configuration to initialize.",
+        )
+
+        parser.add_argument(
+            "--init-config",
+            type=str,
+            help="Initialize a new experiment configuration file at the specified path. Does not generate data.",
+        )
+        parser.add_argument("--experiment-id", type=str, help="Experiment ID (feature flag name)")
+        parser.add_argument("--config", type=str, help="Path to experiment config file")
+
+    def handle(self, *args, **options):
+        # Make sure this runs in development environment only
+        if not settings.DEBUG:
+            raise ValueError("This command should only be run in development! DEBUG must be True.")
+
+        experiment_type = options.get("type")
+
+        if init_config_path := options.get("init_config"):
+            with open(init_config_path, "w") as f:
+                f.write(get_default_config(experiment_type).model_dump_json(indent=2))
+            logging.info(f"Created example {experiment_type} configuration file at: {init_config_path}")
+            return
+
+        experiment_id = options.get("experiment_id")
+        config_path = options.get("config")
+
+        # Validate required arguments
+        if not experiment_id:
+            raise ValueError("--experiment-id is missing!")
+
+        if config_path is None and experiment_type is None:
+            raise ValueError("--config or --type trend|funnel is missing!")
+
+        if config_path:
+            with open(config_path) as config_file:
+                config_data = json.load(config_file)
+
+            try:
+                # Use the ExperimentConfig model to parse and validate the JSON data
+                experiment_config = ExperimentConfig(**config_data)
+            except ValidationError as e:
+                raise ValueError(f"Invalid configuration: {e}")
+        else:
+            experiment_config = get_default_config(experiment_type)
+
+        variants = list(experiment_config.variants.keys())
+        variant_counts = {variant: 0 for variant in variants}
+
+        for _ in range(experiment_config.number_of_users):
+            variant = random.choices(
+                variants,
+                weights=[v.weight for v in experiment_config.variants.values()],
+            )[0]
+            variant_counts[variant] += 1
+            distinct_id = str(uuid.uuid4())
+            random_timestamp = datetime.fromtimestamp(
+                random.uniform(
+                    experiment_config.start_timestamp.timestamp(),
+                    experiment_config.end_timestamp.timestamp() - 3600,
+                )
+            )
+
+            posthoganalytics.capture(
+                distinct_id=distinct_id,
+                event="$feature_flag_called",
+                timestamp=random_timestamp,
+                properties={
+                    "$feature_flag_response": variant,
+                    "$feature_flag": experiment_id,
+                },
+            )
+
+            should_stop = False
+            for action in experiment_config.variants[variant].actions:
+                for _ in range(action.count):
+                    if random.random() < action.probability:
+                        posthoganalytics.capture(
+                            distinct_id=distinct_id,
+                            event=action.event,
+                            timestamp=random_timestamp + timedelta(minutes=1),
+                            properties={
+                                f"$feature/{experiment_id}": variant,
+                            },
+                        )
+                    else:
+                        if action.required_for_next:
+                            should_stop = True
+                            break
+                if should_stop:
+                    break
+
+        # TODO: need to figure out how to wait for the data to be flushed. shutdown() doesn't work as expected.
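+        # Stopgap: sleep briefly so the analytics client can flush its event queue before shutdown.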
+ time.sleep(2) + posthoganalytics.shutdown() + + logging.info(f"Generated data for {experiment_id}") + logging.info(f"Variant counts: {variant_counts}") diff --git a/posthog/migrations/0534_team_cookieless_server_hash_mode.py b/posthog/migrations/0534_team_cookieless_server_hash_mode.py new file mode 100644 index 0000000000000..9a35fb224f5d8 --- /dev/null +++ b/posthog/migrations/0534_team_cookieless_server_hash_mode.py @@ -0,0 +1,19 @@ +# Generated by Django 4.2.15 on 2024-12-19 13:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("posthog", "0533_externaldatajob_pipeline_version"), + ] + + operations = [ + migrations.AddField( + model_name="team", + name="cookieless_server_hash_mode", + field=models.SmallIntegerField( + choices=[(0, "Disabled"), (1, "Stateless"), (2, "Stateful")], default=0, null=True + ), + ), + ] diff --git a/posthog/migrations/max_migration.txt b/posthog/migrations/max_migration.txt index 44547aebb012e..43f1f6ffa127f 100644 --- a/posthog/migrations/max_migration.txt +++ b/posthog/migrations/max_migration.txt @@ -1 +1 @@ -0533_externaldatajob_pipeline_version +0534_team_cookieless_server_hash_mode diff --git a/posthog/models/filters/test/__snapshots__/test_filter.ambr b/posthog/models/filters/test/__snapshots__/test_filter.ambr index 773730e93858c..12f297b9ca5ac 100644 --- a/posthog/models/filters/test/__snapshots__/test_filter.ambr +++ b/posthog/models/filters/test/__snapshots__/test_filter.ambr @@ -50,6 +50,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -120,6 +121,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -190,6 +192,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -260,6 +263,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -330,6 +334,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/models/remote_config.py b/posthog/models/remote_config.py index aa4ed7c59d379..427b3df7ec2de 100644 --- a/posthog/models/remote_config.py +++ b/posthog/models/remote_config.py @@ -261,7 +261,7 @@ def _build_site_apps_js(self): config = get_site_config_from_schema(site_app.config_schema, site_app.config) site_apps_js.append( indent_js( - f"\n{{\n id: '{site_app.token}',\n init: function(config) {{\n {indent_js(site_app.source, indent=4)}().inject({{ config:{json.dumps(config)}, posthog:config.posthog }});\n config.callback();\n }}\n}}" + f"\n{{\n id: 
'{site_app.token}',\n init: function(config) {{\n {indent_js(site_app.source, indent=4)}().inject({{ config:{json.dumps(config)}, posthog:config.posthog }});\n config.callback(); return {{}} }}\n}}" ) ) site_functions = ( diff --git a/posthog/models/team/team.py b/posthog/models/team/team.py index 40bea6d9e089c..1c2809900c156 100644 --- a/posthog/models/team/team.py +++ b/posthog/models/team/team.py @@ -177,6 +177,12 @@ def clickhouse_mode(self) -> str: return "3" if self == WeekStartDay.MONDAY else "0" +class CookielessServerHashMode(models.IntegerChoices): + DISABLED = 0, "Disabled" + STATELESS = 1, "Stateless" + STATEFUL = 2, "Stateful" + + class Team(UUIDClassicModel): """Team means "environment" (historically it meant "project", but now we have the Project model for that).""" @@ -276,6 +282,9 @@ class Meta: person_display_name_properties: ArrayField = ArrayField(models.CharField(max_length=400), null=True, blank=True) live_events_columns: ArrayField = ArrayField(models.TextField(), null=True, blank=True) recording_domains: ArrayField = ArrayField(models.CharField(max_length=200, null=True), blank=True, null=True) + cookieless_server_hash_mode = models.SmallIntegerField( + default=CookielessServerHashMode.DISABLED, choices=CookielessServerHashMode.choices, null=True + ) primary_dashboard = models.ForeignKey( "posthog.Dashboard", diff --git a/posthog/models/test/test_remote_config.py b/posthog/models/test/test_remote_config.py index 52bfc71821a79..ddcd23aca2a73 100644 --- a/posthog/models/test/test_remote_config.py +++ b/posthog/models/test/test_remote_config.py @@ -541,22 +541,19 @@ def test_renders_js_including_site_apps(self): id: 'tokentoken', init: function(config) { (function () { return { inject: (data) => console.log('injected!', data)}; })().inject({ config:{}, posthog:config.posthog }); - config.callback(); - } + config.callback(); return {} } }, { id: 'tokentoken', init: function(config) { (function () { return { inject: (data) => console.log('injected 2!', data)}; })().inject({ config:{}, posthog:config.posthog }); - config.callback(); - } + config.callback(); return {} } }, { id: 'tokentoken', init: function(config) { (function () { return { inject: (data) => console.log('injected but disabled!', data)}; })().inject({ config:{}, posthog:config.posthog }); - config.callback(); - } + config.callback(); return {} } }] } })();\ diff --git a/posthog/schema.py b/posthog/schema.py index 564dcc321fa60..a46596350f23f 100644 --- a/posthog/schema.py +++ b/posthog/schema.py @@ -6292,6 +6292,7 @@ class QueryResponseAlternative15(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool + stats_version: Optional[int] = None variants: list[ExperimentVariantFunnelsBaseStats] @@ -6324,6 +6325,7 @@ class QueryResponseAlternative26(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool + stats_version: Optional[int] = None variants: list[ExperimentVariantFunnelsBaseStats] @@ -6461,6 +6463,7 @@ class CachedExperimentFunnelsQueryResponse(BaseModel): ) significance_code: ExperimentSignificanceCode significant: bool + stats_version: Optional[int] = None timezone: str variants: list[ExperimentVariantFunnelsBaseStats] @@ -6477,6 +6480,7 @@ class Response9(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool + stats_version: Optional[int] = None variants: list[ExperimentVariantFunnelsBaseStats] @@ -6508,6 +6512,7 @@ class 
ExperimentFunnelsQueryResponse(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool + stats_version: Optional[int] = None variants: list[ExperimentVariantFunnelsBaseStats] @@ -6640,6 +6645,7 @@ class ExperimentFunnelsQuery(BaseModel): ) name: Optional[str] = None response: Optional[ExperimentFunnelsQueryResponse] = None + stats_version: Optional[int] = None class FunnelCorrelationQuery(BaseModel): diff --git a/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr b/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr index c25bdb4d587b4..db61b8d5663ad 100644 --- a/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr +++ b/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr @@ -50,6 +50,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -120,6 +121,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -190,6 +192,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -260,6 +263,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -330,6 +334,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -508,6 +513,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -605,6 +611,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1013,6 +1020,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1163,6 +1171,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1233,6 +1242,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", 
"posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1303,6 +1313,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1373,6 +1384,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1443,6 +1455,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1513,6 +1526,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1615,6 +1629,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2170,6 +2185,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2310,6 +2326,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2926,6 +2943,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3066,6 +3084,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3436,6 +3455,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3669,6 +3689,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -3809,6 +3830,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + 
"posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4374,6 +4396,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4514,6 +4537,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -4908,6 +4932,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5138,6 +5163,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -5278,6 +5304,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6339,6 +6366,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -6479,6 +6507,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7052,6 +7081,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7192,6 +7222,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7743,6 +7774,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -7883,6 +7915,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8495,6 +8528,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + 
"posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -8635,6 +8669,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/settings/__init__.py b/posthog/settings/__init__.py index c6067fd19c1f7..3e7ebc0b7c984 100644 --- a/posthog/settings/__init__.py +++ b/posthog/settings/__init__.py @@ -108,6 +108,7 @@ PROM_PUSHGATEWAY_ADDRESS: str | None = os.getenv("PROM_PUSHGATEWAY_ADDRESS", None) IN_UNIT_TESTING: bool = get_from_env("IN_UNIT_TESTING", False, type_cast=str_to_bool) +IN_EVAL_TESTING: bool = get_from_env("DEEPEVAL", False, type_cast=str_to_bool) HOGQL_INCREASED_MAX_EXECUTION_TIME: int = get_from_env("HOGQL_INCREASED_MAX_EXECUTION_TIME", 600, type_cast=int) diff --git a/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr b/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr index 059600098cd50..263314ec6d767 100644 --- a/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr +++ b/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr @@ -73,6 +73,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -192,6 +193,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -267,6 +269,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -467,6 +470,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -605,6 +609,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -753,6 +758,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -828,6 +834,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -991,6 +998,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + 
"posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1098,6 +1106,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1224,6 +1233,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1576,6 +1586,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/temporal/common/worker.py b/posthog/temporal/common/worker.py index 2c78aae3f4a8c..f5db3d6b0417d 100644 --- a/posthog/temporal/common/worker.py +++ b/posthog/temporal/common/worker.py @@ -6,6 +6,7 @@ from temporalio.runtime import PrometheusConfig, Runtime, TelemetryConfig from temporalio.worker import UnsandboxedWorkflowRunner, Worker +from posthog.constants import DATA_WAREHOUSE_TASK_QUEUE_V2 from posthog.temporal.common.client import connect from posthog.temporal.common.sentry import SentryInterceptor @@ -34,18 +35,35 @@ async def start_worker( client_key, runtime=runtime, ) - worker = Worker( - client, - task_queue=task_queue, - workflows=workflows, - activities=activities, - workflow_runner=UnsandboxedWorkflowRunner(), - graceful_shutdown_timeout=timedelta(minutes=5), - interceptors=[SentryInterceptor()], - activity_executor=ThreadPoolExecutor(max_workers=max_concurrent_activities or 50), - max_concurrent_activities=max_concurrent_activities or 50, - max_concurrent_workflow_tasks=max_concurrent_workflow_tasks, - ) + if task_queue == DATA_WAREHOUSE_TASK_QUEUE_V2: + worker = Worker( + client, + task_queue=task_queue, + workflows=workflows, + activities=activities, + workflow_runner=UnsandboxedWorkflowRunner(), + graceful_shutdown_timeout=timedelta(minutes=5), + interceptors=[SentryInterceptor()], + activity_executor=ThreadPoolExecutor(max_workers=max_concurrent_activities or 50), + # Only run one workflow at a time + max_concurrent_activities=1, + max_concurrent_workflow_task_polls=1, + max_concurrent_workflow_tasks=1, + max_cached_workflows=0, + ) + else: + worker = Worker( + client, + task_queue=task_queue, + workflows=workflows, + activities=activities, + workflow_runner=UnsandboxedWorkflowRunner(), + graceful_shutdown_timeout=timedelta(minutes=5), + interceptors=[SentryInterceptor()], + activity_executor=ThreadPoolExecutor(max_workers=max_concurrent_activities or 50), + max_concurrent_activities=max_concurrent_activities or 50, + max_concurrent_workflow_tasks=max_concurrent_workflow_tasks, + ) # catch the TERM signal, and stop the worker gracefully # https://github.com/temporalio/sdk-python#worker-shutdown diff --git a/posthog/temporal/data_imports/external_data_job.py b/posthog/temporal/data_imports/external_data_job.py index 62a1e1bc834ed..59508a2ee6f25 100644 --- a/posthog/temporal/data_imports/external_data_job.py +++ b/posthog/temporal/data_imports/external_data_job.py @@ -3,12 +3,17 @@ import datetime as dt import json import re +import threading 
diff --git a/posthog/temporal/data_imports/external_data_job.py b/posthog/temporal/data_imports/external_data_job.py
index 62a1e1bc834ed..59508a2ee6f25 100644
--- a/posthog/temporal/data_imports/external_data_job.py
+++ b/posthog/temporal/data_imports/external_data_job.py
@@ -3,12 +3,17 @@
 import datetime as dt
 import json
 import re
+import threading
+import time
 
 from django.conf import settings
 from django.db import close_old_connections
 
 import posthoganalytics
+import psutil
 from temporalio import activity, exceptions, workflow
 from temporalio.common import RetryPolicy
+from temporalio.exceptions import WorkflowAlreadyStartedError
+
 from posthog.constants import DATA_WAREHOUSE_TASK_QUEUE_V2
@@ -144,20 +149,22 @@ def trigger_pipeline_v2(inputs: ExternalDataWorkflowInputs):
     logger.debug("Triggering V2 pipeline")
 
     temporal = sync_connect()
-
-    asyncio.run(
-        temporal.start_workflow(
-            workflow="external-data-job",
-            arg=dataclasses.asdict(inputs),
-            id=f"{inputs.external_data_schema_id}-V2",
-            task_queue=str(DATA_WAREHOUSE_TASK_QUEUE_V2),
-            retry_policy=RetryPolicy(
-                maximum_interval=dt.timedelta(seconds=60),
-                maximum_attempts=1,
-                non_retryable_error_types=["NondeterminismError"],
-            ),
+    try:
+        asyncio.run(
+            temporal.start_workflow(
+                workflow="external-data-job",
+                arg=dataclasses.asdict(inputs),
+                id=f"{inputs.external_data_schema_id}-V2",
+                task_queue=str(DATA_WAREHOUSE_TASK_QUEUE_V2),
+                retry_policy=RetryPolicy(
+                    maximum_interval=dt.timedelta(seconds=60),
+                    maximum_attempts=1,
+                    non_retryable_error_types=["NondeterminismError"],
+                ),
+            )
         )
-    )
+    except WorkflowAlreadyStartedError:
+        pass
 
     logger.debug("V2 pipeline triggered")
@@ -173,6 +180,22 @@ def create_source_templates(inputs: CreateSourceTemplateInputs) -> None:
     create_warehouse_templates_for_source(team_id=inputs.team_id, run_id=inputs.run_id)
 
 
+def log_memory_usage():
+    process = psutil.Process()
+    logger = bind_temporal_worker_logger_sync(team_id=0)
+
+    while True:
+        memory_info = process.memory_info()
+        logger.info(f"Memory Usage: RSS = {memory_info.rss / (1024 * 1024):.2f} MB")
+
+        time.sleep(10)  # Log every 10 seconds
+
+
+if settings.TEMPORAL_TASK_QUEUE == DATA_WAREHOUSE_TASK_QUEUE_V2:
+    thread = threading.Thread(target=log_memory_usage, daemon=True)
+    thread.start()
+
+
 # TODO: update retry policies
 @workflow.defn(name="external-data-job")
 class ExternalDataJobWorkflow(PostHogWorkflow):
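(Editor's note: illustrative sketch, not part of the patch.) Two patterns in this file are worth calling out. First, trigger_pipeline_v2 is now idempotent: the workflow ID is derived from the schema ID, so a second trigger while a run is in flight raises WorkflowAlreadyStartedError, which is deliberately swallowed — at most one V2 run per schema. Second, the psutil RSS logger runs on a daemon thread, so it dies with the process and never blocks worker shutdown. The first pattern in isolation; names and addresses are stand-ins:

    import asyncio

    from temporalio.client import Client
    from temporalio.exceptions import WorkflowAlreadyStartedError


    async def trigger_once(client: Client, schema_id: str) -> None:
        try:
            await client.start_workflow(
                "external-data-job",             # workflow name
                id=f"{schema_id}-V2",            # deterministic ID: one run per schema
                task_queue="data-warehouse-v2",  # stand-in queue name
            )
        except WorkflowAlreadyStartedError:
            pass  # a run with this ID is already in flight; re-trigger is a no-op


    async def main() -> None:
        client = await Client.connect("localhost:7233")  # assumed local server
        await trigger_once(client, "some-schema-id")     # illustrative ID
        await trigger_once(client, "some-schema-id")     # second call is a no-op


    if __name__ == "__main__":
        asyncio.run(main())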
diff --git a/posthog/temporal/data_imports/pipelines/sql_database/helpers.py b/posthog/temporal/data_imports/pipelines/sql_database/helpers.py
index 0400a60b32fd5..9bf72a26f3c1e 100644
--- a/posthog/temporal/data_imports/pipelines/sql_database/helpers.py
+++ b/posthog/temporal/data_imports/pipelines/sql_database/helpers.py
@@ -24,7 +24,7 @@ def __init__(
         self,
         engine: Engine,
         table: Table,
-        chunk_size: int = 1000,
+        chunk_size: int = DEFAULT_CHUNK_SIZE,
         incremental: Optional[dlt.sources.incremental[Any]] = None,
         connect_args: Optional[list[str]] = None,
         db_incremental_field_last_value: Optional[Any] = None,
diff --git a/posthog/temporal/data_imports/pipelines/sql_database_v2/__init__.py b/posthog/temporal/data_imports/pipelines/sql_database_v2/__init__.py
index a3fc1c6b2838b..0c49b04ba1d1c 100644
--- a/posthog/temporal/data_imports/pipelines/sql_database_v2/__init__.py
+++ b/posthog/temporal/data_imports/pipelines/sql_database_v2/__init__.py
@@ -15,6 +15,7 @@
 from dlt.sources.credentials import ConnectionStringCredentials
 
 from posthog.settings.utils import get_from_env
+from posthog.temporal.data_imports.pipelines.sql_database_v2.settings import DEFAULT_CHUNK_SIZE
 from posthog.temporal.data_imports.pipelines.sql_database_v2._json import BigQueryJSON
 from posthog.utils import str_to_bool
 from posthog.warehouse.models import ExternalDataSource
@@ -252,7 +253,7 @@ def sql_database(
     schema: Optional[str] = dlt.config.value,
     metadata: Optional[MetaData] = None,
     table_names: Optional[list[str]] = dlt.config.value,
-    chunk_size: int = 50000,
+    chunk_size: int = DEFAULT_CHUNK_SIZE,
     backend: TableBackend = "pyarrow",
     detect_precision_hints: Optional[bool] = False,
     reflection_level: Optional[ReflectionLevel] = "full",
@@ -365,7 +366,7 @@ def sql_table(
     schema: Optional[str] = dlt.config.value,
     metadata: Optional[MetaData] = None,
     incremental: Optional[dlt.sources.incremental[Any]] = None,
-    chunk_size: int = 50000,
+    chunk_size: int = DEFAULT_CHUNK_SIZE,
     backend: TableBackend = "sqlalchemy",
     detect_precision_hints: Optional[bool] = None,
     reflection_level: Optional[ReflectionLevel] = "full",
diff --git a/posthog/temporal/data_imports/pipelines/sql_database_v2/helpers.py b/posthog/temporal/data_imports/pipelines/sql_database_v2/helpers.py
index acd64c97aae99..74a79650caa15 100644
--- a/posthog/temporal/data_imports/pipelines/sql_database_v2/helpers.py
+++ b/posthog/temporal/data_imports/pipelines/sql_database_v2/helpers.py
@@ -18,6 +18,8 @@
 from dlt.sources.credentials import ConnectionStringCredentials
 
+from posthog.temporal.data_imports.pipelines.sql_database_v2.settings import DEFAULT_CHUNK_SIZE
+
 from .arrow_helpers import row_tuples_to_arrow
 from .schema_types import (
     default_table_adapter,
@@ -44,7 +46,7 @@ def __init__(
         backend: TableBackend,
         table: Table,
         columns: TTableSchemaColumns,
-        chunk_size: int = 1000,
+        chunk_size: int = DEFAULT_CHUNK_SIZE,
         incremental: Optional[dlt.sources.incremental[Any]] = None,
         db_incremental_field_last_value: Optional[Any] = None,
         query_adapter_callback: Optional[TQueryAdapter] = None,
@@ -302,7 +304,7 @@ class SqlTableResourceConfiguration(BaseConfiguration):
     table: Optional[str] = None
     schema: Optional[str] = None
     incremental: Optional[dlt.sources.incremental] = None  # type: ignore[type-arg]
-    chunk_size: int = 50000
+    chunk_size: int = DEFAULT_CHUNK_SIZE
     backend: TableBackend = "sqlalchemy"
     detect_precision_hints: Optional[bool] = None
     defer_table_reflect: Optional[bool] = False
diff --git a/posthog/temporal/data_imports/pipelines/sql_database_v2/settings.py b/posthog/temporal/data_imports/pipelines/sql_database_v2/settings.py
new file mode 100644
index 0000000000000..d730961c096e8
--- /dev/null
+++ b/posthog/temporal/data_imports/pipelines/sql_database_v2/settings.py
@@ -0,0 +1 @@
+DEFAULT_CHUNK_SIZE = 10_000
diff --git a/posthog/temporal/data_imports/workflow_activities/import_data_sync.py b/posthog/temporal/data_imports/workflow_activities/import_data_sync.py
index 135d6b8d5fb89..0c51c8db81fcf 100644
--- a/posthog/temporal/data_imports/workflow_activities/import_data_sync.py
+++ b/posthog/temporal/data_imports/workflow_activities/import_data_sync.py
@@ -101,6 +101,9 @@ def import_data_activity_sync(inputs: ImportDataActivityInputs):
         schema.sync_type_config.get("incremental_field_type"),
     )
 
+    if schema.is_incremental:
+        logger.debug(f"Incremental last value being used is: {processed_incremental_last_value}")
+
     source = None
     if model.pipeline.source_type == ExternalDataSource.Type.STRIPE:
         from posthog.temporal.data_imports.pipelines.stripe import stripe_source
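(Editor's note: illustrative sketch, not part of the patch.) The chunk-size changes collapse three inconsistent defaults (1000 in both helpers, 50000 in the source factories and resource configuration) into one shared constant, DEFAULT_CHUNK_SIZE = 10_000, defined once in the new settings module. Since peak memory in the batched read path grows roughly with chunk_size times row width, a single tuned constant is easier to reason about than per-call-site values. The idea in miniature with SQLAlchemy server-side cursors; the engine and query are stand-ins:

    import sqlalchemy as sa

    DEFAULT_CHUNK_SIZE = 10_000  # mirrors sql_database_v2/settings.py


    def iter_chunks(engine: sa.Engine, query: sa.Select, chunk_size: int = DEFAULT_CHUNK_SIZE):
        # Yield lists of rows, holding at most one chunk in memory at a time.
        with engine.connect() as conn:
            result = conn.execution_options(yield_per=chunk_size).execute(query)
            for partition in result.partitions():
                yield list(partition)

Callers that need a different batch size can still pass chunk_size explicitly; only the default moved.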
"posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -267,6 +268,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -342,6 +344,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -542,6 +545,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -639,6 +643,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -714,6 +719,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -777,6 +783,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -977,6 +984,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1137,6 +1145,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1337,6 +1346,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1434,6 +1444,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1551,6 +1562,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1626,6 +1638,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", 
"posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -1826,6 +1839,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2139,6 +2153,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2306,6 +2321,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2381,6 +2397,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", @@ -2581,6 +2598,7 @@ "posthog_team"."person_display_name_properties", "posthog_team"."live_events_columns", "posthog_team"."recording_domains", + "posthog_team"."cookieless_server_hash_mode", "posthog_team"."primary_dashboard_id", "posthog_team"."extra_settings", "posthog_team"."modifiers", diff --git a/posthog/warehouse/api/external_data_schema.py b/posthog/warehouse/api/external_data_schema.py index 9391268bb69d0..56208208d5eaf 100644 --- a/posthog/warehouse/api/external_data_schema.py +++ b/posthog/warehouse/api/external_data_schema.py @@ -141,12 +141,16 @@ def update(self, instance: ExternalDataSchema, validated_data: dict[str, Any]) - payload = instance.sync_type_config payload["incremental_field"] = data.get("incremental_field") payload["incremental_field_type"] = data.get("incremental_field_type") + payload["incremental_field_last_value"] = None + payload["incremental_field_last_value_v2"] = None validated_data["sync_type_config"] = payload else: payload = instance.sync_type_config payload.pop("incremental_field", None) payload.pop("incremental_field_type", None) + payload.pop("incremental_field_last_value", None) + payload.pop("incremental_field_last_value_v2", None) validated_data["sync_type_config"] = payload diff --git a/requirements.in b/requirements.in index faefd16d9294d..8c0b98d16587b 100644 --- a/requirements.in +++ b/requirements.in @@ -16,6 +16,7 @@ clickhouse-driver==0.2.7 clickhouse-pool==0.5.3 conditional-cache==1.2 cryptography==39.0.2 +deltalake==0.22.3 dj-database-url==0.5.0 Django~=4.2.15 django-axes==5.9.0 @@ -34,7 +35,6 @@ djangorestframework==3.15.1 djangorestframework-csv==2.1.1 djangorestframework-dataclasses==1.2.0 dlt==1.3.0 -dlt[deltalake]==1.3.0 dnspython==2.2.1 drf-exceptions-hog==0.4.0 drf-extensions==0.7.0 diff --git a/requirements.txt b/requirements.txt index 639c98066ccd4..e4a1521f8dd7a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -138,8 +138,8 @@ defusedxml==0.6.0 # via # python3-openid # social-auth-core -deltalake==0.19.1 - # via dlt +deltalake==0.22.3 + # via -r requirements.in distro==1.9.0 # via openai dj-database-url==0.5.0 @@ -273,8 +273,6 @@ googleapis-common-protos==1.60.0 # via # google-api-core # grpcio-status -greenlet==3.1.1 - # via sqlalchemy grpcio==1.63.2 # via # -r requirements.in @@ -505,7 +503,6 @@ 
diff --git a/requirements.in b/requirements.in
index faefd16d9294d..8c0b98d16587b 100644
--- a/requirements.in
+++ b/requirements.in
@@ -16,6 +16,7 @@
 clickhouse-driver==0.2.7
 clickhouse-pool==0.5.3
 conditional-cache==1.2
 cryptography==39.0.2
+deltalake==0.22.3
 dj-database-url==0.5.0
 Django~=4.2.15
 django-axes==5.9.0
@@ -34,7 +35,6 @@
 djangorestframework==3.15.1
 djangorestframework-csv==2.1.1
 djangorestframework-dataclasses==1.2.0
 dlt==1.3.0
-dlt[deltalake]==1.3.0
 dnspython==2.2.1
 drf-exceptions-hog==0.4.0
 drf-extensions==0.7.0
diff --git a/requirements.txt b/requirements.txt
index 639c98066ccd4..e4a1521f8dd7a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -138,8 +138,8 @@
 defusedxml==0.6.0
     # via
     #   python3-openid
     #   social-auth-core
-deltalake==0.19.1
-    # via dlt
+deltalake==0.22.3
+    # via -r requirements.in
 distro==1.9.0
     # via openai
 dj-database-url==0.5.0
@@ -273,8 +273,6 @@
 googleapis-common-protos==1.60.0
     # via
     #   google-api-core
     #   grpcio-status
-greenlet==3.1.1
-    # via sqlalchemy
 grpcio==1.63.2
     # via
     #   -r requirements.in
@@ -505,7 +503,6 @@
 pyarrow==17.0.0
     # via
     #   -r requirements.in
     #   deltalake
-    #   dlt
     #   sqlalchemy-bigquery
 pyasn1==0.5.0
     # via