diff --git a/.github/workflows/ci-hog.yml b/.github/workflows/ci-hog.yml
index 69a3bc3d5f133..ea51f70721f5c 100644
--- a/.github/workflows/ci-hog.yml
+++ b/.github/workflows/ci-hog.yml
@@ -48,25 +48,23 @@ jobs:
     hog-tests:
         needs: changes
         timeout-minutes: 30
-        name: Hog tests
         runs-on: ubuntu-24.04
+        if: needs.changes.outputs.hog == 'true'
         steps:
            # If this run wasn't initiated by the bot (meaning: snapshot update) and we've determined
            # there are backend changes, cancel previous runs
            - uses: n1hility/cancel-previous-runs@v3
-             if: github.actor != 'posthog-bot' && needs.changes.outputs.hog == 'true'
+             if: github.actor != 'posthog-bot'
              with:
                  token: ${{ secrets.GITHUB_TOKEN }}

            - uses: actions/checkout@v3
-             if: needs.changes.outputs.hog == 'true'
              with:
                  fetch-depth: 1

            - name: Set up Python
-             if: needs.changes.outputs.hog == 'true'
              uses: actions/setup-python@v5
              with:
                  python-version: 3.11.9
@@ -76,31 +74,25 @@ jobs:
            # uv is a fast pip alternative: https://github.com/astral-sh/uv/
            - run: pip install uv
-             if: needs.changes.outputs.hog == 'true'

            - name: Install SAML (python3-saml) dependencies
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  sudo apt-get update
                  sudo apt-get install libxml2-dev libxmlsec1 libxmlsec1-dev libxmlsec1-openssl

            - name: Install Python dependencies
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  uv pip install --system -r requirements.txt -r requirements-dev.txt

            - name: Install pnpm
-             if: needs.changes.outputs.hog == 'true'
              uses: pnpm/action-setup@v4

            - name: Set up Node.js
-             if: needs.changes.outputs.hog == 'true'
              uses: actions/setup-node@v4
              with:
-                 node-version: 18.12.1
+                 node-version: 18

            - name: Check if ANTLR definitions are up to date
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  cd ..
                  sudo apt-get install default-jre
@@ -123,27 +115,175 @@ jobs:
                  ANTLR_VERSION: '4.13.2'

            - name: Check if STL bytecode is up to date
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  python -m hogvm.stl.compile
                  git diff --exit-code

            - name: Run HogVM Python tests
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  pytest hogvm

            - name: Run HogVM TypeScript tests
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  cd hogvm/typescript
                  pnpm install --frozen-lockfile
                  pnpm run test

            - name: Run Hog tests
-             if: needs.changes.outputs.hog == 'true'
              run: |
                  cd hogvm/typescript
                  pnpm run build
                  cd ../
                  ./test.sh && git diff --exit-code
+
+    check-package-version:
+        name: Check HogVM TypeScript package version and detect an update
+        needs: hog-tests
+        if: needs.hog-tests.result == 'success' && needs.changes.outputs.hog == 'true'
+        runs-on: ubuntu-24.04
+        outputs:
+            committed-version: ${{ steps.check-package-version.outputs.committed-version }}
+            published-version: ${{ steps.check-package-version.outputs.published-version }}
+            is-new-version: ${{ steps.check-package-version.outputs.is-new-version }}
+        steps:
+            - name: Checkout the repository
+              uses: actions/checkout@v2
+            - name: Check package version and detect an update
+              id: check-package-version
+              uses: PostHog/check-package-version@v2
+              with:
+                  path: hogvm/typescript
+
+    release-hogvm:
+        name: Release new HogVM TypeScript version
+        runs-on: ubuntu-24.04
+        needs: check-package-version
+        if: needs.changes.outputs.hog == 'true' && needs.check-package-version.outputs.is-new-version == 'true'
+        env:
+            COMMITTED_VERSION: ${{ needs.check-package-version.outputs.committed-version }}
+            PUBLISHED_VERSION: ${{ needs.check-package-version.outputs.published-version }}
+        steps:
+            - name: Checkout the repository
+              uses: actions/checkout@v4
+              with:
+                  fetch-depth: 1
+                  token: ${{ secrets.POSTHOG_BOT_GITHUB_TOKEN }}
+            - name: Set up Python
+              uses: actions/setup-python@v5
+              with:
+                  python-version: 3.11.9
+                  cache: 'pip'
+                  cache-dependency-path: '**/requirements*.txt'
+                  token: ${{ secrets.POSTHOG_BOT_GITHUB_TOKEN }}
+            - run: pip install uv
+            - name: Install SAML (python3-saml) dependencies
+              run: |
+                  sudo apt-get update
+                  sudo apt-get install libxml2-dev libxmlsec1 libxmlsec1-dev libxmlsec1-openssl
+            - name: Install Python dependencies
+              run: |
+                  uv pip install --system -r requirements.txt -r requirements-dev.txt
+            - name: Install pnpm
+              uses: pnpm/action-setup@v4
+            - name: Set up Node 18
+              uses: actions/setup-node@v4
+              with:
+                  node-version: 18
+                  registry-url: https://registry.npmjs.org
+            - name: Install package.json dependencies
+              run: cd hogvm/typescript && pnpm install
+            - name: Publish the package in the npm registry
+              run: cd hogvm/typescript && npm publish --access public
+              env:
+                  NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
+            - name: Sleep 60 seconds to allow npm to update the package
+              run: sleep 60
+
+    update-versions:
+        name: Update versions in package.json
+        runs-on: ubuntu-24.04
+        needs: release-hogvm
+        if: always() # This ensures the job runs regardless of the result of release-hogvm
+        steps:
+            - name: Checkout the repository
+              uses: actions/checkout@v4
+              with:
+                  fetch-depth: 1
+                  repository: ${{ github.event.pull_request.head.repo.full_name }}
+                  ref: ${{ github.event.pull_request.head.ref }}
+                  token: ${{ secrets.POSTHOG_BOT_GITHUB_TOKEN }}
+
+            - name: Install pnpm
+              uses: pnpm/action-setup@v4
+            - name: Set up Node 18
+              uses: actions/setup-node@v4
+              with:
+                  node-version: 18
+                  registry-url: https://registry.npmjs.org
+
+            - name: Check for version mismatches
+              id: check-mismatch
+              run: |
+                  # Extract committed version
+                  HOGVM_VERSION=$(jq -r '.version' hogvm/typescript/package.json)
+
+                  # Compare dependencies in package.json
+                  MAIN_VERSION=$(jq -r '.dependencies."@posthog/hogvm"' package.json | tr -d '^')
+                  PLUGIN_VERSION=$(jq -r '.dependencies."@posthog/hogvm"' plugin-server/package.json | tr -d '^')
+
+                  echo "HOGVM_VERSION=$HOGVM_VERSION"
+                  echo "MAIN_VERSION=$MAIN_VERSION"
+                  echo "PLUGIN_VERSION=$PLUGIN_VERSION"
+
+                  # Set the env var if mismatches exist
+                  if [[ "$HOGVM_VERSION" != "$MAIN_VERSION" || "$HOGVM_VERSION" != "$PLUGIN_VERSION" ]]; then
+                      echo "mismatch=true" >> "$GITHUB_ENV"
+                  else
+                      echo "mismatch=false" >> "$GITHUB_ENV"
+                  fi
+
+            - name: Update package.json versions
+              if: env.mismatch == 'true'
+              run: |
+                  VERSION=$(jq ".version" hogvm/typescript/package.json -r)
+
+                  retry_pnpm_install() {
+                      local retries=0
+                      local max_retries=20 # 10 minutes total
+                      local delay=30
+
+                      while [[ $retries -lt $max_retries ]]; do
+                          echo "Attempting pnpm install (retry $((retries+1))/$max_retries)..."
+                          pnpm install --no-frozen-lockfile && break
+                          echo "Install failed. Retrying in $delay seconds..."
+                          sleep $delay
+                          retries=$((retries + 1))
+                      done
+
+                      if [[ $retries -eq $max_retries ]]; then
+                          echo "pnpm install failed after $max_retries attempts."
+                          exit 1
+                      fi
+                  }
+
+                  # Update main package.json
+                  mv package.json package.old.json
+                  jq --indent 4 '.dependencies."@posthog/hogvm" = "^'$VERSION'"' package.old.json > package.json
+                  rm package.old.json
+                  retry_pnpm_install
+
+                  # Update plugin-server/package.json
+                  cd plugin-server
+                  mv package.json package.old.json
+                  jq --indent 4 '.dependencies."@posthog/hogvm" = "^'$VERSION'"' package.old.json > package.json
+                  rm package.old.json
+                  retry_pnpm_install
+
+            - name: Commit updated package.json files
+              if: env.mismatch == 'true'
+              uses: EndBug/add-and-commit@v9
+              with:
+                  add: '["package.json", "pnpm-lock.yaml", "plugin-server/package.json", "plugin-server/pnpm-lock.yaml", "hogvm/typescript/package.json"]'
+                  message: 'Update @posthog/hogvm version in package.json'
+                  default_author: github_actions
+                  github_token: ${{ secrets.POSTHOG_BOT_GITHUB_TOKEN }}
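The `check-mismatch` step above shells out to `jq` to compare the committed `hogvm/typescript` version against the `@posthog/hogvm` ranges pinned by the two consuming `package.json` files. A minimal Python sketch of the same comparison, for reference (file paths as in the workflow; the `lstrip("^")` mirrors `tr -d '^'`):

```python
import json

def hogvm_versions_mismatch() -> bool:
    """Return True when either consumer pins a different @posthog/hogvm version."""
    with open("hogvm/typescript/package.json") as f:
        committed = json.load(f)["version"]
    for consumer in ("package.json", "plugin-server/package.json"):
        with open(consumer) as f:
            pinned = json.load(f)["dependencies"]["@posthog/hogvm"].lstrip("^")
        if pinned != committed:
            return True
    return False
```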
diff --git a/.github/workflows/release-hogvm.yml b/.github/workflows/release-hogvm.yml
deleted file mode 100644
index 6c8480202b24d..0000000000000
--- a/.github/workflows/release-hogvm.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: 'Publish HogVM package'
-
-on:
-    push:
-        branches:
-            - main
-            - master
-
-jobs:
-    release:
-        name: Publish
-        runs-on: ubuntu-24.04
-        defaults:
-            run:
-                working-directory: hogvm/typescript
-        steps:
-            - name: Checkout the repository
-              uses: actions/checkout@v4
-
-            - name: Check package version and detect an update
-              id: check-package-version
-              uses: PostHog/check-package-version@v2
-              with:
-                  path: hogvm/typescript
-
-            - name: Install pnpm
-              uses: pnpm/action-setup@v4
-
-            - name: Set up Node
-              uses: actions/setup-node@v4
-              if: steps.check-package-version.outputs.is-new-version == 'true'
-              with:
-                  node-version: 18.12.1
-                  registry-url: https://registry.npmjs.org
-                  cache: pnpm
-                  cache-dependency-path: hogvm/typescript/pnpm-lock.yaml
-
-            - name: Install dependencies
-              if: steps.check-package-version.outputs.is-new-version == 'true'
-              run: pnpm i --frozen-lockfile
-
-            - name: Build
-              if: steps.check-package-version.outputs.is-new-version == 'true'
-              run: pnpm build
-
-            - name: Publish the package in the npm registry
-              id: publish-package
-              if: steps.check-package-version.outputs.is-new-version == 'true'
-              run: |
-                  pnpm publish --access public --tag latest
-              env:
-                  NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
diff --git a/cypress/e2e/dashboard.cy.ts b/cypress/e2e/dashboard.cy.ts
index b5f62097ebee1..cefbb60b6fe69 100644
--- a/cypress/e2e/dashboard.cy.ts
+++ b/cypress/e2e/dashboard.cy.ts
@@ -379,6 +379,8 @@ describe('Dashboard', () => {
         cy.get('[data-attr="date-filter"]').click()
         cy.contains('span', 'Last 14 days').click()

+        cy.wait(2000)
+
         // insight meta should be updated to show new date range
         cy.get('h5').contains('Last 14 days').should('exist')

diff --git a/cypress/e2e/surveys.cy.ts b/cypress/e2e/surveys.cy.ts
index 604c182a51a18..1680681bf1fb5 100644
--- a/cypress/e2e/surveys.cy.ts
+++ b/cypress/e2e/surveys.cy.ts
@@ -92,7 +92,7 @@ describe('Surveys', () => {
         cy.get('.LemonCollapsePanel').contains('Display conditions').click()
         cy.contains('All users').click()
         cy.get('.Popover__content').contains('Users who match').click()
-        cy.contains('Add user targeting').click()
+        cy.contains('Add property targeting').click()

         // select the first property
         cy.get('[data-attr="property-select-toggle-0"]').click()
@@ -144,7 +144,7 @@ describe('Surveys', () => {

         // remove user targeting properties
         cy.get('.LemonCollapsePanel').contains('Display conditions').click()
-        cy.contains('Remove all user properties').click()
+        cy.contains('Remove all property targeting').click()

         // save
         cy.get('[data-attr="save-survey"]').eq(0).click()
@@ -197,7 +197,7 @@ describe('Surveys', () => {
         cy.get('.LemonCollapsePanel').contains('Display conditions').click()
         cy.contains('All users').click()
         cy.get('.Popover__content').contains('Users who match').click()
-        cy.contains('Add user targeting').click()
+        cy.contains('Add property targeting').click()
         cy.get('[data-attr="property-select-toggle-0"]').click()
         cy.get('[data-attr="prop-filter-person_properties-0"]').click()
         cy.get('[data-attr=prop-val]').click({ force: true })
diff --git a/ee/api/conversation.py b/ee/api/conversation.py
new file mode 100644
index 0000000000000..70e314b94039f
--- /dev/null
+++ b/ee/api/conversation.py
@@ -0,0 +1,69 @@
+from typing import cast
+
+from django.http import StreamingHttpResponse
+from pydantic import ValidationError
+from rest_framework import serializers
+from rest_framework.renderers import BaseRenderer
+from rest_framework.request import Request
+from rest_framework.viewsets import GenericViewSet
+
+from ee.hogai.assistant import Assistant
+from ee.models.assistant import Conversation
+from posthog.api.routing import TeamAndOrgViewSetMixin
+from posthog.models.user import User
+from posthog.rate_limit import AIBurstRateThrottle, AISustainedRateThrottle
+from posthog.schema import HumanMessage
+
+
+class MessageSerializer(serializers.Serializer):
+    content = serializers.CharField(required=True, max_length=1000)
+    conversation = serializers.UUIDField(required=False)
+
+    def validate(self, data):
+        try:
+            message = HumanMessage(content=data["content"])
+            data["message"] = message
+        except ValidationError:
+            raise serializers.ValidationError("Invalid message content.")
+        return data
+
+
+class ServerSentEventRenderer(BaseRenderer):
+    media_type = "text/event-stream"
+    format = "txt"
+
+    def render(self, data, accepted_media_type=None, renderer_context=None):
+        return data
+
+
+class ConversationViewSet(TeamAndOrgViewSetMixin, GenericViewSet):
+    scope_object = "INTERNAL"
+    serializer_class = MessageSerializer
+    renderer_classes = [ServerSentEventRenderer]
+    queryset = Conversation.objects.all()
+    lookup_url_kwarg = "conversation"
+
+    def safely_get_queryset(self, queryset):
+        # Only allow access to conversations created by the current user
+        return queryset.filter(user=self.request.user)
+
+    def get_throttles(self):
+        return [AIBurstRateThrottle(), AISustainedRateThrottle()]
+
+    def create(self, request: Request, *args, **kwargs):
+        serializer = self.get_serializer(data=request.data)
+        serializer.is_valid(raise_exception=True)
+        conversation_id = serializer.validated_data.get("conversation")
+        if conversation_id:
+            self.kwargs[self.lookup_url_kwarg] = conversation_id
+            conversation = self.get_object()
+        else:
+            conversation = self.get_queryset().create(user=request.user, team=self.team)
+        assistant = Assistant(
+            self.team,
+            conversation,
+            serializer.validated_data["message"],
+            user=cast(User, request.user),
+            is_new_conversation=not conversation_id,
+        )
+        return StreamingHttpResponse(assistant.stream(), content_type=ServerSentEventRenderer.media_type)
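The new viewset streams server-sent events rather than returning a single JSON payload. A minimal client sketch of consuming it (the URL shape and body follow the tests below; `base_url` and the authenticated `requests.Session` are assumptions):

```python
import json
import requests

def ask_max(base_url: str, team_id: int, content: str, session: requests.Session) -> None:
    """POST a message and print each server-sent event payload as it arrives."""
    response = session.post(
        f"{base_url}/api/environments/{team_id}/conversations/",
        json={"content": content},
        stream=True,  # don't buffer the whole body; it's a text/event-stream
    )
    response.raise_for_status()
    for line in response.iter_lines(decode_unicode=True):
        if line.startswith("data: "):
            print(json.loads(line[len("data: "):]))
```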
diff --git a/ee/api/test/test_conversation.py b/ee/api/test/test_conversation.py
new file mode 100644
index 0000000000000..6eb466876dc01
--- /dev/null
+++ b/ee/api/test/test_conversation.py
@@ -0,0 +1,157 @@
+from unittest.mock import patch
+
+from rest_framework import status
+
+from ee.hogai.assistant import Assistant
+from ee.models.assistant import Conversation
+from posthog.models.team.team import Team
+from posthog.models.user import User
+from posthog.test.base import APIBaseTest
+
+
+class TestConversation(APIBaseTest):
+    def setUp(self):
+        super().setUp()
+        self.other_team = Team.objects.create(organization=self.organization, name="other team")
+        self.other_user = User.objects.create_and_join(
+            organization=self.organization,
+            email="other@posthog.com",
+            password="password",
+            first_name="Other",
+        )
+
+    def _get_streaming_content(self, response):
+        return b"".join(response.streaming_content)
+
+    def test_create_conversation(self):
+        with patch.object(Assistant, "_stream", return_value=["test response"]) as stream_mock:
+            response = self.client.post(
+                f"/api/environments/{self.team.id}/conversations/",
+                {"content": "test query"},
+            )
+            self.assertEqual(response.status_code, status.HTTP_200_OK)
+            self.assertEqual(self._get_streaming_content(response), b"test response")
+            self.assertEqual(Conversation.objects.count(), 1)
+            conversation: Conversation = Conversation.objects.first()
+            self.assertEqual(conversation.user, self.user)
+            self.assertEqual(conversation.team, self.team)
+            stream_mock.assert_called_once()
+
+    def test_add_message_to_existing_conversation(self):
+        with patch.object(Assistant, "_stream", return_value=["test response"]) as stream_mock:
+            conversation = Conversation.objects.create(user=self.user, team=self.team)
+            response = self.client.post(
+                f"/api/environments/{self.team.id}/conversations/",
+                {
+                    "conversation": str(conversation.id),
+                    "content": "test query",
+                },
+            )
+            self.assertEqual(response.status_code, status.HTTP_200_OK)
+            self.assertEqual(self._get_streaming_content(response), b"test response")
+            self.assertEqual(Conversation.objects.count(), 1)
+            stream_mock.assert_called_once()
+
+    def test_cant_access_other_users_conversation(self):
+        conversation = Conversation.objects.create(user=self.other_user, team=self.team)
+
+        self.client.force_login(self.user)
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {"conversation": conversation.id, "content": "test query"},
+        )
+
+        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
+
+    def test_cant_access_other_teams_conversation(self):
+        conversation = Conversation.objects.create(user=self.user, team=self.other_team)
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {"conversation": conversation.id, "content": "test query"},
+        )
+        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
+
+    def test_invalid_message_format(self):
+        response = self.client.post("/api/environments/@current/conversations/")
+        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+
+    def test_rate_limit_burst(self):
+        # Create multiple requests to trigger burst rate limit
+        with patch.object(Assistant, "_stream", return_value=["test response"]):
+            for _ in range(11):  # Assuming burst limit is less than this
+                response = self.client.post(
+                    f"/api/environments/{self.team.id}/conversations/",
+                    {"content": "test query"},
+                )
+        self.assertEqual(response.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
+
+    def test_empty_content(self):
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {"content": ""},
+        )
+        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+
+    def test_content_too_long(self):
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {"content": "x" * 1001},  # Very long message
+        )
+        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+
+    def test_invalid_conversation_id(self):
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {
+                "conversation": "not-a-valid-uuid",
+                "content": "test query",
+            },
+        )
+        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+
+    def test_nonexistent_conversation(self):
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {
+                "conversation": "12345678-1234-5678-1234-567812345678",
+                "content": "test query",
+            },
+        )
+        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
+
+    def test_deleted_conversation(self):
+        # Create and then delete a conversation
+        conversation = Conversation.objects.create(user=self.user, team=self.team)
+        conversation_id = conversation.id
+        conversation.delete()
+
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {
+                "conversation": str(conversation_id),
+                "content": "test query",
+            },
+        )
+        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
+
+    def test_unauthenticated_request(self):
+        self.client.logout()
+        response = self.client.post(
+            f"/api/environments/{self.team.id}/conversations/",
+            {"content": "test query"},
+        )
+        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
+
+    def test_streaming_error_handling(self):
+        def raise_error():
+            yield "some content"
+            raise Exception("Streaming error")
+
+        with patch.object(Assistant, "_stream", side_effect=raise_error):
+            response = self.client.post(
+                f"/api/environments/{self.team.id}/conversations/",
+                {"content": "test query"},
+            )
+            with self.assertRaises(Exception) as context:
+                b"".join(response.streaming_content)
+            self.assertTrue("Streaming error" in str(context.exception))
diff --git a/ee/clickhouse/queries/event_query.py b/ee/clickhouse/queries/event_query.py
index 3f7857fd9a970..64f08da69d6bf 100644
--- a/ee/clickhouse/queries/event_query.py
+++ b/ee/clickhouse/queries/event_query.py
@@ -7,7 +7,6 @@
 from posthog.models.filters.path_filter import PathFilter
 from posthog.models.filters.properties_timeline_filter import PropertiesTimelineFilter
 from posthog.models.filters.retention_filter import RetentionFilter
-from posthog.models.filters.session_recordings_filter import SessionRecordingsFilter
 from posthog.models.filters.stickiness_filter import StickinessFilter
 from posthog.models.property import PropertyName
 from posthog.models.team import Team
@@ -25,7 +24,6 @@ def __init__(
         PathFilter,
         RetentionFilter,
         StickinessFilter,
-        SessionRecordingsFilter,
         PropertiesTimelineFilter,
     ],
     team: Team,
diff --git a/ee/hogai/assistant.py b/ee/hogai/assistant.py
index 77b1c2c050008..3a296ba9ce7d6 100644
--- a/ee/hogai/assistant.py
+++ b/ee/hogai/assistant.py
@@ -1,9 +1,12 @@
+import json
 from collections.abc import AsyncGenerator, Generator, Iterator
 from functools import partial
-from typing import Any, Literal, Optional, TypedDict, TypeGuard, Union
+from typing import Any, Optional
+from uuid import uuid4

 from asgiref.sync import sync_to_async
 from langchain_core.messages import AIMessageChunk
+from langchain_core.runnables.config import RunnableConfig
 from langfuse.callback import CallbackHandler
 from langgraph.graph.state import CompiledStateGraph
 from pydantic import BaseModel
@@ -17,7 +20,19 @@
 from ee.hogai.trends.nodes import (
     TrendsGeneratorNode,
 )
-from ee.hogai.utils import AssistantNodeName, AssistantState, Conversation
+from ee.hogai.utils.state import (
+    GraphMessageUpdateTuple,
+    GraphTaskStartedUpdateTuple,
+    GraphValueUpdateTuple,
+    is_message_update,
+    is_state_update,
+    is_task_started_update,
+    is_value_update,
+    validate_state_update,
+    validate_value_update,
+)
+from ee.hogai.utils.types import AssistantNodeName, AssistantState, PartialAssistantState
+from ee.models import Conversation
 from posthog.event_usage import report_user_action
 from posthog.models import Team, User
 from posthog.schema import (
@@ -40,42 +55,6 @@
 langfuse_handler = None


-def is_value_update(update: list[Any]) -> TypeGuard[tuple[Literal["values"], dict[AssistantNodeName, AssistantState]]]:
-    """
-    Transition between nodes.
-    """
-    return len(update) == 2 and update[0] == "updates"
-
-
-class LangGraphState(TypedDict):
-    langgraph_node: AssistantNodeName
-
-
-def is_message_update(
-    update: list[Any],
-) -> TypeGuard[tuple[Literal["messages"], tuple[Union[AIMessageChunk, Any], LangGraphState]]]:
-    """
-    Streaming of messages. Returns a partial state.
-    """
-    return len(update) == 2 and update[0] == "messages"
-
-
-def is_state_update(update: list[Any]) -> TypeGuard[tuple[Literal["updates"], AssistantState]]:
-    """
-    Update of the state.
-    """
-    return len(update) == 2 and update[0] == "values"
-
-
-def is_task_started_update(
-    update: list[Any],
-) -> TypeGuard[tuple[Literal["messages"], tuple[Union[AIMessageChunk, Any], LangGraphState]]]:
-    """
-    Streaming of messages. Returns a partial state.
-    """
-    return len(update) == 2 and update[0] == "debug" and update[1]["type"] == "task"
-
-
 VISUALIZATION_NODES: dict[AssistantNodeName, type[SchemaGeneratorNode]] = {
     AssistantNodeName.TRENDS_GENERATOR: TrendsGeneratorNode,
     AssistantNodeName.FUNNEL_GENERATOR: FunnelGeneratorNode,
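The TypeGuard helpers deleted above move to `ee/hogai/utils/state.py`, which this diff doesn't show. Judging from the import list at the top of the file and the removed signatures, the relocated guards plausibly look like the sketch below (the tuple aliases are assumptions inferred from the old annotations; `validate_state_update`/`validate_value_update` presumably wrap `AssistantState.model_validate`):

```python
from typing import Any, Literal, TypedDict, TypeGuard, Union

from langchain_core.messages import AIMessageChunk


class LangGraphState(TypedDict):
    langgraph_node: str  # node name emitting the update


# Aliases assumed from the import list; exact definitions live in ee/hogai/utils/state.py.
GraphValueUpdateTuple = tuple[Literal["updates"], dict[Any, Any]]
GraphMessageUpdateTuple = tuple[Literal["messages"], tuple[Union[AIMessageChunk, Any], LangGraphState]]
GraphTaskStartedUpdateTuple = tuple[Literal["debug"], dict[str, Any]]


def is_value_update(update: list[Any]) -> TypeGuard[GraphValueUpdateTuple]:
    """Transition between nodes: a `stream_mode="updates"` tuple."""
    return len(update) == 2 and update[0] == "updates"


def is_message_update(update: list[Any]) -> TypeGuard[GraphMessageUpdateTuple]:
    """Token-by-token streaming: a `stream_mode="messages"` tuple."""
    return len(update) == 2 and update[0] == "messages"


def is_state_update(update: list[Any]) -> TypeGuard[tuple[Literal["values"], dict]]:
    """Full snapshot of the graph state: a `stream_mode="values"` tuple."""
    return len(update) == 2 and update[0] == "values"


def is_task_started_update(update: list[Any]) -> TypeGuard[GraphTaskStartedUpdateTuple]:
    """Debug event emitted when a node's task starts."""
    return len(update) == 2 and update[0] == "debug" and update[1]["type"] == "task"
```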
@@ -87,13 +66,25 @@ class Assistant:
     _graph: CompiledStateGraph
     _user: Optional[User]
     _conversation: Conversation
+    _latest_message: HumanMessage
+    _state: Optional[AssistantState]

-    def __init__(self, team: Team, conversation: Conversation, user: Optional[User] = None):
+    def __init__(
+        self,
+        team: Team,
+        conversation: Conversation,
+        new_message: HumanMessage,
+        user: Optional[User] = None,
+        is_new_conversation: bool = False,
+    ):
         self._team = team
         self._user = user
         self._conversation = conversation
+        self._latest_message = new_message.model_copy(deep=True, update={"id": str(uuid4())})
+        self._is_new_conversation = is_new_conversation
         self._graph = AssistantGraph(team).compile_full_graph()
         self._chunks = AIMessageChunk(content="")
+        self._state = None

     def stream(self):
         if SERVER_GATEWAY_INTERFACE == "ASGI":
@@ -110,15 +101,19 @@ async def _astream(self) -> AsyncGenerator[str, None]:
             break

     def _stream(self) -> Generator[str, None, None]:
-        callbacks = [langfuse_handler] if langfuse_handler else []
+        state = self._init_or_update_state()
+        config = self._get_config()
+
         generator: Iterator[Any] = self._graph.stream(
-            self._initial_state,
-            config={"recursion_limit": 24, "callbacks": callbacks},
-            stream_mode=["messages", "values", "updates", "debug"],
+            state, config=config, stream_mode=["messages", "values", "updates", "debug"]
         )

-        # Send a chunk to establish the connection avoiding the worker's timeout.
-        yield self._serialize_message(AssistantGenerationStatusEvent(type=AssistantGenerationStatusType.ACK))
+        # Assign the conversation id to the client.
+        if self._is_new_conversation:
+            yield self._serialize_conversation()
+
+        # Send the last message with the initialized id.
+        yield self._serialize_message(self._latest_message)

         try:
             last_viz_message = None
@@ -127,7 +122,15 @@ def _stream(self) -> Generator[str, None, None]:
                 if isinstance(message, VisualizationMessage):
                     last_viz_message = message
                 yield self._serialize_message(message)
-            self._report_conversation(last_viz_message)
+
+            # Check if the assistant has requested help.
+            state = self._graph.get_state(config)
+            if state.next:
+                yield self._serialize_message(
+                    AssistantMessage(content=state.tasks[0].interrupts[0].value, id=str(uuid4()))
+                )
+            else:
+                self._report_conversation_state(last_viz_message)
         except:
             # This is an unhandled error, so we just stop further generation at this point
             yield self._serialize_message(FailureMessage())
@@ -135,8 +138,34 @@
     @property
     def _initial_state(self) -> AssistantState:
-        messages = [message.root for message in self._conversation.messages]
-        return {"messages": messages, "intermediate_steps": None, "plan": None}
+        return AssistantState(messages=[self._latest_message], start_id=self._latest_message.id)
+
+    def _get_config(self) -> RunnableConfig:
+        callbacks = [langfuse_handler] if langfuse_handler else []
+        config: RunnableConfig = {
+            "recursion_limit": 24,
+            "callbacks": callbacks,
+            "configurable": {"thread_id": self._conversation.id},
+        }
+        return config
+
+    def _init_or_update_state(self):
+        config = self._get_config()
+        snapshot = self._graph.get_state(config)
+        if snapshot.next:
+            saved_state = validate_state_update(snapshot.values)
+            self._state = saved_state
+            if saved_state.intermediate_steps:
+                intermediate_steps = saved_state.intermediate_steps.copy()
+                intermediate_steps[-1] = (intermediate_steps[-1][0], self._latest_message.content)
+                self._graph.update_state(
+                    config,
+                    PartialAssistantState(messages=[self._latest_message], intermediate_steps=intermediate_steps),
+                )
+            return None
+        initial_state = self._initial_state
+        self._state = initial_state
+        return initial_state

     def _node_to_reasoning_message(
         self, node_name: AssistantNodeName, input: AssistantState
@@ -152,7 +181,7 @@ def _node_to_reasoning_message(
         ):
             substeps: list[str] = []
             if input:
-                if intermediate_steps := input.get("intermediate_steps"):
+                if intermediate_steps := input.intermediate_steps:
                     for action, _ in intermediate_steps:
                         match action.tool:
                             case "retrieve_event_properties":
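On the wire, `_serialize_conversation` and `_serialize_message` (both further down in this file) frame the stream as standard server-sent events: an optional conversation event carrying the new conversation id, then one event per message. Illustratively, a new conversation's stream begins roughly like this (ids shortened; the event names and payload fields assume the `AssistantEventType` enum and message models serialize to lowercase strings, which this diff doesn't show):

```
event: conversation
data: {"id": "0193e9a2-..."}

event: message
data: {"type": "human", "content": "Show the $pageview trend", "id": "9f8c..."}
```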
@@ -178,42 +207,65 @@
             return None

     def _process_update(self, update: Any) -> BaseModel | None:
-        if is_value_update(update):
-            _, state_update = update
+        if is_state_update(update):
+            _, new_state = update
+            self._state = validate_state_update(new_state)
+        elif is_value_update(update) and (new_message := self._process_value_update(update)):
+            return new_message
+        elif is_message_update(update) and (new_message := self._process_message_update(update)):
+            return new_message
+        elif is_task_started_update(update) and (new_message := self._process_task_started_update(update)):
+            return new_message
+        return None

-            if AssistantNodeName.ROUTER in state_update and "messages" in state_update[AssistantNodeName.ROUTER]:
-                return state_update[AssistantNodeName.ROUTER]["messages"][0]
-            elif intersected_nodes := state_update.keys() & VISUALIZATION_NODES.keys():
-                # Reset chunks when schema validation fails.
-                self._chunks = AIMessageChunk(content="")
+    def _process_value_update(self, update: GraphValueUpdateTuple) -> BaseModel | None:
+        _, maybe_state_update = update
+        state_update = validate_value_update(maybe_state_update)
+
+        if node_val := state_update.get(AssistantNodeName.ROUTER):
+            if isinstance(node_val, PartialAssistantState) and node_val.messages:
+                return node_val.messages[0]
+        elif intersected_nodes := state_update.keys() & VISUALIZATION_NODES.keys():
+            # Reset chunks when schema validation fails.
+            self._chunks = AIMessageChunk(content="")

-                node_name = intersected_nodes.pop()
-                if "messages" in state_update[node_name]:
-                    return state_update[node_name]["messages"][0]
-                elif state_update[node_name].get("intermediate_steps", []):
-                    return AssistantGenerationStatusEvent(type=AssistantGenerationStatusType.GENERATION_ERROR)
-            elif AssistantNodeName.SUMMARIZER in state_update:
+            node_name = intersected_nodes.pop()
+            node_val = state_update[node_name]
+            if not isinstance(node_val, PartialAssistantState):
+                return None
+            if node_val.messages:
+                return node_val.messages[0]
+            elif node_val.intermediate_steps:
+                return AssistantGenerationStatusEvent(type=AssistantGenerationStatusType.GENERATION_ERROR)
+        elif node_val := state_update.get(AssistantNodeName.SUMMARIZER):
+            if isinstance(node_val, PartialAssistantState) and node_val.messages:
                 self._chunks = AIMessageChunk(content="")
-                return state_update[AssistantNodeName.SUMMARIZER]["messages"][0]
-        elif is_message_update(update):
-            langchain_message, langgraph_state = update[1]
-            if isinstance(langchain_message, AIMessageChunk):
-                if langgraph_state["langgraph_node"] in VISUALIZATION_NODES.keys():
-                    self._chunks += langchain_message  # type: ignore
-                    parsed_message = VISUALIZATION_NODES[langgraph_state["langgraph_node"]].parse_output(
-                        self._chunks.tool_calls[0]["args"]
-                    )
-                    if parsed_message:
-                        return VisualizationMessage(answer=parsed_message.query)
-                elif langgraph_state["langgraph_node"] == AssistantNodeName.SUMMARIZER:
-                    self._chunks += langchain_message  # type: ignore
-                    return AssistantMessage(content=self._chunks.content)
-        elif is_task_started_update(update):
-            _, task_update = update
-            node_name = task_update["payload"]["name"]  # type: ignore
-            node_input = task_update["payload"]["input"]  # type: ignore
-            if reasoning_message := self._node_to_reasoning_message(node_name, node_input):
-                return reasoning_message
+                return node_val.messages[0]
+
+        return None
+
+    def _process_message_update(self, update: GraphMessageUpdateTuple) -> BaseModel | None:
+        langchain_message, langgraph_state = update[1]
+        if isinstance(langchain_message, AIMessageChunk):
+            if langgraph_state["langgraph_node"] in VISUALIZATION_NODES.keys():
+                self._chunks += langchain_message  # type: ignore
+                parsed_message = VISUALIZATION_NODES[langgraph_state["langgraph_node"]].parse_output(
+                    self._chunks.tool_calls[0]["args"]
+                )
+                if parsed_message:
+                    initiator_id = self._state.start_id if self._state is not None else None
+                    return VisualizationMessage(answer=parsed_message.query, initiator=initiator_id)
+            elif langgraph_state["langgraph_node"] == AssistantNodeName.SUMMARIZER:
+                self._chunks += langchain_message  # type: ignore
+                return AssistantMessage(content=self._chunks.content)
+        return None
+
+    def _process_task_started_update(self, update: GraphTaskStartedUpdateTuple) -> BaseModel | None:
+        _, task_update = update
+        node_name = task_update["payload"]["name"]  # type: ignore
+        node_input = task_update["payload"]["input"]  # type: ignore
+        if reasoning_message := self._node_to_reasoning_message(node_name, node_input):
+            return reasoning_message
         return None

     def _serialize_message(self, message: BaseModel) -> str:
@@ -224,9 +276,15 @@ def _serialize_message(self, message: BaseModel) -> str:
             output += f"event: {AssistantEventType.MESSAGE}\n"
         return output + f"data: {message.model_dump_json(exclude_none=True)}\n\n"

-    def _report_conversation(self, message: Optional[VisualizationMessage]):
-        human_message = self._conversation.messages[-1].root
-        if self._user and message and isinstance(human_message, HumanMessage):
+    def _serialize_conversation(self) -> str:
+        output = f"event: {AssistantEventType.CONVERSATION}\n"
+        json_conversation = json.dumps({"id": str(self._conversation.id)})
+        output += f"data: {json_conversation}\n\n"
+        return output
+
+    def _report_conversation_state(self, message: Optional[VisualizationMessage]):
+        human_message = self._latest_message
+        if self._user and message:
             report_user_action(
                 self._user,
                 "chat with ai",
diff --git a/ee/hogai/django_checkpoint/__init__.py b/ee/hogai/django_checkpoint/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ee/hogai/django_checkpoint/checkpointer.py b/ee/hogai/django_checkpoint/checkpointer.py
new file mode 100644
index 0000000000000..78817dca9df76
--- /dev/null
+++ b/ee/hogai/django_checkpoint/checkpointer.py
@@ -0,0 +1,309 @@
+import json
+import random
+import threading
+from collections.abc import Iterable, Iterator, Sequence
+from typing import Any, Optional, cast
+
+from django.db import transaction
+from django.db.models import Q
+from langchain_core.runnables import RunnableConfig
+from langgraph.checkpoint.base import (
+    WRITES_IDX_MAP,
+    BaseCheckpointSaver,
+    ChannelVersions,
+    Checkpoint,
+    CheckpointMetadata,
+    CheckpointTuple,
+    PendingWrite,
+    get_checkpoint_id,
+)
+from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer
+from langgraph.checkpoint.serde.types import ChannelProtocol
+
+from ee.models.assistant import ConversationCheckpoint, ConversationCheckpointBlob, ConversationCheckpointWrite
+
+
+class DjangoCheckpointer(BaseCheckpointSaver[str]):
+    jsonplus_serde = JsonPlusSerializer()
+    _lock: threading.Lock
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self._lock = threading.Lock()
+
+    def _load_writes(self, writes: Sequence[ConversationCheckpointWrite]) -> list[PendingWrite]:
+        return (
+            [
+                (
+                    str(checkpoint_write.task_id),
+                    checkpoint_write.channel,
+                    self.serde.loads_typed((checkpoint_write.type, checkpoint_write.blob)),
+                )
+                for checkpoint_write in writes
+                if checkpoint_write.type is not None and checkpoint_write.blob is not None
+            ]
+            if writes
+            else []
+        )
+
+    def _load_json(self, obj: Any):
+        return self.jsonplus_serde.loads(self.jsonplus_serde.dumps(obj))
+
+    def _dump_json(self, obj: Any) -> dict[str, Any]:
+        serialized_metadata = self.jsonplus_serde.dumps(obj)
+        # NOTE: we're using JSON serializer (not msgpack), so we need to remove null characters before writing
+        nulls_removed = serialized_metadata.decode().replace("\\u0000", "")
+        return json.loads(nulls_removed)
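The null-stripping in `_dump_json` matters because Postgres `jsonb` columns reject the `\u0000` escape even though it is valid JSON. A quick illustration of the failure mode this avoids (a sketch, not PostHog code):

```python
import json

payload = {"content": "bad\x00byte"}
serialized = json.dumps(payload)  # '{"content": "bad\\u0000byte"}'
# Postgres raises "unsupported Unicode escape sequence" when inserting this
# as jsonb, so the checkpointer strips the escape before persisting:
cleaned = serialized.replace("\\u0000", "")
print(json.loads(cleaned))  # {'content': 'badbyte'}
```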
+    def _get_checkpoint_qs(
+        self,
+        config: Optional[RunnableConfig],
+        filter: Optional[dict[str, Any]],
+        before: Optional[RunnableConfig],
+    ):
+        query = Q()
+
+        # construct predicate for config filter
+        if config and "configurable" in config:
+            thread_id = config["configurable"].get("thread_id")
+            query &= Q(thread_id=thread_id)
+            checkpoint_ns = config["configurable"].get("checkpoint_ns")
+            if checkpoint_ns is not None:
+                query &= Q(checkpoint_ns=checkpoint_ns)
+            if checkpoint_id := get_checkpoint_id(config):
+                query &= Q(id=checkpoint_id)
+
+        # construct predicate for metadata filter
+        if filter:
+            query &= Q(metadata__contains=filter)
+
+        # construct predicate for `before`
+        if before is not None:
+            query &= Q(id__lt=get_checkpoint_id(before))
+
+        return ConversationCheckpoint.objects.filter(query).order_by("-id")
+
+    def _get_checkpoint_channel_values(
+        self, checkpoint: ConversationCheckpoint
+    ) -> Iterable[ConversationCheckpointBlob]:
+        if not checkpoint.checkpoint:
+            return []
+        loaded_checkpoint = self._load_json(checkpoint.checkpoint)
+        if "channel_versions" not in loaded_checkpoint:
+            return []
+        query = Q()
+        for channel, version in loaded_checkpoint["channel_versions"].items():
+            query |= Q(channel=channel, version=version)
+        return checkpoint.blobs.filter(query)
+
+    def list(
+        self,
+        config: Optional[RunnableConfig],
+        *,
+        filter: Optional[dict[str, Any]] = None,
+        before: Optional[RunnableConfig] = None,
+        limit: Optional[int] = None,
+    ) -> Iterator[CheckpointTuple]:
+        """List checkpoints from the database.
+
+        This method retrieves a list of checkpoint tuples from the Postgres database based
+        on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first).
+
+        Args:
+            config (RunnableConfig): The config to use for listing the checkpoints.
+            filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. Defaults to None.
+            before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None.
+            limit (Optional[int]): The maximum number of checkpoints to return. Defaults to None.
+
+        Yields:
+            Iterator[CheckpointTuple]: An iterator of checkpoint tuples.
+        """
+        qs = self._get_checkpoint_qs(config, filter, before)
+        if limit:
+            qs = qs[:limit]
+
+        for checkpoint in qs:
+            channel_values = self._get_checkpoint_channel_values(checkpoint)
+            loaded_checkpoint: Checkpoint = self._load_json(checkpoint.checkpoint)
+
+            checkpoint_dict: Checkpoint = {
+                **loaded_checkpoint,
+                "pending_sends": [
+                    self.serde.loads_typed((checkpoint_write.type, checkpoint_write.blob))
+                    for checkpoint_write in checkpoint.pending_sends
+                ],
+                "channel_values": {
+                    checkpoint_blob.channel: self.serde.loads_typed((checkpoint_blob.type, checkpoint_blob.blob))
+                    for checkpoint_blob in channel_values
+                    if checkpoint_blob.type is not None
+                    and checkpoint_blob.type != "empty"
+                    and checkpoint_blob.blob is not None
+                },
+            }
+
+            yield CheckpointTuple(
+                {
+                    "configurable": {
+                        "thread_id": checkpoint.thread_id,
+                        "checkpoint_ns": checkpoint.checkpoint_ns,
+                        "checkpoint_id": checkpoint.id,
+                    }
+                },
+                checkpoint_dict,
+                self._load_json(checkpoint.metadata),
+                (
+                    {
+                        "configurable": {
+                            "thread_id": checkpoint.thread_id,
+                            "checkpoint_ns": checkpoint.checkpoint_ns,
+                            "checkpoint_id": checkpoint.parent_checkpoint_id,
+                        }
+                    }
+                    if checkpoint.parent_checkpoint
+                    else None
+                ),
+                self._load_writes(checkpoint.pending_writes),
+            )
+
+    def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
+        """Get a checkpoint tuple from the database.
+
+        This method retrieves a checkpoint tuple from the Postgres database based on the
+        provided config. If the config contains a "checkpoint_id" key, the checkpoint with
+        the matching thread ID and timestamp is retrieved. Otherwise, the latest checkpoint
+        for the given thread ID is retrieved.
+
+        Args:
+            config (RunnableConfig): The config to use for retrieving the checkpoint.
+
+        Returns:
+            Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found.
+        """
+        return next(self.list(config), None)
+
+    def put(
+        self,
+        config: RunnableConfig,
+        checkpoint: Checkpoint,
+        metadata: CheckpointMetadata,
+        new_versions: ChannelVersions,
+    ) -> RunnableConfig:
+        """Save a checkpoint to the database.
+
+        This method saves a checkpoint to the Postgres database. The checkpoint is associated
+        with the provided config and its parent config (if any).
+
+        Args:
+            config (RunnableConfig): The config to associate with the checkpoint.
+            checkpoint (Checkpoint): The checkpoint to save.
+            metadata (CheckpointMetadata): Additional metadata to save with the checkpoint.
+            new_versions (ChannelVersions): New channel versions as of this write.
+
+        Returns:
+            RunnableConfig: Updated configuration after storing the checkpoint.
+        """
+        configurable = config["configurable"]
+        thread_id: str = configurable["thread_id"]
+        checkpoint_id = get_checkpoint_id(config)
+        checkpoint_ns: str | None = configurable.get("checkpoint_ns") or ""
+
+        checkpoint_copy = cast(dict[str, Any], checkpoint.copy())
+        channel_values = checkpoint_copy.pop("channel_values", {})
+
+        next_config: RunnableConfig = {
+            "configurable": {
+                "thread_id": thread_id,
+                "checkpoint_ns": checkpoint_ns,
+                "checkpoint_id": checkpoint["id"],
+            }
+        }
+
+        with self._lock, transaction.atomic():
+            updated_checkpoint, _ = ConversationCheckpoint.objects.update_or_create(
+                id=checkpoint["id"],
+                thread_id=thread_id,
+                checkpoint_ns=checkpoint_ns,
+                defaults={
+                    "parent_checkpoint_id": checkpoint_id,
+                    "checkpoint": self._dump_json({**checkpoint_copy, "pending_sends": []}),
+                    "metadata": self._dump_json(metadata),
+                },
+            )
+
+            blobs = []
+            for channel, version in new_versions.items():
+                type, blob = (
+                    self.serde.dumps_typed(channel_values[channel]) if channel in channel_values else ("empty", None)
+                )
+                blobs.append(
+                    ConversationCheckpointBlob(
+                        checkpoint=updated_checkpoint,
+                        channel=channel,
+                        version=str(version),
+                        type=type,
+                        blob=blob,
+                    )
+                )
+
+            ConversationCheckpointBlob.objects.bulk_create(blobs, ignore_conflicts=True)
+        return next_config
+
+    def put_writes(
+        self,
+        config: RunnableConfig,
+        writes: Sequence[tuple[str, Any]],
+        task_id: str,
+    ) -> None:
+        """Store intermediate writes linked to a checkpoint.
+
+        This method saves intermediate writes associated with a checkpoint to the Postgres database.
+
+        Args:
+            config (RunnableConfig): Configuration of the related checkpoint.
+            writes (List[Tuple[str, Any]]): List of writes to store.
+            task_id (str): Identifier for the task creating the writes.
+        """
+        configurable = config["configurable"]
+        thread_id: str = configurable["thread_id"]
+        checkpoint_id = get_checkpoint_id(config)
+        checkpoint_ns: str | None = configurable.get("checkpoint_ns") or ""
+
+        with self._lock, transaction.atomic():
+            # `put_writes` and `put` are concurrently called without guaranteeing the call order,
+            # so we need to ensure the checkpoint is created before creating writes.
+            # threading.Lock() prevents race conditions for writes to the same checkpoint within a single pod.
+            checkpoint, _ = ConversationCheckpoint.objects.get_or_create(
+                id=checkpoint_id, thread_id=thread_id, checkpoint_ns=checkpoint_ns
+            )
+
+            writes_to_create = []
+            for idx, (channel, value) in enumerate(writes):
+                type, blob = self.serde.dumps_typed(value)
+                writes_to_create.append(
+                    ConversationCheckpointWrite(
+                        checkpoint=checkpoint,
+                        task_id=task_id,
+                        idx=idx,
+                        channel=channel,
+                        type=type,
+                        blob=blob,
+                    )
+                )
+
+            ConversationCheckpointWrite.objects.bulk_create(
+                writes_to_create,
+                update_conflicts=all(w[0] in WRITES_IDX_MAP for w in writes),
+                unique_fields=["checkpoint", "task_id", "idx"],
+                update_fields=["channel", "type", "blob"],
+            )
+
+    def get_next_version(self, current: Optional[str | int], channel: ChannelProtocol) -> str:
+        if current is None:
+            current_v = 0
+        elif isinstance(current, int):
+            current_v = current
+        else:
+            current_v = int(current.split(".")[0])
+        next_v = current_v + 1
+        next_h = random.random()
+        return f"{next_v:032}.{next_h:016}"
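`get_next_version` produces strings whose ordering is carried entirely by the zero-padded integer prefix; the random suffix just disambiguates concurrent writers. A quick worked example of the format (values illustrative; `random.random()` output is usually already wider than the 16-character pad, so it passes through unpadded):

```python
>>> f"{3:032}"
'00000000000000000000000000000003'
>>> f"{0.8444218515250481:016}"
'0.8444218515250481'
>>> # so a stored channel version looks like:
>>> '00000000000000000000000000000003.0.8444218515250481'
```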
diff --git a/ee/hogai/django_checkpoint/test/test_checkpointer.py b/ee/hogai/django_checkpoint/test/test_checkpointer.py
new file mode 100644
index 0000000000000..2f8fd7f4a60ed
--- /dev/null
+++ b/ee/hogai/django_checkpoint/test/test_checkpointer.py
@@ -0,0 +1,274 @@
+# type: ignore
+
+from typing import Any, TypedDict
+
+from langchain_core.runnables import RunnableConfig
+from langgraph.checkpoint.base import (
+    Checkpoint,
+    CheckpointMetadata,
+    create_checkpoint,
+    empty_checkpoint,
+)
+from langgraph.checkpoint.base.id import uuid6
+from langgraph.errors import NodeInterrupt
+from langgraph.graph import END, START
+from langgraph.graph.state import CompiledStateGraph, StateGraph
+
+from ee.hogai.django_checkpoint.checkpointer import DjangoCheckpointer
+from ee.models.assistant import (
+    Conversation,
+    ConversationCheckpoint,
+    ConversationCheckpointBlob,
+    ConversationCheckpointWrite,
+)
+from posthog.test.base import NonAtomicBaseTest
+
+
+class TestDjangoCheckpointer(NonAtomicBaseTest):
+    CLASS_DATA_LEVEL_SETUP = False
+
+    def _build_graph(self, checkpointer: DjangoCheckpointer):
+        class State(TypedDict):
+            val: int
+
+        graph = StateGraph(State)
+
+        def handle_node1(state: State) -> State:
+            if state["val"] == 1:
+                raise NodeInterrupt("test")
+            return {"val": state["val"] + 1}
+
+        graph.add_node("node1", handle_node1)
+        graph.add_node("node2", lambda state: state)
+
+        graph.add_edge(START, "node1")
+        graph.add_edge("node1", "node2")
+        graph.add_edge("node2", END)
+
+        return graph.compile(checkpointer=checkpointer)
+
+    def test_saver(self):
+        thread1 = Conversation.objects.create(user=self.user, team=self.team)
+        thread2 = Conversation.objects.create(user=self.user, team=self.team)
+
+        config_1: RunnableConfig = {
+            "configurable": {
+                "thread_id": thread1.id,
+                "checkpoint_ns": "",
+            }
+        }
+        chkpnt_1: Checkpoint = empty_checkpoint()
+
+        config_2: RunnableConfig = {
+            "configurable": {
+                "thread_id": thread2.id,
+                "checkpoint_ns": "",
+            }
+        }
+        chkpnt_2: Checkpoint = create_checkpoint(chkpnt_1, {}, 1)
+
+        config_3: RunnableConfig = {
+            "configurable": {
+                "thread_id": thread2.id,
+                "checkpoint_id": chkpnt_2["id"],
+                "checkpoint_ns": "inner",
+            }
+        }
+        chkpnt_3: Checkpoint = empty_checkpoint()
+
+        metadata_1: CheckpointMetadata = {
+            "source": "input",
+            "step": 2,
+            "writes": {},
+            "score": 1,
+        }
+        metadata_2: CheckpointMetadata = {
+            "source": "loop",
+            "step": 1,
+            "writes": {"foo": "bar"},
+            "score": None,
+        }
+        metadata_3: CheckpointMetadata = {}
+
+        test_data = {
+            "configs": [config_1, config_2, config_3],
+            "checkpoints": [chkpnt_1, chkpnt_2, chkpnt_3],
+            "metadata": [metadata_1, metadata_2, metadata_3],
+        }
+
+        saver = DjangoCheckpointer()
+
+        configs = test_data["configs"]
+        checkpoints = test_data["checkpoints"]
+        metadata = test_data["metadata"]
+
+        saver.put(configs[0], checkpoints[0], metadata[0], {})
+        saver.put(configs[1], checkpoints[1], metadata[1], {})
+        saver.put(configs[2], checkpoints[2], metadata[2], {})
+
+        # call method / assertions
+        query_1 = {"source": "input"}  # search by 1 key
+        query_2 = {
+            "step": 1,
+            "writes": {"foo": "bar"},
+        }  # search by multiple keys
+        query_3: dict[str, Any] = {}  # search by no keys, return all checkpoints
+        query_4 = {"source": "update", "step": 1}  # no match
+
+        search_results_1 = list(saver.list(None, filter=query_1))
+        assert len(search_results_1) == 1
+        assert search_results_1[0].metadata == metadata[0]
+
+        search_results_2 = list(saver.list(None, filter=query_2))
+        assert len(search_results_2) == 1
+        assert search_results_2[0].metadata == metadata[1]
+
+        search_results_3 = list(saver.list(None, filter=query_3))
+        assert len(search_results_3) == 3
+
+        search_results_4 = list(saver.list(None, filter=query_4))
+        assert len(search_results_4) == 0
+
+        # search by config (defaults to checkpoints across all namespaces)
+        search_results_5 = list(saver.list({"configurable": {"thread_id": thread2.id}}))
+        assert len(search_results_5) == 2
+        assert {
+            search_results_5[0].config["configurable"]["checkpoint_ns"],
+            search_results_5[1].config["configurable"]["checkpoint_ns"],
+        } == {"", "inner"}
+
+    def test_channel_versions(self):
+        thread1 = Conversation.objects.create(user=self.user, team=self.team)
+
+        chkpnt = {
+            "v": 1,
+            "ts": "2024-07-31T20:14:19.804150+00:00",
+            "id": str(uuid6(clock_seq=-2)),
+            "channel_values": {
+                "post": "hog",
+                "node": "node",
+            },
+            "channel_versions": {
+                "__start__": 2,
+                "my_key": 3,
+                "start:node": 3,
+                "node": 3,
+            },
+            "versions_seen": {
+                "__input__": {},
+                "__start__": {"__start__": 1},
+                "node": {"start:node": 2},
+            },
+            "pending_sends": [],
+        }
+        metadata = {"meta": "key"}
+
+        write_config = {"configurable": {"thread_id": thread1.id, "checkpoint_ns": ""}}
+        read_config = {"configurable": {"thread_id": thread1.id}}
+
+        saver = DjangoCheckpointer()
+        saver.put(write_config, chkpnt, metadata, {})
+
+        checkpoint = ConversationCheckpoint.objects.first()
+        self.assertIsNotNone(checkpoint)
+        self.assertEqual(checkpoint.thread, thread1)
+        self.assertEqual(checkpoint.checkpoint_ns, "")
+        self.assertEqual(str(checkpoint.id), chkpnt["id"])
+        self.assertIsNone(checkpoint.parent_checkpoint)
+        chkpnt.pop("channel_values")
+        self.assertEqual(checkpoint.checkpoint, chkpnt)
+        self.assertEqual(checkpoint.metadata, metadata)
+
+        checkpoints = list(saver.list(read_config))
+        self.assertEqual(len(checkpoints), 1)
+
+        checkpoint = saver.get(read_config)
+        self.assertEqual(checkpoint, checkpoints[0].checkpoint)
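The `_build_graph` fixture above exercises the pattern the assistant relies on in production: a `NodeInterrupt` pauses the graph, the checkpointer persists the partial state, and a later `invoke(None, ...)` with the same `thread_id` resumes it. A condensed sketch of that flow outside the test harness (`build_graph` and `conversation` are assumed stand-ins for any graph compiled with this checkpointer and a `Conversation` row):

```python
from ee.hogai.django_checkpoint.checkpointer import DjangoCheckpointer

checkpointer = DjangoCheckpointer()
graph = build_graph(checkpointer)  # assumption: any StateGraph compiled with checkpointer=
config = {"configurable": {"thread_id": str(conversation.id)}}

graph.invoke({"val": 1}, config=config)     # node1 raises NodeInterrupt("test")
snapshot = graph.get_state(config)          # snapshot.next is truthy: the run is paused
graph.update_state(config, {"val": 2})      # patch the saved state...
result = graph.invoke(None, config=config)  # ...and resume from the checkpoint
```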
+    def test_put_copies_checkpoint(self):
+        thread1 = Conversation.objects.create(user=self.user, team=self.team)
+        chkpnt = {
+            "v": 1,
+            "ts": "2024-07-31T20:14:19.804150+00:00",
+            "id": str(uuid6(clock_seq=-2)),
+            "channel_values": {
+                "post": "hog",
+                "node": "node",
+            },
+            "channel_versions": {
+                "__start__": 2,
+                "my_key": 3,
+                "start:node": 3,
+                "node": 3,
+            },
+            "versions_seen": {
+                "__input__": {},
+                "__start__": {"__start__": 1},
+                "node": {"start:node": 2},
+            },
+            "pending_sends": [],
+        }
+        metadata = {"meta": "key"}
+        write_config = {"configurable": {"thread_id": thread1.id, "checkpoint_ns": ""}}
+        saver = DjangoCheckpointer()
+        saver.put(write_config, chkpnt, metadata, {})
+        self.assertIn("channel_values", chkpnt)
+
+    def test_concurrent_puts_and_put_writes(self):
+        graph: CompiledStateGraph = self._build_graph(DjangoCheckpointer())
+        thread = Conversation.objects.create(user=self.user, team=self.team)
+        config = {"configurable": {"thread_id": str(thread.id)}}
+        graph.invoke(
+            {"val": 0},
+            config=config,
+        )
+        self.assertEqual(len(ConversationCheckpoint.objects.all()), 4)
+        self.assertEqual(len(ConversationCheckpointBlob.objects.all()), 10)
+        self.assertEqual(len(ConversationCheckpointWrite.objects.all()), 6)
+
+    def test_resuming(self):
+        checkpointer = DjangoCheckpointer()
+        graph: CompiledStateGraph = self._build_graph(checkpointer)
+        thread = Conversation.objects.create(user=self.user, team=self.team)
+        config = {"configurable": {"thread_id": str(thread.id)}}
+
+        graph.invoke(
+            {"val": 1},
+            config=config,
+        )
+        snapshot = graph.get_state(config)
+        self.assertIsNotNone(snapshot.next)
+        self.assertEqual(snapshot.tasks[0].interrupts[0].value, "test")
+
+        self.assertEqual(len(ConversationCheckpoint.objects.all()), 2)
+        self.assertEqual(len(ConversationCheckpointBlob.objects.all()), 4)
+        self.assertEqual(len(ConversationCheckpointWrite.objects.all()), 3)
+        self.assertEqual(len(list(checkpointer.list(config))), 2)
+
+        latest_checkpoint = ConversationCheckpoint.objects.last()
+        latest_write = ConversationCheckpointWrite.objects.filter(checkpoint=latest_checkpoint).first()
+        actual_checkpoint = checkpointer.get_tuple(config)
+        self.assertIsNotNone(actual_checkpoint)
+        self.assertIsNotNone(latest_write)
+        self.assertEqual(len(latest_checkpoint.writes.all()), 1)
+        blobs = list(latest_checkpoint.blobs.all())
+        self.assertEqual(len(blobs), 3)
+        self.assertEqual(actual_checkpoint.checkpoint["id"], str(latest_checkpoint.id))
+        self.assertEqual(len(actual_checkpoint.pending_writes), 1)
+        self.assertEqual(actual_checkpoint.pending_writes[0][0], str(latest_write.task_id))
+
+        graph.update_state(config, {"val": 2})
+        # add the value update checkpoint
+        self.assertEqual(len(ConversationCheckpoint.objects.all()), 3)
+        self.assertEqual(len(ConversationCheckpointBlob.objects.all()), 6)
+        self.assertEqual(len(ConversationCheckpointWrite.objects.all()), 5)
+        self.assertEqual(len(list(checkpointer.list(config))), 3)
+
+        res = graph.invoke(None, config=config)
+        self.assertEqual(len(ConversationCheckpoint.objects.all()), 5)
+        self.assertEqual(len(ConversationCheckpointBlob.objects.all()), 12)
+        self.assertEqual(len(ConversationCheckpointWrite.objects.all()), 9)
+        self.assertEqual(len(list(checkpointer.list(config))), 5)
+        self.assertEqual(res, {"val": 3})
+        snapshot = graph.get_state(config)
+        self.assertFalse(snapshot.next)
diff --git a/ee/hogai/eval/tests/test_eval_funnel_generator.py b/ee/hogai/eval/tests/test_eval_funnel_generator.py
index cd7e93b260ae9..4d7876ca6f73c 100644
--- a/ee/hogai/eval/tests/test_eval_funnel_generator.py
+++ b/ee/hogai/eval/tests/test_eval_funnel_generator.py
@@ -1,9 +1,11 @@
+from typing import cast
+
 from langgraph.graph.state import CompiledStateGraph

 from ee.hogai.assistant import AssistantGraph
 from ee.hogai.eval.utils import EvalBaseTest
-from ee.hogai.utils import AssistantNodeName
-from posthog.schema import AssistantFunnelsQuery, HumanMessage
+from ee.hogai.utils.types import AssistantNodeName, AssistantState
+from posthog.schema import AssistantFunnelsQuery, HumanMessage, VisualizationMessage


 class TestEvalFunnelGenerator(EvalBaseTest):
@@ -14,8 +16,11 @@ def _call_node(self, query: str, plan: str) -> AssistantFunnelsQuery:
             .add_funnel_generator(AssistantNodeName.END)
             .compile()
         )
-        state = graph.invoke({"messages": [HumanMessage(content=query)], "plan": plan})
-        return state["messages"][-1].answer
+        state = graph.invoke(
+            AssistantState(messages=[HumanMessage(content=query)], plan=plan),
+            self._get_config(),
+        )
+        return cast(VisualizationMessage, AssistantState.model_validate(state).messages[-1]).answer

     def test_node_replaces_equals_with_contains(self):
         query = "what is the conversion rate from a page view to sign up for users with name John?"
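The eval helpers now build graph inputs as `AssistantState` models and re-validate whatever LangGraph hands back, since `invoke` returns a plain dict. The round-trip is just pydantic validation, as in this minimal sketch (`graph` and `config` assumed; `AssistantState` fields inferred from this diff):

```python
from ee.hogai.utils.types import AssistantState
from posthog.schema import HumanMessage

state_in = AssistantState(messages=[HumanMessage(content="Show the $pageview trend")], plan="Plan")
raw = graph.invoke(state_in, config)            # LangGraph returns a plain dict...
state_out = AssistantState.model_validate(raw)  # ...so re-validate it into the typed model
last_message = state_out.messages[-1]
```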
.compile() ) - state = graph.invoke({"messages": [HumanMessage(content=query)], "plan": plan}) - return state["messages"][-1].answer + state = graph.invoke( + AssistantState(messages=[HumanMessage(content=query)], plan=plan), + self._get_config(), + ) + return cast(VisualizationMessage, AssistantState.model_validate(state).messages[-1]).answer def test_node_replaces_equals_with_contains(self): query = "what is pageview trend for users with name John?" diff --git a/ee/hogai/eval/tests/test_eval_trends_planner.py b/ee/hogai/eval/tests/test_eval_trends_planner.py index e7ea741d03687..d4fbff456a91c 100644 --- a/ee/hogai/eval/tests/test_eval_trends_planner.py +++ b/ee/hogai/eval/tests/test_eval_trends_planner.py @@ -5,7 +5,7 @@ from ee.hogai.assistant import AssistantGraph from ee.hogai.eval.utils import EvalBaseTest -from ee.hogai.utils import AssistantNodeName +from ee.hogai.utils.types import AssistantNodeName, AssistantState from posthog.schema import HumanMessage @@ -40,8 +40,11 @@ def _call_node(self, query): .add_trends_planner(AssistantNodeName.END) .compile() ) - state = graph.invoke({"messages": [HumanMessage(content=query)]}) - return state["plan"] + state = graph.invoke( + AssistantState(messages=[HumanMessage(content=query)]), + self._get_config(), + ) + return AssistantState.model_validate(state).plan or "" def test_no_excessive_property_filters(self): query = "Show the $pageview trend" diff --git a/ee/hogai/eval/utils.py b/ee/hogai/eval/utils.py index 1e50a75daefa2..6e03c4cfafa9f 100644 --- a/ee/hogai/eval/utils.py +++ b/ee/hogai/eval/utils.py @@ -3,15 +3,25 @@ import pytest from django.test import override_settings from flaky import flaky +from langchain_core.runnables import RunnableConfig +from ee.models.assistant import Conversation from posthog.demo.matrix.manager import MatrixManager from posthog.tasks.demo_create_data import HedgeboxMatrix -from posthog.test.base import BaseTest +from posthog.test.base import NonAtomicBaseTest @pytest.mark.skipif(os.environ.get("DEEPEVAL") != "YES", reason="Only runs for the assistant evaluation") @flaky(max_runs=3, min_passes=1) -class EvalBaseTest(BaseTest): +class EvalBaseTest(NonAtomicBaseTest): + def _get_config(self) -> RunnableConfig: + conversation = Conversation.objects.create(team=self.team, user=self.user) + return { + "configurable": { + "thread_id": conversation.id, + } + } + @classmethod def setUpTestData(cls): super().setUpTestData() diff --git a/ee/hogai/funnels/nodes.py b/ee/hogai/funnels/nodes.py index a55bc223847f2..6f71305e0b796 100644 --- a/ee/hogai/funnels/nodes.py +++ b/ee/hogai/funnels/nodes.py @@ -6,12 +6,12 @@ from ee.hogai.schema_generator.nodes import SchemaGeneratorNode, SchemaGeneratorToolsNode from ee.hogai.schema_generator.utils import SchemaGeneratorOutput from ee.hogai.taxonomy_agent.nodes import TaxonomyAgentPlannerNode, TaxonomyAgentPlannerToolsNode -from ee.hogai.utils import AssistantState +from ee.hogai.utils.types import AssistantState, PartialAssistantState from posthog.schema import AssistantFunnelsQuery class FunnelPlannerNode(TaxonomyAgentPlannerNode): - def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: toolkit = FunnelsTaxonomyAgentToolkit(self._team) prompt = ChatPromptTemplate.from_messages( [ @@ -23,7 +23,7 @@ def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: class FunnelPlannerToolsNode(TaxonomyAgentPlannerToolsNode): - def run(self, state: 
diff --git a/ee/hogai/funnels/nodes.py b/ee/hogai/funnels/nodes.py
index a55bc223847f2..6f71305e0b796 100644
--- a/ee/hogai/funnels/nodes.py
+++ b/ee/hogai/funnels/nodes.py
@@ -6,12 +6,12 @@
 from ee.hogai.schema_generator.nodes import SchemaGeneratorNode, SchemaGeneratorToolsNode
 from ee.hogai.schema_generator.utils import SchemaGeneratorOutput
 from ee.hogai.taxonomy_agent.nodes import TaxonomyAgentPlannerNode, TaxonomyAgentPlannerToolsNode
-from ee.hogai.utils import AssistantState
+from ee.hogai.utils.types import AssistantState, PartialAssistantState
 from posthog.schema import AssistantFunnelsQuery


 class FunnelPlannerNode(TaxonomyAgentPlannerNode):
-    def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
+    def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState:
         toolkit = FunnelsTaxonomyAgentToolkit(self._team)
         prompt = ChatPromptTemplate.from_messages(
             [
@@ -23,7 +23,7 @@ def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:


 class FunnelPlannerToolsNode(TaxonomyAgentPlannerToolsNode):
-    def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
+    def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState:
         toolkit = FunnelsTaxonomyAgentToolkit(self._team)
         return super()._run_with_toolkit(state, toolkit, config=config)
@@ -36,7 +36,7 @@ class FunnelGeneratorNode(SchemaGeneratorNode[AssistantFunnelsQuery]):
     OUTPUT_MODEL = FunnelsSchemaGeneratorOutput
     OUTPUT_SCHEMA = FUNNEL_SCHEMA

-    def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
+    def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState:
         prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", FUNNEL_SYSTEM_PROMPT),
diff --git a/ee/hogai/funnels/prompts.py b/ee/hogai/funnels/prompts.py
index b2deec894a070..3808809c173a7 100644
--- a/ee/hogai/funnels/prompts.py
+++ b/ee/hogai/funnels/prompts.py
@@ -12,6 +12,8 @@

 {{react_format}}

+{{react_human_in_the_loop}}
+

 Below you will find information on how to correctly discover the taxonomy of the user's data.
diff --git a/ee/hogai/funnels/test/test_nodes.py b/ee/hogai/funnels/test/test_nodes.py
index 5c65b14110599..4f4e9fca0e5d4 100644
--- a/ee/hogai/funnels/test/test_nodes.py
+++ b/ee/hogai/funnels/test/test_nodes.py
@@ -4,6 +4,7 @@
 from langchain_core.runnables import RunnableLambda

 from ee.hogai.funnels.nodes import FunnelGeneratorNode, FunnelsSchemaGeneratorOutput
+from ee.hogai.utils.types import AssistantState, PartialAssistantState
 from posthog.schema import (
     AssistantFunnelsQuery,
     HumanMessage,
@@ -15,6 +16,7 @@
 @override_settings(IN_UNIT_TESTING=True)
 class TestFunnelsGeneratorNode(ClickhouseTestMixin, APIBaseTest):
     def setUp(self):
+        super().setUp()
         self.schema = AssistantFunnelsQuery(series=[])

     def test_node_runs(self):
@@ -24,16 +26,13 @@ def test_node_runs(self):
             lambda _: FunnelsSchemaGeneratorOutput(query=self.schema).model_dump()
         )
         new_state = node.run(
-            {
-                "messages": [HumanMessage(content="Text")],
-                "plan": "Plan",
-            },
+            AssistantState(messages=[HumanMessage(content="Text")], plan="Plan"),
             {},
         )
         self.assertEqual(
             new_state,
-            {
-                "messages": [VisualizationMessage(answer=self.schema, plan="Plan", done=True)],
-                "intermediate_steps": None,
-            },
+            PartialAssistantState(
+                messages=[VisualizationMessage(answer=self.schema, plan="Plan", id=new_state.messages[0].id)],
+                intermediate_steps=None,
+            ),
         )
diff --git a/ee/hogai/funnels/toolkit.py b/ee/hogai/funnels/toolkit.py
index 8d6407027aac1..ae603519cc331 100644
--- a/ee/hogai/funnels/toolkit.py
+++ b/ee/hogai/funnels/toolkit.py
@@ -1,5 +1,5 @@
 from ee.hogai.taxonomy_agent.toolkit import TaxonomyAgentToolkit, ToolkitTool
-from ee.hogai.utils import dereference_schema
+from ee.hogai.utils.helpers import dereference_schema
 from posthog.schema import AssistantFunnelsQuery
diff --git a/ee/hogai/graph.py b/ee/hogai/graph.py
index 79e5f914097ce..bf961d6bb9aa8 100644
--- a/ee/hogai/graph.py
+++ b/ee/hogai/graph.py
@@ -1,10 +1,10 @@
 from collections.abc import Hashable
 from typing import Optional, cast

-from langfuse.callback import CallbackHandler
+from langchain_core.runnables.base import RunnableLike
 from langgraph.graph.state import StateGraph

-from ee import settings
+from ee.hogai.django_checkpoint.checkpointer import DjangoCheckpointer
 from ee.hogai.funnels.nodes import (
     FunnelGeneratorNode,
     FunnelGeneratorToolsNode,
@@ -19,15 +19,10 @@
     TrendsPlannerNode,
     TrendsPlannerToolsNode,
 )
-from ee.hogai.utils import AssistantNodeName, AssistantState
+from ee.hogai.utils.types import AssistantNodeName, AssistantState
 from posthog.models.team.team import Team

-if settings.LANGFUSE_PUBLIC_KEY:
-    langfuse_handler = CallbackHandler(
-        public_key=settings.LANGFUSE_PUBLIC_KEY, secret_key=settings.LANGFUSE_SECRET_KEY, host=settings.LANGFUSE_HOST
-    )
-else:
-    langfuse_handler = None
+checkpointer = DjangoCheckpointer()


 class AssistantGraph:
@@ -45,10 +40,14 @@ def add_edge(self, from_node: AssistantNodeName, to_node: AssistantNodeName):
         self._graph.add_edge(from_node, to_node)
         return self

+    def add_node(self, node: AssistantNodeName, action: RunnableLike):
+        self._graph.add_node(node, action)
+        return self
+
     def compile(self):
         if not self._has_start_node:
             raise ValueError("Start node not added to the graph")
-        return self._graph.compile()
+        return self._graph.compile(checkpointer=checkpointer)

     def add_start(self):
         return self.add_edge(AssistantNodeName.START, AssistantNodeName.ROUTER)
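The switch from the Langfuse callback handler to `compile(checkpointer=checkpointer)` is what makes conversations durable: every super-step is persisted under the `thread_id` supplied in the run config, which is exactly what `_get_config` in the eval utils above provides. A minimal, self-contained sketch of that contract, with LangGraph's in-memory saver standing in for `DjangoCheckpointer` (whose implementation isn't part of this diff):

```python
# Sketch: thread-scoped persistence, MemorySaver standing in for DjangoCheckpointer.
from typing import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    count: int


def bump(state: State) -> State:
    return {"count": state["count"] + 1}


builder = StateGraph(State)
builder.add_node("bump", bump)
builder.add_edge(START, "bump")
builder.add_edge("bump", END)
graph = builder.compile(checkpointer=MemorySaver())

# The thread_id plays the role of Conversation.id in _get_config above.
config = {"configurable": {"thread_id": "conversation-1"}}
graph.invoke({"count": 0}, config)

# The checkpoint for this thread now holds the updated state.
print(graph.get_state(config).values)  # {'count': 1}
```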
diff --git a/ee/hogai/router/nodes.py b/ee/hogai/router/nodes.py
index c9151faaabc29..f6aeacdebbe6b 100644
--- a/ee/hogai/router/nodes.py
+++ b/ee/hogai/router/nodes.py
@@ -1,4 +1,5 @@
 from typing import Literal, cast
+from uuid import uuid4

 from langchain_core.messages import AIMessage as LangchainAIMessage, BaseMessage
 from langchain_core.prompts import ChatPromptTemplate
@@ -11,7 +12,8 @@
     ROUTER_SYSTEM_PROMPT,
     ROUTER_USER_PROMPT,
 )
-from ee.hogai.utils import AssistantState, AssistantNode
+from ee.hogai.utils.nodes import AssistantNode
+from ee.hogai.utils.types import AssistantState, PartialAssistantState
 from posthog.schema import HumanMessage, RouterMessage

 RouteName = Literal["trends", "funnel"]
@@ -22,7 +24,7 @@ class RouterOutput(BaseModel):


 class RouterNode(AssistantNode):
-    def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
+    def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState:
         prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", ROUTER_SYSTEM_PROMPT),
@@ -31,10 +33,10 @@ def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
         ) + self._construct_messages(state)
         chain = prompt | self._model
         output: RouterOutput = chain.invoke({}, config)
-        return {"messages": [RouterMessage(content=output.visualization_type)]}
+        return PartialAssistantState(messages=[RouterMessage(content=output.visualization_type, id=str(uuid4()))])

     def router(self, state: AssistantState) -> RouteName:
-        last_message = state["messages"][-1]
+        last_message = state.messages[-1]
         if isinstance(last_message, RouterMessage):
             return cast(RouteName, last_message.content)
         raise ValueError("Invalid route.")
@@ -47,7 +49,7 @@ def _model(self):

     def _construct_messages(self, state: AssistantState):
         history: list[BaseMessage] = []
-        for message in state["messages"]:
+        for message in state.messages:
             if isinstance(message, HumanMessage):
                 history += ChatPromptTemplate.from_messages(
                     [("user", ROUTER_USER_PROMPT.strip())], template_format="mustache"
diff --git a/ee/hogai/router/test/test_nodes.py b/ee/hogai/router/test/test_nodes.py
index 06014fb0b9f59..53074a381b804 100644
--- a/ee/hogai/router/test/test_nodes.py
+++ b/ee/hogai/router/test/test_nodes.py
@@ -2,11 +2,11 @@
 from unittest.mock import patch

 from django.test import override_settings
-from langchain_core.messages import AIMessage as LangchainAIMessage
-from langchain_core.messages import HumanMessage as LangchainHumanMessage
+from langchain_core.messages import AIMessage as LangchainAIMessage, HumanMessage as LangchainHumanMessage
 from langchain_core.runnables import RunnableLambda

 from ee.hogai.router.nodes import RouterNode, RouterOutput
+from ee.hogai.utils.types import AssistantState, PartialAssistantState
 from posthog.schema import (
     HumanMessage,
     RouterMessage,
@@ -19,7 +19,7 @@ class TestRouterNode(ClickhouseTestMixin, APIBaseTest):
     def test_router(self):
         node = RouterNode(self.team)
-        state: Any = {"messages": [RouterMessage(content="trends")]}
+        state: Any = AssistantState(messages=[RouterMessage(content="trends")])
         self.assertEqual(node.router(state), "trends")

     def test_node_runs(self):
@@ -28,28 +28,36 @@ def test_node_runs(self):
             return_value=RunnableLambda(lambda _: RouterOutput(visualization_type="funnel")),
         ):
             node = RouterNode(self.team)
-            state: Any = {"messages": [HumanMessage(content="generate trends")]}
-            self.assertEqual(node.run(state, {}), {"messages": [RouterMessage(content="funnel")]})
+            state: Any = AssistantState(messages=[HumanMessage(content="generate trends")])
+            next_state = node.run(state, {})
+            self.assertEqual(
+                next_state,
+                PartialAssistantState(messages=[RouterMessage(content="funnel", id=next_state.messages[0].id)]),
+            )

         with patch(
             "ee.hogai.router.nodes.RouterNode._model",
             return_value=RunnableLambda(lambda _: RouterOutput(visualization_type="trends")),
         ):
             node = RouterNode(self.team)
-            state: Any = {"messages": [HumanMessage(content="generate trends")]}
-            self.assertEqual(node.run(state, {}), {"messages": [RouterMessage(content="trends")]})
+            state: Any = AssistantState(messages=[HumanMessage(content="generate trends")])
+            next_state = node.run(state, {})
+            self.assertEqual(
+                next_state,
+                PartialAssistantState(messages=[RouterMessage(content="trends", id=next_state.messages[0].id)]),
+            )

     def test_node_reconstructs_conversation(self):
         node = RouterNode(self.team)
-        state: Any = {"messages": [HumanMessage(content="generate trends")]}
+        state: Any = AssistantState(messages=[HumanMessage(content="generate trends")])
         self.assertEqual(node._construct_messages(state), [LangchainHumanMessage(content="Question: generate trends")])

-        state = {
-            "messages": [
+        state = AssistantState(
+            messages=[
                 HumanMessage(content="generate trends"),
                 RouterMessage(content="trends"),
                 VisualizationMessage(),
             ]
-        }
+        )
         self.assertEqual(
             node._construct_messages(state),
             [LangchainHumanMessage(content="Question: generate trends"), LangchainAIMessage(content="trends")],
diff --git a/ee/hogai/schema_generator/nodes.py b/ee/hogai/schema_generator/nodes.py
index f2d383d5c1e30..4bed02fd462cc 100644
--- a/ee/hogai/schema_generator/nodes.py
+++ b/ee/hogai/schema_generator/nodes.py
@@ -1,10 +1,16 @@
-import itertools
 import xml.etree.ElementTree as ET
+from collections.abc import Sequence
 from functools import cached_property
 from typing import Generic, Optional, TypeVar
+from uuid import uuid4

 from langchain_core.agents import AgentAction
-from langchain_core.messages import AIMessage as LangchainAssistantMessage, BaseMessage, merge_message_runs
+from langchain_core.messages import (
+    AIMessage as LangchainAssistantMessage,
+    BaseMessage,
+    HumanMessage as LangchainHumanMessage,
+    merge_message_runs,
+)
 from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.runnables import RunnableConfig
 from langchain_openai import ChatOpenAI
@@ -23,10 +29,14 @@
     QUESTION_PROMPT,
 )
 from ee.hogai.schema_generator.utils import SchemaGeneratorOutput
-from ee.hogai.utils import AssistantNode, AssistantState, filter_visualization_conversation
+from ee.hogai.utils.helpers import find_last_message_of_type, slice_messages_to_conversation_start
+from ee.hogai.utils.nodes import AssistantNode
+from ee.hogai.utils.types import AssistantMessageUnion, AssistantState, PartialAssistantState
 from posthog.models.group_type_mapping import GroupTypeMapping
 from posthog.schema import (
+    AssistantMessage,
     FailureMessage,
+    HumanMessage,
     VisualizationMessage,
 )
@@ -63,9 +73,10 @@ def _run_with_prompt(
         state: AssistantState,
         prompt: ChatPromptTemplate,
         config: Optional[RunnableConfig] = None,
-    ) -> AssistantState:
-        generated_plan = state.get("plan", "")
-        intermediate_steps = state.get("intermediate_steps") or []
+    ) -> PartialAssistantState:
+        start_id = state.start_id
+        generated_plan = state.plan or ""
+        intermediate_steps = state.intermediate_steps or []
         validation_error_message = intermediate_steps[-1][1] if intermediate_steps else None
         generation_prompt = prompt + self._construct_messages(state, validation_error_message=validation_error_message)
@@ -79,35 +90,36 @@ def _run_with_prompt(
         except PydanticOutputParserException as e:
             # Generation step is expensive. After a second unsuccessful attempt, it's better to send a failure message.
             if len(intermediate_steps) >= 2:
-                return {
-                    "messages": [
+                return PartialAssistantState(
+                    messages=[
                         FailureMessage(
                             content=f"Oops! It looks like I’m having trouble generating this {self.INSIGHT_NAME} insight. Could you please try again?"
                         )
                     ],
-                    "intermediate_steps": None,
-                }
+                    intermediate_steps=None,
+                )

-            return {
-                "intermediate_steps": [
+            return PartialAssistantState(
+                intermediate_steps=[
                     *intermediate_steps,
                     (AgentAction("handle_incorrect_response", e.llm_output, e.validation_message), None),
                 ],
-            }
+            )

-        return {
-            "messages": [
+        return PartialAssistantState(
+            messages=[
                 VisualizationMessage(
                     plan=generated_plan,
                     answer=message.query,
-                    done=True,
+                    initiator=start_id,
+                    id=str(uuid4()),
                 )
             ],
-            "intermediate_steps": None,
-        }
+            intermediate_steps=None,
+        )

     def router(self, state: AssistantState):
-        if state.get("intermediate_steps") is not None:
+        if state.intermediate_steps:
             return "tools"
         return "next"
@@ -123,15 +135,25 @@ def _group_mapping_prompt(self) -> str:
         )
         return ET.tostring(root, encoding="unicode")

+    def _get_human_viz_message_mapping(self, messages: Sequence[AssistantMessageUnion]) -> dict[str, int]:
+        mapping: dict[str, int] = {}
+        for idx, msg in enumerate(messages):
+            if isinstance(msg, VisualizationMessage) and msg.initiator is not None:
+                mapping[msg.initiator] = idx
+        return mapping
+
     def _construct_messages(
         self, state: AssistantState, validation_error_message: Optional[str] = None
     ) -> list[BaseMessage]:
         """
        Reconstruct the conversation for the generation. Take all previously generated questions, plans, and schemas, and return the history.
         """
-        messages = state.get("messages", [])
-        generated_plan = state.get("plan", "")
+        messages = state.messages
+        generated_plan = state.plan
+        start_id = state.start_id
+        if start_id is not None:
+            messages = slice_messages_to_conversation_start(messages, start_id)
         if len(messages) == 0:
             return []
@@ -141,43 +163,61 @@ def _construct_messages(
             )
         ]

-        human_messages, visualization_messages = filter_visualization_conversation(messages)
-        first_ai_message = True
+        msg_mapping = self._get_human_viz_message_mapping(messages)
+        initiator_message = messages[-1]
+        last_viz_message = find_last_message_of_type(messages, VisualizationMessage)
+
+        for message in messages:
+            # The initial human message and the new plan are added to the end of the conversation.
+            if message == initiator_message:
+                continue
+            if isinstance(message, HumanMessage):
+                if message.id and (viz_message_idx := msg_mapping.get(message.id)):
+                    # Plans go first.
+                    viz_message = messages[viz_message_idx]
+                    if isinstance(viz_message, VisualizationMessage):
+                        conversation.append(
+                            HumanMessagePromptTemplate.from_template(PLAN_PROMPT, template_format="mustache").format(
+                                plan=viz_message.plan or ""
+                            )
+                        )

-        for idx, (human_message, ai_message) in enumerate(
-            itertools.zip_longest(human_messages, visualization_messages)
-        ):
-            # Plans go first
-            if ai_message:
-                conversation.append(
-                    HumanMessagePromptTemplate.from_template(
-                        PLAN_PROMPT if first_ai_message else NEW_PLAN_PROMPT,
-                        template_format="mustache",
-                    ).format(plan=ai_message.plan or "")
-                )
-                first_ai_message = False
-            elif generated_plan:
-                conversation.append(
-                    HumanMessagePromptTemplate.from_template(
-                        PLAN_PROMPT if first_ai_message else NEW_PLAN_PROMPT,
-                        template_format="mustache",
-                    ).format(plan=generated_plan)
+                    # Augment previous initiator messages with the question prompt.
+                    conversation.append(
+                        HumanMessagePromptTemplate.from_template(QUESTION_PROMPT, template_format="mustache").format(
+                            question=message.content
+                        )
+                    )
+                # Otherwise, just append the human message.
+                else:
+                    conversation.append(LangchainHumanMessage(content=message.content))
+            # Summary and human-in-the-loop messages.
+            elif isinstance(message, AssistantMessage):
+                conversation.append(LangchainAssistantMessage(content=message.content))
+
+        # Include only the last generated schema, as earlier ones don't add more context.
+        if last_viz_message:
+            conversation.append(
+                LangchainAssistantMessage(
+                    content=last_viz_message.answer.model_dump_json() if last_viz_message.answer else ""
                 )
-
-            # Then questions
-            if human_message:
+            )
+        # Add the initiator message and the generated plan to the end, so the instructions are clear.
+        if isinstance(initiator_message, HumanMessage):
+            if generated_plan:
+                plan_prompt = PLAN_PROMPT if messages[0] == initiator_message else NEW_PLAN_PROMPT
                 conversation.append(
-                    HumanMessagePromptTemplate.from_template(QUESTION_PROMPT, template_format="mustache").format(
-                        question=human_message.content
+                    HumanMessagePromptTemplate.from_template(plan_prompt, template_format="mustache").format(
+                        plan=generated_plan or ""
                     )
                 )
-
-        # Then schemas, but include only last generated schema because it doesn't need more context.
-        if ai_message and idx + 1 == len(visualization_messages):
-            conversation.append(
-                LangchainAssistantMessage(content=ai_message.answer.model_dump_json() if ai_message.answer else "")
+            conversation.append(
+                HumanMessagePromptTemplate.from_template(QUESTION_PROMPT, template_format="mustache").format(
+                    question=initiator_message.content
                 )
+            )

+        # Retries must be added to the end of the conversation.
         if validation_error_message:
             conversation.append(
                 HumanMessagePromptTemplate.from_template(FAILOVER_PROMPT, template_format="mustache").format(
@@ -193,10 +233,10 @@ class SchemaGeneratorToolsNode(AssistantNode):
     Used for failover from generation errors.
     """

-    def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
-        intermediate_steps = state.get("intermediate_steps", [])
+    def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState:
+        intermediate_steps = state.intermediate_steps or []
         if not intermediate_steps:
-            return state
+            return PartialAssistantState()

         action, _ = intermediate_steps[-1]
         prompt = (
@@ -205,9 +245,9 @@ def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState:
             .content
         )

-        return {
-            "intermediate_steps": [
+        return PartialAssistantState(
+            intermediate_steps=[
                 *intermediate_steps[:-1],
                 (action, str(prompt)),
             ]
-        }
+        )
diff --git a/ee/hogai/schema_generator/test/test_nodes.py b/ee/hogai/schema_generator/test/test_nodes.py
index 795045af50b56..b44154b93b927 100644
--- a/ee/hogai/schema_generator/test/test_nodes.py
+++ b/ee/hogai/schema_generator/test/test_nodes.py
@@ -4,10 +4,11 @@
 from django.test import override_settings
 from langchain_core.agents import AgentAction
 from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.runnables import RunnableLambda
+from langchain_core.runnables import RunnableConfig, RunnableLambda

 from ee.hogai.schema_generator.nodes import SchemaGeneratorNode, SchemaGeneratorToolsNode
 from ee.hogai.schema_generator.utils import SchemaGeneratorOutput
+from ee.hogai.utils.types import AssistantState, PartialAssistantState
 from posthog.schema import (
     AssistantMessage,
     AssistantTrendsQuery,
@@ -16,7 +17,7 @@
     RouterMessage,
     VisualizationMessage,
 )
-from posthog.test.base import APIBaseTest, ClickhouseTestMixin
+from posthog.test.base import BaseTest

 TestSchema = SchemaGeneratorOutput[AssistantTrendsQuery]
@@ -26,7 +27,7 @@ class DummyGeneratorNode(SchemaGeneratorNode[AssistantTrendsQuery]):
     OUTPUT_MODEL = SchemaGeneratorOutput[AssistantTrendsQuery]
     OUTPUT_SCHEMA = {}

-    def run(self, state, config):
+    def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState:
         prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", "system_prompt"),
@@ -36,8 +37,9 @@

 @override_settings(IN_UNIT_TESTING=True)
-class TestSchemaGeneratorNode(ClickhouseTestMixin, APIBaseTest):
+class TestSchemaGeneratorNode(BaseTest):
     def setUp(self):
+        super().setUp()
         self.schema = AssistantTrendsQuery(series=[])

     def test_node_runs(self):
@@ -45,23 +47,23 @@ def test_node_runs(self):
         with patch.object(DummyGeneratorNode, "_model") as generator_model_mock:
             generator_model_mock.return_value = RunnableLambda(lambda _: TestSchema(query=self.schema).model_dump())
             new_state = node.run(
-                {
-                    "messages": [HumanMessage(content="Text")],
-                    "plan": "Plan",
-                },
+                AssistantState(
+                    messages=[HumanMessage(content="Text", id="0")],
+                    plan="Plan",
+                    start_id="0",
+                ),
                 {},
             )
-            self.assertEqual(
-                new_state,
-                {
-                    "messages": [VisualizationMessage(answer=self.schema, plan="Plan", done=True)],
-                    "intermediate_steps": None,
-                },
-            )
+            self.assertIsNone(new_state.intermediate_steps)
+            self.assertEqual(len(new_state.messages), 1)
+            self.assertEqual(new_state.messages[0].type, "ai/viz")
+            self.assertEqual(new_state.messages[0].answer, self.schema)

-    def test_agent_reconstructs_conversation(self):
+    def test_agent_reconstructs_conversation_and_does_not_add_an_empty_plan(self):
         node = DummyGeneratorNode(self.team)
-        history = node._construct_messages({"messages": [HumanMessage(content="Text")]})
+        history = node._construct_messages(
+            AssistantState(messages=[HumanMessage(content="Text", id="0")], start_id="0")
+        )
         self.assertEqual(len(history), 2)
         self.assertEqual(history[0].type, "human")
         self.assertIn("mapping", history[0].content)
@@ -69,7 +71,11 @@
         self.assertIn("Answer to this question:", history[1].content)
         self.assertNotIn("{{question}}", history[1].content)

-        history = node._construct_messages({"messages": [HumanMessage(content="Text")], "plan": "randomplan"})
+    def test_agent_reconstructs_conversation_adds_plan(self):
+        node = DummyGeneratorNode(self.team)
+        history = node._construct_messages(
+            AssistantState(messages=[HumanMessage(content="Text", id="0")], plan="randomplan", start_id="0")
+        )
         self.assertEqual(len(history), 3)
         self.assertEqual(history[0].type, "human")
         self.assertIn("mapping", history[0].content)
@@ -82,16 +88,18 @@
         self.assertNotIn("{{question}}", history[2].content)
         self.assertIn("Text", history[2].content)

+    def test_agent_reconstructs_conversation_can_handle_follow_ups(self):
         node = DummyGeneratorNode(self.team)
         history = node._construct_messages(
-            {
-                "messages": [
-                    HumanMessage(content="Text"),
-                    VisualizationMessage(answer=self.schema, plan="randomplan"),
-                    HumanMessage(content="Follow Up"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Text", id="0"),
+                    VisualizationMessage(answer=self.schema, plan="randomplan", id="1", initiator="0"),
+                    HumanMessage(content="Follow Up", id="2"),
                 ],
-                "plan": "newrandomplan",
-            }
+                plan="newrandomplan",
+                start_id="2",
+            )
         )

         self.assertEqual(len(history), 6)
@@ -116,13 +124,41 @@
         self.assertNotIn("{{question}}", history[5].content)
         self.assertIn("Follow Up", history[5].content)

-    def test_agent_reconstructs_conversation_and_merges_messages(self):
+    def test_agent_reconstructs_conversation_and_does_not_merge_messages(self):
+        node = DummyGeneratorNode(self.team)
+        history = node._construct_messages(
+            AssistantState(
+                messages=[HumanMessage(content="Te", id="0"), HumanMessage(content="xt", id="1")],
+                plan="randomplan",
+                start_id="1",
+            )
+        )
+        self.assertEqual(len(history), 4)
+        self.assertEqual(history[0].type, "human")
+        self.assertIn("mapping", history[0].content)
+        self.assertIn("Te", history[1].content)
+        self.assertEqual(history[2].type, "human")
+        self.assertNotIn("{{plan}}", history[2].content)
+        self.assertIn("randomplan", history[2].content)
+        self.assertEqual(history[3].type, "human")
+        self.assertIn("Answer to this question:", history[3].content)
+        self.assertNotIn("{{question}}", history[3].content)
+        self.assertEqual(history[3].type, "human")
+        self.assertIn("xt", history[3].content)
+
+    def test_filters_out_human_in_the_loop_after_initiator(self):
         node = DummyGeneratorNode(self.team)
         history = node._construct_messages(
-            {
-                "messages": [HumanMessage(content="Te"), HumanMessage(content="xt")],
-                "plan": "randomplan",
-            }
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Text", id="0"),
+                    VisualizationMessage(answer=self.schema, plan="randomplan", initiator="0", id="1"),
+                    HumanMessage(content="Follow", id="2"),
+                    HumanMessage(content="Up", id="3"),
+                ],
+                plan="newrandomplan",
+                start_id="0",
+            )
         )
         self.assertEqual(len(history), 3)
         self.assertEqual(history[0].type, "human")
@@ -134,104 +170,114 @@
         self.assertEqual(history[2].type, "human")
         self.assertIn("Answer to this question:", history[2].content)
         self.assertNotIn("{{question}}", history[2].content)
-        self.assertIn("Te\nxt", history[2].content)
+        self.assertIn("Text", history[2].content)

+    def test_preserves_human_in_the_loop_before_initiator(self):
         node = DummyGeneratorNode(self.team)
         history = node._construct_messages(
-            {
-                "messages": [
-                    HumanMessage(content="Text"),
-                    VisualizationMessage(answer=self.schema, plan="randomplan"),
-                    HumanMessage(content="Follow"),
-                    HumanMessage(content="Up"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Question 1", id="0"),
+                    AssistantMessage(content="Loop", id="1"),
+                    HumanMessage(content="Answer", id="2"),
+                    VisualizationMessage(answer=self.schema, plan="randomplan", initiator="0", id="3"),
+                    HumanMessage(content="Question 2", id="4"),
                 ],
-                "plan": "newrandomplan",
-            }
+                plan="newrandomplan",
+                start_id="4",
+            )
         )
-
-        self.assertEqual(len(history), 6)
+        self.assertEqual(len(history), 8)
         self.assertEqual(history[0].type, "human")
         self.assertIn("mapping", history[0].content)
         self.assertEqual(history[1].type, "human")
         self.assertIn("the plan", history[1].content)
         self.assertNotIn("{{plan}}", history[1].content)
         self.assertIn("randomplan", history[1].content)
-        self.assertEqual(history[2].type, "human")
-        self.assertIn("Answer to this question:", history[2].content)
         self.assertNotIn("{{question}}", history[2].content)
-        self.assertIn("Text", history[2].content)
+        self.assertIn("Question 1", history[2].content)
         self.assertEqual(history[3].type, "ai")
-        self.assertEqual(history[3].content, self.schema.model_dump_json())
+        self.assertEqual("Loop", history[3].content)
         self.assertEqual(history[4].type, "human")
-        self.assertIn("the new plan", history[4].content)
-        self.assertNotIn("{{plan}}", history[4].content)
-        self.assertIn("newrandomplan", history[4].content)
-        self.assertEqual(history[5].type, "human")
-        self.assertIn("Answer to this question:", history[5].content)
-        self.assertNotIn("{{question}}", history[5].content)
-        self.assertIn("Follow\nUp", history[5].content)
+        self.assertEqual("Answer", history[4].content)
+        self.assertEqual(history[5].type, "ai")
+        self.assertEqual(history[6].type, "human")
+        self.assertIn("the new plan", history[6].content)
+        self.assertIn("newrandomplan", history[6].content)
+        self.assertEqual(history[7].type, "human")
+        self.assertNotIn("{{question}}", history[7].content)
+        self.assertIn("Question 2", history[7].content)

     def test_agent_reconstructs_typical_conversation(self):
         node = DummyGeneratorNode(self.team)
         history = node._construct_messages(
-            {
-                "messages": [
-                    HumanMessage(content="Question 1"),
-                    RouterMessage(content="trends"),
-                    VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1"),
-                    AssistantMessage(content="Summary 1"),
-                    HumanMessage(content="Question 2"),
-                    RouterMessage(content="funnel"),
-                    VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2"),
-                    AssistantMessage(content="Summary 2"),
-                    HumanMessage(content="Question 3"),
-                    RouterMessage(content="funnel"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Question 1", id="0"),
+                    RouterMessage(content="trends", id="1"),
+                    VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1", initiator="0", id="2"),
+                    AssistantMessage(content="Summary 1", id="3"),
+                    HumanMessage(content="Question 2", id="4"),
+                    RouterMessage(content="funnel", id="5"),
+                    VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2", initiator="4", id="6"),
+                    AssistantMessage(content="Summary 2", id="7"),
+                    HumanMessage(content="Question 3", id="8"),
+                    RouterMessage(content="funnel", id="9"),
                 ],
-                "plan": "Plan 3",
-            }
+ plan="Plan 3", + start_id="8", + ) ) - self.assertEqual(len(history), 8) + + self.assertEqual(len(history), 10) self.assertEqual(history[0].type, "human") self.assertIn("mapping", history[0].content) self.assertEqual(history[1].type, "human") self.assertIn("Plan 1", history[1].content) self.assertEqual(history[2].type, "human") self.assertIn("Question 1", history[2].content) - self.assertEqual(history[3].type, "human") - self.assertIn("Plan 2", history[3].content) + self.assertEqual(history[3].type, "ai") + self.assertEqual(history[3].content, "Summary 1") self.assertEqual(history[4].type, "human") - self.assertIn("Question 2", history[4].content) - self.assertEqual(history[5].type, "ai") - self.assertEqual(history[6].type, "human") - self.assertIn("Plan 3", history[6].content) - self.assertEqual(history[7].type, "human") - self.assertIn("Question 3", history[7].content) - - def test_prompt(self): + self.assertIn("Plan 2", history[4].content) + self.assertEqual(history[5].type, "human") + self.assertIn("Question 2", history[5].content) + self.assertEqual(history[6].type, "ai") + self.assertEqual(history[6].content, "Summary 2") + self.assertEqual(history[7].type, "ai") + self.assertEqual(history[8].type, "human") + self.assertIn("Plan 3", history[8].content) + self.assertEqual(history[9].type, "human") + self.assertIn("Question 3", history[9].content) + + def test_prompt_messages_merged(self): node = DummyGeneratorNode(self.team) - state = { - "messages": [ - HumanMessage(content="Question 1"), - RouterMessage(content="trends"), - VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1"), - AssistantMessage(content="Summary 1"), - HumanMessage(content="Question 2"), - RouterMessage(content="funnel"), - VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2"), - AssistantMessage(content="Summary 2"), - HumanMessage(content="Question 3"), - RouterMessage(content="funnel"), + state = AssistantState( + messages=[ + HumanMessage(content="Question 1", id="0"), + RouterMessage(content="trends", id="1"), + VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1", initiator="0", id="2"), + AssistantMessage(content="Summary 1", id="3"), + HumanMessage(content="Question 2", id="4"), + RouterMessage(content="funnel", id="5"), + VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2", initiator="4", id="6"), + AssistantMessage(content="Summary 2", id="7"), + HumanMessage(content="Question 3", id="8"), + RouterMessage(content="funnel", id="9"), ], - "plan": "Plan 3", - } + plan="Plan 3", + start_id="8", + ) with patch.object(DummyGeneratorNode, "_model") as generator_model_mock: def assert_prompt(prompt): - self.assertEqual(len(prompt), 4) + self.assertEqual(len(prompt), 6) self.assertEqual(prompt[0].type, "system") self.assertEqual(prompt[1].type, "human") self.assertEqual(prompt[2].type, "ai") self.assertEqual(prompt[3].type, "human") + self.assertEqual(prompt[4].type, "ai") + self.assertEqual(prompt[5].type, "human") generator_model_mock.return_value = RunnableLambda(assert_prompt) node.run(state, {}) @@ -244,19 +290,17 @@ def test_failover_with_incorrect_schema(self): schema["query"] = [] generator_model_mock.return_value = RunnableLambda(lambda _: json.dumps(schema)) - new_state = node.run({"messages": [HumanMessage(content="Text")]}, {}) - self.assertIn("intermediate_steps", new_state) - self.assertEqual(len(new_state["intermediate_steps"]), 1) + new_state = node.run(AssistantState(messages=[HumanMessage(content="Text")]), 
{}) + self.assertEqual(len(new_state.intermediate_steps), 1) new_state = node.run( - { - "messages": [HumanMessage(content="Text")], - "intermediate_steps": [(AgentAction(tool="", tool_input="", log="exception"), "exception")], - }, + AssistantState( + messages=[HumanMessage(content="Text")], + intermediate_steps=[(AgentAction(tool="", tool_input="", log="exception"), "exception")], + ), {}, ) - self.assertIn("intermediate_steps", new_state) - self.assertEqual(len(new_state["intermediate_steps"]), 2) + self.assertEqual(len(new_state.intermediate_steps), 2) def test_node_leaves_failover(self): node = DummyGeneratorNode(self.team) @@ -266,25 +310,25 @@ def test_node_leaves_failover(self): return_value=RunnableLambda(lambda _: TestSchema(query=self.schema).model_dump()), ): new_state = node.run( - { - "messages": [HumanMessage(content="Text")], - "intermediate_steps": [(AgentAction(tool="", tool_input="", log="exception"), "exception")], - }, + AssistantState( + messages=[HumanMessage(content="Text")], + intermediate_steps=[(AgentAction(tool="", tool_input="", log="exception"), "exception")], + ), {}, ) - self.assertIsNone(new_state["intermediate_steps"]) + self.assertIsNone(new_state.intermediate_steps) new_state = node.run( - { - "messages": [HumanMessage(content="Text")], - "intermediate_steps": [ + AssistantState( + messages=[HumanMessage(content="Text")], + intermediate_steps=[ (AgentAction(tool="", tool_input="", log="exception"), "exception"), (AgentAction(tool="", tool_input="", log="exception"), "exception"), ], - }, + ), {}, ) - self.assertIsNone(new_state["intermediate_steps"]) + self.assertIsNone(new_state.intermediate_steps) def test_node_leaves_failover_after_second_unsuccessful_attempt(self): node = DummyGeneratorNode(self.team) @@ -295,29 +339,30 @@ def test_node_leaves_failover_after_second_unsuccessful_attempt(self): generator_model_mock.return_value = RunnableLambda(lambda _: json.dumps(schema)) new_state = node.run( - { - "messages": [HumanMessage(content="Text")], - "intermediate_steps": [ + AssistantState( + messages=[HumanMessage(content="Text")], + intermediate_steps=[ (AgentAction(tool="", tool_input="", log="exception"), "exception"), (AgentAction(tool="", tool_input="", log="exception"), "exception"), ], - }, + ), {}, ) - self.assertIsNone(new_state["intermediate_steps"]) - self.assertEqual(len(new_state["messages"]), 1) - self.assertIsInstance(new_state["messages"][0], FailureMessage) + self.assertIsNone(new_state.intermediate_steps) + self.assertEqual(len(new_state.messages), 1) + self.assertIsInstance(new_state.messages[0], FailureMessage) def test_agent_reconstructs_conversation_with_failover(self): action = AgentAction(tool="fix", tool_input="validation error", log="exception") node = DummyGeneratorNode(self.team) history = node._construct_messages( - { - "messages": [HumanMessage(content="Text")], - "plan": "randomplan", - "intermediate_steps": [(action, "uniqexception")], - }, - "uniqexception", + AssistantState( + messages=[HumanMessage(content="Text", id="0")], + plan="randomplan", + intermediate_steps=[(action, "uniqexception")], + start_id="0", + ), + validation_error_message="uniqexception", ) self.assertEqual(len(history), 4) self.assertEqual(history[0].type, "human") @@ -337,14 +382,14 @@ def test_agent_reconstructs_conversation_with_failover(self): def test_agent_reconstructs_conversation_with_failed_messages(self): node = DummyGeneratorNode(self.team) history = node._construct_messages( - { - "messages": [ + AssistantState( + messages=[ 
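The failover tests above pin down a simple retry contract: each schema-validation failure appends an `(AgentAction, observation)` pair to `intermediate_steps`, the router keeps routing to the tools node while steps are pending, and once two attempts have failed the node returns a `FailureMessage` and clears the steps. A compact sketch of that control flow (illustrative names, not the real node):

```python
# Sketch of the retry contract exercised by the failover tests above.
MAX_FAILED_ATTEMPTS = 2


def route_after_generation(intermediate_steps: list, parse_ok: bool) -> str:
    if parse_ok:
        return "next"  # success: emit VisualizationMessage, reset the steps
    if len(intermediate_steps) >= MAX_FAILED_ATTEMPTS:
        return "failure"  # give up: emit FailureMessage, reset the steps
    intermediate_steps.append(("handle_incorrect_response", None))
    return "tools"  # pending steps make the router retry via the tools node


steps: list = []
assert route_after_generation(steps, parse_ok=False) == "tools"  # first failure
assert route_after_generation(steps, parse_ok=False) == "tools"  # second failure
assert route_after_generation(steps, parse_ok=False) == "failure"  # two prior failures: stop
```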
HumanMessage(content="Text"), FailureMessage(content="Error"), HumanMessage(content="Text"), ], - "plan": "randomplan", - }, + plan="randomplan", + ), ) self.assertEqual(len(history), 3) self.assertEqual(history[0].type, "human") @@ -360,19 +405,19 @@ def test_agent_reconstructs_conversation_with_failed_messages(self): def test_router(self): node = DummyGeneratorNode(self.team) - state = node.router({"messages": [], "intermediate_steps": None}) + state = node.router(AssistantState(messages=[], intermediate_steps=None)) self.assertEqual(state, "next") state = node.router( - {"messages": [], "intermediate_steps": [(AgentAction(tool="", tool_input="", log=""), None)]} + AssistantState(messages=[], intermediate_steps=[(AgentAction(tool="", tool_input="", log=""), None)]) ) self.assertEqual(state, "tools") -class TestSchemaGeneratorToolsNode(ClickhouseTestMixin, APIBaseTest): +class TestSchemaGeneratorToolsNode(BaseTest): def test_tools_node(self): node = SchemaGeneratorToolsNode(self.team) action = AgentAction(tool="fix", tool_input="validationerror", log="pydanticexception") - state = node.run({"messages": [], "intermediate_steps": [(action, None)]}, {}) - self.assertIsNotNone("validationerror", state["intermediate_steps"][0][1]) - self.assertIn("validationerror", state["intermediate_steps"][0][1]) - self.assertIn("pydanticexception", state["intermediate_steps"][0][1]) + state = node.run(AssistantState(messages=[], intermediate_steps=[(action, None)]), {}) + self.assertIsNotNone("validationerror", state.intermediate_steps[0][1]) + self.assertIn("validationerror", state.intermediate_steps[0][1]) + self.assertIn("pydanticexception", state.intermediate_steps[0][1]) diff --git a/ee/hogai/summarizer/nodes.py b/ee/hogai/summarizer/nodes.py index 8d5e8a406f45e..513246bcc1238 100644 --- a/ee/hogai/summarizer/nodes.py +++ b/ee/hogai/summarizer/nodes.py @@ -1,15 +1,18 @@ import json from time import sleep +from uuid import uuid4 + from django.conf import settings +from django.core.serializers.json import DjangoJSONEncoder from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableConfig from langchain_openai import ChatOpenAI -from django.core.serializers.json import DjangoJSONEncoder from rest_framework.exceptions import APIException from sentry_sdk import capture_exception -from ee.hogai.summarizer.prompts import SUMMARIZER_SYSTEM_PROMPT, SUMMARIZER_INSTRUCTION_PROMPT -from ee.hogai.utils import AssistantNode, AssistantNodeName, AssistantState +from ee.hogai.summarizer.prompts import SUMMARIZER_INSTRUCTION_PROMPT, SUMMARIZER_SYSTEM_PROMPT +from ee.hogai.utils.nodes import AssistantNode +from ee.hogai.utils.types import AssistantNodeName, AssistantState, PartialAssistantState from posthog.api.services.query import process_query_dict from posthog.clickhouse.client.execute_async import get_query_status from posthog.errors import ExposedCHQueryError @@ -21,8 +24,8 @@ class SummarizerNode(AssistantNode): name = AssistantNodeName.SUMMARIZER - def run(self, state: AssistantState, config: RunnableConfig): - viz_message = state["messages"][-1] + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: + viz_message = state.messages[-1] if not isinstance(viz_message, VisualizationMessage): raise ValueError("Can only run summarization with a visualization message as the last one in the state") if viz_message.answer is None: @@ -58,10 +61,16 @@ def run(self, state: AssistantState, config: RunnableConfig): err_message = ", ".join(f"{key}: 
{value}" for key, value in err.detail.items()) elif isinstance(err.detail, list): err_message = ", ".join(map(str, err.detail)) - return {"messages": [FailureMessage(content=f"There was an error running this query: {err_message}")]} + return PartialAssistantState( + messages=[ + FailureMessage(content=f"There was an error running this query: {err_message}", id=str(uuid4())) + ] + ) except Exception as err: capture_exception(err) - return {"messages": [FailureMessage(content="There was an unknown error running this query.")]} + return PartialAssistantState( + messages=[FailureMessage(content="There was an unknown error running this query.", id=str(uuid4()))] + ) summarization_prompt = ChatPromptTemplate(self._construct_messages(state), template_format="mustache") @@ -76,7 +85,7 @@ def run(self, state: AssistantState, config: RunnableConfig): config, ) - return {"messages": [AssistantMessage(content=str(message.content), done=True)]} + return PartialAssistantState(messages=[AssistantMessage(content=str(message.content), id=str(uuid4()))]) @property def _model(self): @@ -85,7 +94,7 @@ def _model(self): def _construct_messages(self, state: AssistantState) -> list[tuple[str, str]]: conversation: list[tuple[str, str]] = [("system", SUMMARIZER_SYSTEM_PROMPT)] - for message in state.get("messages", []): + for message in state.messages: if isinstance(message, HumanMessage): conversation.append(("human", message.content)) elif isinstance(message, AssistantMessage): diff --git a/ee/hogai/summarizer/test/test_nodes.py b/ee/hogai/summarizer/test/test_nodes.py index b38d88275aa19..9c54517717b5f 100644 --- a/ee/hogai/summarizer/test/test_nodes.py +++ b/ee/hogai/summarizer/test/test_nodes.py @@ -1,23 +1,23 @@ from unittest.mock import patch from django.test import override_settings -from langchain_core.runnables import RunnableLambda from langchain_core.messages import ( HumanMessage as LangchainHumanMessage, ) +from langchain_core.runnables import RunnableLambda +from rest_framework.exceptions import ValidationError + from ee.hogai.summarizer.nodes import SummarizerNode from ee.hogai.summarizer.prompts import SUMMARIZER_INSTRUCTION_PROMPT, SUMMARIZER_SYSTEM_PROMPT +from ee.hogai.utils.types import AssistantState +from posthog.api.services.query import process_query_dict from posthog.schema import ( - AssistantMessage, AssistantTrendsEventsNode, AssistantTrendsQuery, - FailureMessage, HumanMessage, VisualizationMessage, ) -from rest_framework.exceptions import ValidationError from posthog.test.base import APIBaseTest, ClickhouseTestMixin -from posthog.api.services.query import process_query_dict @override_settings(IN_UNIT_TESTING=True) @@ -32,28 +32,26 @@ def test_node_runs(self, mock_process_query_dict): lambda _: LangchainHumanMessage(content="The results indicate foobar.") ) new_state = node.run( - { - "messages": [ - HumanMessage(content="Text"), + AssistantState( + messages=[ + HumanMessage(content="Text", id="test"), VisualizationMessage( answer=AssistantTrendsQuery(series=[AssistantTrendsEventsNode()]), plan="Plan", - done=True, + id="test2", + initiator="test", ), ], - "plan": "Plan", - }, + plan="Plan", + start_id="test", + ), {}, ) mock_process_query_dict.assert_called_once() # Query processing started - self.assertEqual( - new_state, - { - "messages": [ - AssistantMessage(content="The results indicate foobar.", done=True), - ], - }, - ) + msg = new_state.messages[0] + self.assertEqual(msg.content, "The results indicate foobar.") + self.assertEqual(msg.type, "ai") + self.assertIsNotNone(msg.id) 
diff --git a/ee/hogai/summarizer/test/test_nodes.py b/ee/hogai/summarizer/test/test_nodes.py
index b38d88275aa19..9c54517717b5f 100644
--- a/ee/hogai/summarizer/test/test_nodes.py
+++ b/ee/hogai/summarizer/test/test_nodes.py
@@ -1,23 +1,23 @@
 from unittest.mock import patch

 from django.test import override_settings
-from langchain_core.runnables import RunnableLambda
 from langchain_core.messages import (
     HumanMessage as LangchainHumanMessage,
 )
+from langchain_core.runnables import RunnableLambda
+from rest_framework.exceptions import ValidationError
+
 from ee.hogai.summarizer.nodes import SummarizerNode
 from ee.hogai.summarizer.prompts import SUMMARIZER_INSTRUCTION_PROMPT, SUMMARIZER_SYSTEM_PROMPT
+from ee.hogai.utils.types import AssistantState
+from posthog.api.services.query import process_query_dict
 from posthog.schema import (
-    AssistantMessage,
     AssistantTrendsEventsNode,
     AssistantTrendsQuery,
-    FailureMessage,
     HumanMessage,
     VisualizationMessage,
 )
-from rest_framework.exceptions import ValidationError
 from posthog.test.base import APIBaseTest, ClickhouseTestMixin
-from posthog.api.services.query import process_query_dict


 @override_settings(IN_UNIT_TESTING=True)
@@ -32,28 +32,26 @@ def test_node_runs(self, mock_process_query_dict):
             lambda _: LangchainHumanMessage(content="The results indicate foobar.")
         )
         new_state = node.run(
-            {
-                "messages": [
-                    HumanMessage(content="Text"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Text", id="test"),
                     VisualizationMessage(
                         answer=AssistantTrendsQuery(series=[AssistantTrendsEventsNode()]),
                         plan="Plan",
-                        done=True,
+                        id="test2",
+                        initiator="test",
                     ),
                 ],
-                "plan": "Plan",
-            },
+                plan="Plan",
+                start_id="test",
+            ),
             {},
         )
         mock_process_query_dict.assert_called_once()  # Query processing started
-        self.assertEqual(
-            new_state,
-            {
-                "messages": [
-                    AssistantMessage(content="The results indicate foobar.", done=True),
-                ],
-            },
-        )
+        msg = new_state.messages[0]
+        self.assertEqual(msg.content, "The results indicate foobar.")
+        self.assertEqual(msg.type, "ai")
+        self.assertIsNotNone(msg.id)

     @patch(
         "ee.hogai.summarizer.nodes.process_query_dict",
@@ -66,28 +64,26 @@ def test_node_handles_internal_error(self, mock_process_query_dict):
             lambda _: LangchainHumanMessage(content="The results indicate foobar.")
         )
         new_state = node.run(
-            {
-                "messages": [
-                    HumanMessage(content="Text"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Text", id="test"),
                     VisualizationMessage(
                         answer=AssistantTrendsQuery(series=[AssistantTrendsEventsNode()]),
                         plan="Plan",
-                        done=True,
+                        id="test2",
+                        initiator="test",
                     ),
                 ],
-                "plan": "Plan",
-            },
+                plan="Plan",
+                start_id="test",
+            ),
             {},
         )
         mock_process_query_dict.assert_called_once()  # Query processing started
-        self.assertEqual(
-            new_state,
-            {
-                "messages": [
-                    FailureMessage(content="There was an unknown error running this query."),
-                ],
-            },
-        )
+        msg = new_state.messages[0]
+        self.assertEqual(msg.content, "There was an unknown error running this query.")
+        self.assertEqual(msg.type, "ai/failure")
+        self.assertIsNotNone(msg.id)

     @patch(
         "ee.hogai.summarizer.nodes.process_query_dict",
@@ -102,33 +98,29 @@ def test_node_handles_exposed_error(self, mock_process_query_dict):
             lambda _: LangchainHumanMessage(content="The results indicate foobar.")
         )
         new_state = node.run(
-            {
-                "messages": [
-                    HumanMessage(content="Text"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="Text", id="test"),
                     VisualizationMessage(
                         answer=AssistantTrendsQuery(series=[AssistantTrendsEventsNode()]),
                         plan="Plan",
-                        done=True,
+                        id="test2",
+                        initiator="test",
                     ),
                 ],
-                "plan": "Plan",
-            },
+                plan="Plan",
+                start_id="test",
+            ),
             {},
         )
         mock_process_query_dict.assert_called_once()  # Query processing started
+        msg = new_state.messages[0]
         self.assertEqual(
-            new_state,
-            {
-                "messages": [
-                    FailureMessage(
-                        content=(
-                            "There was an error running this query: This query exceeds the capabilities of our picolator. "
-                            "Try de-brolling its flim-flam."
-                        )
-                    ),
-                ],
-            },
+            msg.content,
+            "There was an error running this query: This query exceeds the capabilities of our picolator. Try de-brolling its flim-flam.",
         )
+        self.assertEqual(msg.type, "ai/failure")
+        self.assertIsNotNone(msg.id)

     def test_node_requires_a_viz_message_in_state(self):
         node = SummarizerNode(self.team)
@@ -137,12 +129,13 @@ def test_node_requires_a_viz_message_in_state(self):
             ValueError, "Can only run summarization with a visualization message as the last one in the state"
         ):
             node.run(
-                {
-                    "messages": [
+                AssistantState(
+                    messages=[
                         HumanMessage(content="Text"),
                     ],
-                    "plan": "Plan",
-                },
+                    plan="Plan",
+                    start_id="test",
+                ),
                 {},
             )

     def test_node_requires_viz_message_in_state_to_have_query(self):
         node = SummarizerNode(self.team)

         with self.assertRaisesMessage(ValueError, "Did not found query in the visualization message"):
             node.run(
-                {
-                    "messages": [
-                        VisualizationMessage(
-                            answer=None,
-                            plan="Plan",
-                            done=True,
-                        ),
+                AssistantState(
+                    messages=[
+                        VisualizationMessage(answer=None, plan="Plan", id="test"),
                     ],
-                    "plan": "Plan",
-                },
+                    plan="Plan",
+                    start_id="test",
+                ),
                 {},
             )

@@ -170,16 +160,18 @@ def test_agent_reconstructs_conversation(self):
         node = SummarizerNode(self.team)

         history = node._construct_messages(
-            {
-                "messages": [
-                    HumanMessage(content="What's the trends in signups?"),
+            AssistantState(
+                messages=[
+                    HumanMessage(content="What's the trends in signups?", id="test"),
                     VisualizationMessage(
                         answer=AssistantTrendsQuery(series=[AssistantTrendsEventsNode()]),
                         plan="Plan",
-                        done=True,
+                        id="test2",
+                        initiator="test",
                     ),
-                ]
-            }
+                ],
+                start_id="test",
+            )
         )
         self.assertEqual(
             history,
diff --git a/ee/hogai/taxonomy_agent/nodes.py b/ee/hogai/taxonomy_agent/nodes.py
index 025058a51eec1..bd26a7a93918f 100644
--- a/ee/hogai/taxonomy_agent/nodes.py
+++ b/ee/hogai/taxonomy_agent/nodes.py
@@ -1,4 +1,3 @@
-import itertools
 import xml.etree.ElementTree as ET
 from abc import ABC
 from functools import cached_property
@@ -7,10 +6,16 @@
 from git import Optional
 from langchain.agents.format_scratchpad import format_log_to_str
 from langchain_core.agents import AgentAction
-from langchain_core.messages import AIMessage as LangchainAssistantMessage, BaseMessage, merge_message_runs
+from langchain_core.messages import (
+    AIMessage as LangchainAssistantMessage,
+    BaseMessage,
+    HumanMessage as LangchainHumanMessage,
+    merge_message_runs,
+)
 from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.runnables import RunnableConfig
 from langchain_openai import ChatOpenAI
+from langgraph.errors import NodeInterrupt
 from pydantic import ValidationError

 from ee.hogai.taxonomy import CORE_FILTER_DEFINITIONS_BY_GROUP
@@ -24,6 +29,7 @@
     REACT_FOLLOW_UP_PROMPT,
     REACT_FORMAT_PROMPT,
     REACT_FORMAT_REMINDER_PROMPT,
+    REACT_HUMAN_IN_THE_LOOP_PROMPT,
     REACT_MALFORMED_JSON_PROMPT,
     REACT_MISSING_ACTION_CORRECTION_PROMPT,
     REACT_MISSING_ACTION_PROMPT,
@@ -33,13 +39,18 @@
     REACT_USER_PROMPT,
 )
 from ee.hogai.taxonomy_agent.toolkit import TaxonomyAgentTool, TaxonomyAgentToolkit
-from ee.hogai.utils import AssistantNode, AssistantState, filter_visualization_conversation, remove_line_breaks
+from ee.hogai.utils.helpers import filter_messages, remove_line_breaks, slice_messages_to_conversation_start
+from ee.hogai.utils.nodes import AssistantNode
+from ee.hogai.utils.types import AssistantState, PartialAssistantState
 from posthog.hogql_queries.ai.team_taxonomy_query_runner import TeamTaxonomyQueryRunner
 from posthog.hogql_queries.query_runner import ExecutionMode
 from posthog.models.group_type_mapping import GroupTypeMapping
 from posthog.schema import (
+    AssistantMessage,
     CachedTeamTaxonomyQueryResponse,
+    HumanMessage,
     TeamTaxonomyQuery,
+    VisualizationMessage,
 )
@@ -50,8 +61,8 @@ def _run_with_prompt_and_toolkit(
         prompt: ChatPromptTemplate,
         toolkit: TaxonomyAgentToolkit,
         config: Optional[RunnableConfig] = None,
-    ) -> AssistantState:
-        intermediate_steps = state.get("intermediate_steps") or []
+    ) -> PartialAssistantState:
+        intermediate_steps = state.intermediate_steps or []
         conversation = (
             prompt
             + ChatPromptTemplate.from_messages(
@@ -79,6 +90,7 @@ def _run_with_prompt_and_toolkit(
                 "react_format": self._get_react_format_prompt(toolkit),
                 "react_format_reminder": REACT_FORMAT_REMINDER_PROMPT,
                 "react_property_filters": self._get_react_property_filters_prompt(),
+                "react_human_in_the_loop": REACT_HUMAN_IN_THE_LOOP_PROMPT,
                 "product_description": self._team.project.product_description,
                 "groups": self._team_group_types,
                 "events": self._events_prompt,
@@ -108,12 +120,12 @@ def _run_with_prompt_and_toolkit(
                 e.llm_output,
             )

-        return {
-            "intermediate_steps": [*intermediate_steps, (result, None)],
-        }
+        return PartialAssistantState(
+            intermediate_steps=[*intermediate_steps, (result, None)],
+        )

     def router(self, state: AssistantState):
-        if state.get("intermediate_steps", []):
+        if state.intermediate_steps:
             return "tools"
         raise ValueError("Invalid state.")
@@ -188,33 +200,34 @@ def _construct_messages(self, state: AssistantState) -> list[BaseMessage]:
         """
         Reconstruct the conversation for the agent. On this step we only care about previously asked questions and generated plans. All other messages are filtered out.
         """
-        human_messages, visualization_messages = filter_visualization_conversation(state.get("messages", []))
-
-        if not human_messages:
-            return []
-
+        start_id = state.start_id
+        filtered_messages = filter_messages(slice_messages_to_conversation_start(state.messages, start_id))
         conversation = []

-        for idx, messages in enumerate(itertools.zip_longest(human_messages, visualization_messages)):
-            human_message, viz_message = messages
-
-            if human_message:
+        for idx, message in enumerate(filtered_messages):
+            if isinstance(message, HumanMessage):
+                # Add initial instructions.
                 if idx == 0:
                     conversation.append(
                         HumanMessagePromptTemplate.from_template(REACT_USER_PROMPT, template_format="mustache").format(
-                            question=human_message.content
+                            question=message.content
                         )
                     )
-                else:
+                # Add follow-up instructions only for the human message that initiated a generation.
+                elif message.id == start_id:
                     conversation.append(
                         HumanMessagePromptTemplate.from_template(
                             REACT_FOLLOW_UP_PROMPT,
                             template_format="mustache",
-                        ).format(feedback=human_message.content)
+                        ).format(feedback=message.content)
                     )
-
-            if viz_message:
-                conversation.append(LangchainAssistantMessage(content=viz_message.plan or ""))
+                # Leave everything else as is.
+                else:
+                    conversation.append(LangchainHumanMessage(content=message.content))
+            elif isinstance(message, VisualizationMessage):
+                conversation.append(LangchainAssistantMessage(content=message.plan or ""))
+            elif isinstance(message, AssistantMessage):
+                conversation.append(LangchainAssistantMessage(content=message.content))

         return conversation
@@ -230,26 +243,37 @@ class TaxonomyAgentPlannerToolsNode(AssistantNode, ABC):
     def _run_with_toolkit(
         self, state: AssistantState, toolkit: TaxonomyAgentToolkit, config: Optional[RunnableConfig] = None
-    ) -> AssistantState:
-        intermediate_steps = state.get("intermediate_steps") or []
-        action, _ = intermediate_steps[-1]
+    ) -> PartialAssistantState:
+        intermediate_steps = state.intermediate_steps or []
+        action, observation = intermediate_steps[-1]

         try:
             input = TaxonomyAgentTool.model_validate({"name": action.tool, "arguments": action.tool_input}).root
         except ValidationError as e:
-            observation = (
+            observation = str(
                 ChatPromptTemplate.from_template(REACT_PYDANTIC_VALIDATION_EXCEPTION_PROMPT, template_format="mustache")
                 .format_messages(exception=e.errors(include_url=False))[0]
                 .content
             )
-            return {"intermediate_steps": [*intermediate_steps[:-1], (action, str(observation))]}
+            return PartialAssistantState(
+                intermediate_steps=[*intermediate_steps[:-1], (action, str(observation))],
+            )

         # The plan has been found. Move to the generation.
         if input.name == "final_answer":
-            return {
-                "plan": input.arguments,
-                "intermediate_steps": None,
-            }
+            return PartialAssistantState(
+                plan=input.arguments,
+                intermediate_steps=[],
+            )
+
+        if input.name == "ask_user_for_help":
+            # The agent has requested help, so we interrupt the graph.
+            if not observation:
+                raise NodeInterrupt(input.arguments)
+
+            # Feedback was provided.
+            return PartialAssistantState(
+                intermediate_steps=[*intermediate_steps[:-1], (action, observation)],
+            )

         output = ""
         if input.name == "retrieve_event_properties":
@@ -263,9 +287,11 @@ def _run_with_toolkit(
         else:
             output = toolkit.handle_incorrect_response(input.arguments)

-        return {"intermediate_steps": [*intermediate_steps[:-1], (action, output)]}
+        return PartialAssistantState(
+            intermediate_steps=[*intermediate_steps[:-1], (action, output)],
+        )

     def router(self, state: AssistantState):
-        if state.get("plan") is not None:
+        if state.plan is not None:
             return "plan_found"
         return "continue"
diff --git a/ee/hogai/taxonomy_agent/prompts.py b/ee/hogai/taxonomy_agent/prompts.py
index f63a7dfe15455..c9d409bcdf103 100644
--- a/ee/hogai/taxonomy_agent/prompts.py
+++ b/ee/hogai/taxonomy_agent/prompts.py
@@ -81,6 +81,15 @@
 """.strip()

+REACT_HUMAN_IN_THE_LOOP_PROMPT = """
+
+Ask the user for clarification if:
+- The user's question is ambiguous.
+- You can't find matching events or properties.
+- You're unable to build a plan that effectively answers the user's question.
+
+""".strip()
+
 REACT_FORMAT_REMINDER_PROMPT = """
 Begin! Reminder that you must ALWAYS respond with a valid JSON blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB``` then Observation.
 """.strip()
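`ask_user_for_help` plus `NodeInterrupt` is the new human-in-the-loop path: with a checkpointer attached, raising `NodeInterrupt` suspends the run at this node, and the conversation can be resumed on the same `thread_id` after the user replies. A minimal sketch of the pause/resume cycle (single-node graph with the in-memory saver, not the assistant's actual wiring):

```python
# Sketch: NodeInterrupt pauses a checkpointed run until the user answers.
from typing import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.errors import NodeInterrupt
from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    question: str
    answer: str


def maybe_ask(state: State) -> State:
    if not state["answer"]:
        # Surfaces the agent's question and halts the graph at this node.
        raise NodeInterrupt(state["question"])
    return state


builder = StateGraph(State)
builder.add_node("maybe_ask", maybe_ask)
builder.add_edge(START, "maybe_ask")
builder.add_edge("maybe_ask", END)
graph = builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "conversation-1"}}
graph.invoke({"question": "Which event means 'sign up' here?", "answer": ""}, config)  # pauses

# Once the user replies, patch the state and resume the same thread.
graph.update_state(config, {"answer": "signed_up"})
graph.invoke(None, config)  # picks up from the interrupted node
```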
""".strip() diff --git a/ee/hogai/taxonomy_agent/test/test_nodes.py b/ee/hogai/taxonomy_agent/test/test_nodes.py index 40127c19370b6..cb25331664331 100644 --- a/ee/hogai/taxonomy_agent/test/test_nodes.py +++ b/ee/hogai/taxonomy_agent/test/test_nodes.py @@ -11,7 +11,7 @@ TaxonomyAgentPlannerToolsNode, ) from ee.hogai.taxonomy_agent.toolkit import TaxonomyAgentToolkit, ToolkitTool -from ee.hogai.utils import AssistantState +from ee.hogai.utils.types import AssistantState, PartialAssistantState from posthog.models import GroupTypeMapping from posthog.schema import ( AssistantMessage, @@ -37,7 +37,7 @@ def setUp(self): def _get_node(self): class Node(TaxonomyAgentPlannerNode): - def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: prompt: ChatPromptTemplate = ChatPromptTemplate.from_messages([("user", "test")]) toolkit = DummyToolkit(self._team) return super()._run_with_prompt_and_toolkit(state, prompt, toolkit, config=config) @@ -46,19 +46,20 @@ def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: def test_agent_reconstructs_conversation(self): node = self._get_node() - history = node._construct_messages({"messages": [HumanMessage(content="Text")]}) + history = node._construct_messages(AssistantState(messages=[HumanMessage(content="Text")])) self.assertEqual(len(history), 1) self.assertEqual(history[0].type, "human") self.assertIn("Text", history[0].content) self.assertNotIn(f"{{question}}", history[0].content) history = node._construct_messages( - { - "messages": [ - HumanMessage(content="Text"), - VisualizationMessage(answer=self.schema, plan="randomplan"), - ] - } + AssistantState( + messages=[ + HumanMessage(content="Text", id="0"), + VisualizationMessage(answer=self.schema, plan="randomplan", id="1", initiator="0"), + ], + start_id="1", + ) ) self.assertEqual(len(history), 2) self.assertEqual(history[0].type, "human") @@ -68,13 +69,14 @@ def test_agent_reconstructs_conversation(self): self.assertEqual(history[1].content, "randomplan") history = node._construct_messages( - { - "messages": [ - HumanMessage(content="Text"), - VisualizationMessage(answer=self.schema, plan="randomplan"), - HumanMessage(content="Text"), - ] - } + AssistantState( + messages=[ + HumanMessage(content="Text", id="0"), + VisualizationMessage(answer=self.schema, plan="randomplan", id="1", initiator="0"), + HumanMessage(content="Text", id="2"), + ], + start_id="2", + ) ) self.assertEqual(len(history), 3) self.assertEqual(history[0].type, "human") @@ -89,12 +91,14 @@ def test_agent_reconstructs_conversation(self): def test_agent_reconstructs_conversation_and_omits_unknown_messages(self): node = self._get_node() history = node._construct_messages( - { - "messages": [ - HumanMessage(content="Text"), - AssistantMessage(content="test"), - ] - } + AssistantState( + messages=[ + HumanMessage(content="Text", id="0"), + RouterMessage(content="trends", id="1"), + AssistantMessage(content="test", id="2"), + ], + start_id="0", + ) ) self.assertEqual(len(history), 1) self.assertEqual(history[0].type, "human") @@ -104,13 +108,13 @@ def test_agent_reconstructs_conversation_and_omits_unknown_messages(self): def test_agent_reconstructs_conversation_with_failures(self): node = self._get_node() history = node._construct_messages( - { - "messages": [ + AssistantState( + messages=[ HumanMessage(content="Text"), FailureMessage(content="Error"), HumanMessage(content="Text"), - ] - } + ], + ) ) 
self.assertEqual(len(history), 1) self.assertEqual(history[0].type, "human") @@ -120,32 +124,60 @@ def test_agent_reconstructs_conversation_with_failures(self): def test_agent_reconstructs_typical_conversation(self): node = self._get_node() history = node._construct_messages( - { - "messages": [ - HumanMessage(content="Question 1"), - RouterMessage(content="trends"), - VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1"), - AssistantMessage(content="Summary 1"), - HumanMessage(content="Question 2"), - RouterMessage(content="funnel"), - VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2"), - AssistantMessage(content="Summary 2"), - HumanMessage(content="Question 3"), - RouterMessage(content="funnel"), - ] - } + AssistantState( + messages=[ + HumanMessage(content="Question 1", id="0"), + RouterMessage(content="trends", id="1"), + VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1", id="2", initiator="0"), + AssistantMessage(content="Summary 1", id="3"), + HumanMessage(content="Question 2", id="4"), + RouterMessage(content="funnel", id="5"), + AssistantMessage(content="Loop 1", id="6"), + HumanMessage(content="Loop Answer 1", id="7"), + VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2", id="8", initiator="4"), + AssistantMessage(content="Summary 2", id="9"), + HumanMessage(content="Question 3", id="10"), + RouterMessage(content="funnel", id="11"), + ], + start_id="10", + ) ) - self.assertEqual(len(history), 5) + self.assertEqual(len(history), 9) self.assertEqual(history[0].type, "human") self.assertIn("Question 1", history[0].content) self.assertEqual(history[1].type, "ai") self.assertEqual(history[1].content, "Plan 1") - self.assertEqual(history[2].type, "human") - self.assertIn("Question 2", history[2].content) - self.assertEqual(history[3].type, "ai") - self.assertEqual(history[3].content, "Plan 2") - self.assertEqual(history[4].type, "human") - self.assertIn("Question 3", history[4].content) + self.assertEqual(history[2].type, "ai") + self.assertEqual(history[2].content, "Summary 1") + self.assertEqual(history[3].type, "human") + self.assertIn("Question 2", history[3].content) + self.assertEqual(history[4].type, "ai") + self.assertEqual(history[4].content, "Loop 1") + self.assertEqual(history[5].type, "human") + self.assertEqual(history[5].content, "Loop Answer 1") + self.assertEqual(history[6].content, "Plan 2") + self.assertEqual(history[6].type, "ai") + self.assertEqual(history[7].type, "ai") + self.assertEqual(history[7].content, "Summary 2") + self.assertEqual(history[8].type, "human") + self.assertIn("Question 3", history[8].content) + + def test_agent_reconstructs_conversation_without_messages_after_parent(self): + node = self._get_node() + history = node._construct_messages( + AssistantState( + messages=[ + HumanMessage(content="Question 1", id="0"), + RouterMessage(content="trends", id="1"), + AssistantMessage(content="Loop 1", id="2"), + HumanMessage(content="Loop Answer 1", id="3"), + ], + start_id="0", + ) + ) + self.assertEqual(len(history), 1) + self.assertEqual(history[0].type, "human") + self.assertIn("Question 1", history[0].content) def test_agent_filters_out_low_count_events(self): _create_person(distinct_ids=["test"], team=self.team) @@ -182,9 +214,9 @@ def test_agent_handles_output_without_action_block(self): return_value=RunnableLambda(lambda _: LangchainAIMessage(content="I don't want to output an action.")), ): node = self._get_node() - state_update = node.run({"messages": 
[HumanMessage(content="Question")]}, {}) - self.assertEqual(len(state_update["intermediate_steps"]), 1) - action, obs = state_update["intermediate_steps"][0] + state_update = node.run(AssistantState(messages=[HumanMessage(content="Question")]), {}) + self.assertEqual(len(state_update.intermediate_steps), 1) + action, obs = state_update.intermediate_steps[0] self.assertIsNone(obs) self.assertIn("I don't want to output an action.", action.log) self.assertIn("Action:", action.log) @@ -196,9 +228,9 @@ def test_agent_handles_output_with_malformed_json(self): return_value=RunnableLambda(lambda _: LangchainAIMessage(content="Thought.\nAction: abc")), ): node = self._get_node() - state_update = node.run({"messages": [HumanMessage(content="Question")]}, {}) - self.assertEqual(len(state_update["intermediate_steps"]), 1) - action, obs = state_update["intermediate_steps"][0] + state_update = node.run(AssistantState(messages=[HumanMessage(content="Question")]), {}) + self.assertEqual(len(state_update.intermediate_steps), 1) + action, obs = state_update.intermediate_steps[0] self.assertIsNone(obs) self.assertIn("Thought.\nAction: abc", action.log) self.assertIn("action", action.tool_input) @@ -232,34 +264,34 @@ def test_property_filters_prompt(self): class TestTaxonomyAgentPlannerToolsNode(ClickhouseTestMixin, APIBaseTest): def _get_node(self): class Node(TaxonomyAgentPlannerToolsNode): - def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: toolkit = DummyToolkit(self._team) return super()._run_with_toolkit(state, toolkit, config=config) return Node(self.team) def test_node_handles_action_name_validation_error(self): - state = { - "intermediate_steps": [(AgentAction(tool="does not exist", tool_input="input", log="log"), "test")], - "messages": [], - } + state = AssistantState( + intermediate_steps=[(AgentAction(tool="does not exist", tool_input="input", log="log"), "test")], + messages=[], + ) node = self._get_node() state_update = node.run(state, {}) - self.assertEqual(len(state_update["intermediate_steps"]), 1) - action, observation = state_update["intermediate_steps"][0] + self.assertEqual(len(state_update.intermediate_steps), 1) + action, observation = state_update.intermediate_steps[0] self.assertIsNotNone(observation) self.assertIn("", observation) def test_node_handles_action_input_validation_error(self): - state = { - "intermediate_steps": [ + state = AssistantState( + intermediate_steps=[ (AgentAction(tool="retrieve_entity_property_values", tool_input="input", log="log"), "test") ], - "messages": [], - } + messages=[], + ) node = self._get_node() state_update = node.run(state, {}) - self.assertEqual(len(state_update["intermediate_steps"]), 1) - action, observation = state_update["intermediate_steps"][0] + self.assertEqual(len(state_update.intermediate_steps), 1) + action, observation = state_update.intermediate_steps[0] self.assertIsNotNone(observation) self.assertIn("", observation) diff --git a/ee/hogai/taxonomy_agent/toolkit.py b/ee/hogai/taxonomy_agent/toolkit.py index dc8a0e092c2e6..d05b6f0c933ef 100644 --- a/ee/hogai/taxonomy_agent/toolkit.py +++ b/ee/hogai/taxonomy_agent/toolkit.py @@ -55,6 +55,7 @@ class SingleArgumentTaxonomyAgentTool(BaseModel): "retrieve_event_properties", "final_answer", "handle_incorrect_response", + "ask_user_for_help", ] arguments: str @@ -145,6 +146,16 @@ def _default_tools(self) -> list[ToolkitTool]: property_name: The name of the property that 
you want to retrieve values for. """, }, + { + "name": "ask_user_for_help", + "signature": "(question: str)", + "description": """ + Use this tool to ask a question to the user. Your question must be concise and clear. + + Args: + question: The question you want to ask. + """, + }, ] def render_text_description(self) -> str: diff --git a/ee/hogai/test/test_assistant.py b/ee/hogai/test/test_assistant.py index b6cd65bd4ec12..6d0bb8807d629 100644 --- a/ee/hogai/test/test_assistant.py +++ b/ee/hogai/test/test_assistant.py @@ -1,31 +1,63 @@ import json -from typing import Any +from typing import Any, Optional, cast from unittest.mock import patch -from uuid import uuid4 -from ee.hogai.utils import Conversation -from posthog.schema import AssistantMessage, HumanMessage -from ..assistant import Assistant + +from langchain_core import messages +from langchain_core.agents import AgentAction +from langchain_core.runnables import RunnableConfig, RunnableLambda from langgraph.graph.state import CompiledStateGraph +from langgraph.types import StateSnapshot +from pydantic import BaseModel + +from ee.models.assistant import Conversation +from posthog.schema import AssistantMessage, HumanMessage, ReasoningMessage +from posthog.test.base import NonAtomicBaseTest + +from ..assistant import Assistant from ..graph import AssistantGraph, AssistantNodeName -from posthog.test.base import BaseTest -from langchain_core.agents import AgentAction -class TestAssistant(BaseTest): - def _run_assistant_graph(self, test_graph: CompiledStateGraph) -> list[tuple[str, Any]]: +class TestAssistant(NonAtomicBaseTest): + CLASS_DATA_LEVEL_SETUP = False + + def setUp(self): + super().setUp() + self.conversation = Conversation.objects.create(team=self.team, user=self.user) + + def _run_assistant_graph( + self, + test_graph: Optional[CompiledStateGraph] = None, + message: Optional[str] = "Hello", + conversation: Optional[Conversation] = None, + is_new_conversation: bool = False, + ) -> list[tuple[str, Any]]: # Create assistant instance with our test graph assistant = Assistant( - team=self.team, - conversation=Conversation(messages=[HumanMessage(content="Hello")], session_id=str(uuid4())), + self.team, + conversation or self.conversation, + HumanMessage(content=message), + self.user, + is_new_conversation=is_new_conversation, ) - assistant._graph = test_graph + if test_graph: + assistant._graph = test_graph # Capture and parse output of assistant.stream() output: list[tuple[str, Any]] = [] for message in assistant.stream(): - event_line, data_line, *_ = message.split("\n") + event_line, data_line, *_ = cast(str, message).split("\n") output.append((event_line.removeprefix("event: "), json.loads(data_line.removeprefix("data: ")))) return output + def assertConversationEqual(self, output: list[tuple[str, Any]], expected_output: list[tuple[str, Any]]): + for i, ((output_msg_type, output_msg), (expected_msg_type, expected_msg)) in enumerate( + zip(output, expected_output) + ): + self.assertEqual(output_msg_type, expected_msg_type, f"Message type mismatch at index {i}") + msg_dict = ( + expected_msg.model_dump(exclude_none=True) if isinstance(expected_msg, BaseModel) else expected_msg + ) + self.assertDictContainsSubset(msg_dict, output_msg, f"Message content mismatch at index {i}") + @patch( "ee.hogai.trends.nodes.TrendsPlannerNode.run", return_value={"intermediate_steps": [(AgentAction(tool="final_answer", tool_input="", log=""), None)]}, @@ -39,19 +71,22 @@ def test_reasoning_messages_added(self, _mock_summarizer_run, 
_mock_funnel_plann .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_PLANNER) .add_trends_planner(AssistantNodeName.SUMMARIZER) .add_summarizer(AssistantNodeName.END) - .compile() + .compile(), + conversation=self.conversation, ) # Assert that ReasoningMessages are added - assert output == [ - ("status", {"type": "ack"}), + expected_output = [ + ( + "message", + HumanMessage(content="Hello").model_dump(exclude_none=True), + ), ( "message", { "type": "ai/reasoning", "content": "Picking relevant events and properties", # For TrendsPlannerNode "substeps": [], - "done": True, }, ), ( @@ -60,7 +95,6 @@ def test_reasoning_messages_added(self, _mock_summarizer_run, _mock_funnel_plann "type": "ai/reasoning", "content": "Picking relevant events and properties", # For TrendsPlannerToolsNode "substeps": [], - "done": True, }, ), ( @@ -71,6 +105,7 @@ def test_reasoning_messages_added(self, _mock_summarizer_run, _mock_funnel_plann }, ), ] + self.assertConversationEqual(output, expected_output) @patch( "ee.hogai.trends.nodes.TrendsPlannerNode.run", @@ -105,19 +140,22 @@ def test_reasoning_messages_with_substeps_added(self, _mock_funnel_planner_run): AssistantGraph(self.team) .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_PLANNER) .add_trends_planner(AssistantNodeName.END) - .compile() + .compile(), + conversation=self.conversation, ) # Assert that ReasoningMessages are added - assert output == [ - ("status", {"type": "ack"}), + expected_output = [ + ( + "message", + HumanMessage(content="Hello").model_dump(exclude_none=True), + ), ( "message", { "type": "ai/reasoning", "content": "Picking relevant events and properties", # For TrendsPlannerNode "substeps": [], - "done": True, }, ), ( @@ -131,7 +169,153 @@ def test_reasoning_messages_with_substeps_added(self, _mock_funnel_planner_run): "Analyzing `currency` event's property `purchase`", "Analyzing person property `country_of_birth`", ], - "done": True, }, ), ] + self.assertConversationEqual(output, expected_output) + + def _test_human_in_the_loop(self, graph: CompiledStateGraph): + with patch("ee.hogai.taxonomy_agent.nodes.TaxonomyAgentPlannerNode._model") as mock: + config: RunnableConfig = { + "configurable": { + "thread_id": self.conversation.id, + } + } + + # Interrupt the graph + message = """ + Thought: Let's ask for help. + Action: + ``` + { + "action": "ask_user_for_help", + "action_input": "Need help with this query" + } + ``` + """ + mock.return_value = RunnableLambda(lambda _: messages.AIMessage(content=message)) + output = self._run_assistant_graph(graph, conversation=self.conversation) + expected_output = [ + ("message", HumanMessage(content="Hello")), + ("message", ReasoningMessage(content="Picking relevant events and properties", substeps=[])), + ("message", ReasoningMessage(content="Picking relevant events and properties", substeps=[])), + ("message", AssistantMessage(content="Need help with this query")), + ] + self.assertConversationEqual(output, expected_output) + snapshot: StateSnapshot = graph.get_state(config) + self.assertTrue(snapshot.next) + self.assertIn("intermediate_steps", snapshot.values) + + # Resume the graph from the interruption point. + message = """ + Thought: Finish. 
+ Action: + ``` + { + "action": "final_answer", + "action_input": "Plan" + } + ``` + """ + mock.return_value = RunnableLambda(lambda _: messages.AIMessage(content=message)) + output = self._run_assistant_graph(graph, conversation=self.conversation, message="It's straightforward") + expected_output = [ + ("message", HumanMessage(content="It's straightforward")), + ("message", ReasoningMessage(content="Picking relevant events and properties", substeps=[])), + ("message", ReasoningMessage(content="Picking relevant events and properties", substeps=[])), + ] + self.assertConversationEqual(output, expected_output) + snapshot: StateSnapshot = graph.get_state(config) + self.assertFalse(snapshot.next) + self.assertEqual(snapshot.values.get("intermediate_steps"), []) + self.assertEqual(snapshot.values["plan"], "Plan") + + def test_trends_interrupt_when_asking_for_help(self): + graph = ( + AssistantGraph(self.team) + .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_PLANNER) + .add_trends_planner(AssistantNodeName.END) + .compile() + ) + self._test_human_in_the_loop(graph) + + def test_funnels_interrupt_when_asking_for_help(self): + graph = ( + AssistantGraph(self.team) + .add_edge(AssistantNodeName.START, AssistantNodeName.FUNNEL_PLANNER) + .add_funnel_planner(AssistantNodeName.END) + .compile() + ) + self._test_human_in_the_loop(graph) + + def test_intermediate_steps_are_updated_after_feedback(self): + with patch("ee.hogai.taxonomy_agent.nodes.TaxonomyAgentPlannerNode._model") as mock: + graph = ( + AssistantGraph(self.team) + .add_edge(AssistantNodeName.START, AssistantNodeName.TRENDS_PLANNER) + .add_trends_planner(AssistantNodeName.END) + .compile() + ) + config: RunnableConfig = { + "configurable": { + "thread_id": self.conversation.id, + } + } + + # Interrupt the graph + message = """ + Thought: Let's ask for help. 
+ Action: + ``` + { + "action": "ask_user_for_help", + "action_input": "Need help with this query" + } + ``` + """ + mock.return_value = RunnableLambda(lambda _: messages.AIMessage(content=message)) + self._run_assistant_graph(graph, conversation=self.conversation) + snapshot: StateSnapshot = graph.get_state(config) + self.assertTrue(snapshot.next) + self.assertIn("intermediate_steps", snapshot.values) + self.assertEqual(len(snapshot.values["intermediate_steps"]), 1) + action, observation = snapshot.values["intermediate_steps"][0] + self.assertEqual(action.tool, "ask_user_for_help") + self.assertIsNone(observation) + + self._run_assistant_graph(graph, conversation=self.conversation, message="It's straightforward") + snapshot: StateSnapshot = graph.get_state(config) + self.assertTrue(snapshot.next) + self.assertIn("intermediate_steps", snapshot.values) + self.assertEqual(len(snapshot.values["intermediate_steps"]), 2) + action, observation = snapshot.values["intermediate_steps"][0] + self.assertEqual(action.tool, "ask_user_for_help") + self.assertEqual(observation, "It's straightforward") + action, observation = snapshot.values["intermediate_steps"][1] + self.assertEqual(action.tool, "ask_user_for_help") + self.assertIsNone(observation) + + def test_new_conversation_handles_serialized_conversation(self): + graph = ( + AssistantGraph(self.team) + .add_node(AssistantNodeName.ROUTER, lambda _: {"messages": [AssistantMessage(content="Hello")]}) + .add_edge(AssistantNodeName.START, AssistantNodeName.ROUTER) + .add_edge(AssistantNodeName.ROUTER, AssistantNodeName.END) + .compile() + ) + output = self._run_assistant_graph( + graph, + conversation=self.conversation, + is_new_conversation=True, + ) + expected_output = [ + ("conversation", {"id": str(self.conversation.id)}), + ] + self.assertConversationEqual(output[:1], expected_output) + + output = self._run_assistant_graph( + graph, + conversation=self.conversation, + is_new_conversation=False, + ) + self.assertNotEqual(output[0][0], "conversation") diff --git a/ee/hogai/test/test_utils.py b/ee/hogai/test/test_utils.py index 42e54d058c556..8c32471c88508 100644 --- a/ee/hogai/test/test_utils.py +++ b/ee/hogai/test/test_utils.py @@ -1,6 +1,4 @@ -from langchain_core.messages import HumanMessage as LangchainHumanMessage - -from ee.hogai.utils import filter_visualization_conversation, merge_human_messages +from ee.hogai.utils.helpers import filter_messages from posthog.schema import ( AssistantMessage, AssistantTrendsQuery, @@ -13,40 +11,29 @@ class TestTrendsUtils(BaseTest): - def test_merge_human_messages(self): - res = merge_human_messages( - [ - LangchainHumanMessage(content="Text"), - LangchainHumanMessage(content="Text"), - LangchainHumanMessage(content="Te"), - LangchainHumanMessage(content="xt"), - ] - ) - self.assertEqual(len(res), 1) - self.assertEqual(res, [LangchainHumanMessage(content="Text\nTe\nxt")]) - - def test_filter_trends_conversation(self): - human_messages, visualization_messages = filter_visualization_conversation( + def test_filters_and_merges_human_messages(self): + conversation = [ + HumanMessage(content="Text"), + FailureMessage(content="Error"), + HumanMessage(content="Text"), + VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="plan"), + HumanMessage(content="Text2"), + VisualizationMessage(answer=None, plan="plan"), + ] + messages = filter_messages(conversation) + self.assertEqual(len(messages), 4) + self.assertEqual( [ - HumanMessage(content="Text"), - FailureMessage(content="Error"), - 
HumanMessage(content="Text"), + HumanMessage(content="Text\nText"), VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="plan"), HumanMessage(content="Text2"), VisualizationMessage(answer=None, plan="plan"), - ] - ) - self.assertEqual(len(human_messages), 2) - self.assertEqual(len(visualization_messages), 1) - self.assertEqual( - human_messages, [LangchainHumanMessage(content="Text"), LangchainHumanMessage(content="Text2")] - ) - self.assertEqual( - visualization_messages, [VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="plan")] + ], + messages, ) def test_filters_typical_conversation(self): - human_messages, visualization_messages = filter_visualization_conversation( + messages = filter_messages( [ HumanMessage(content="Question 1"), RouterMessage(content="trends"), @@ -58,15 +45,30 @@ def test_filters_typical_conversation(self): AssistantMessage(content="Summary 2"), ] ) - self.assertEqual(len(human_messages), 2) - self.assertEqual(len(visualization_messages), 2) - self.assertEqual( - human_messages, [LangchainHumanMessage(content="Question 1"), LangchainHumanMessage(content="Question 2")] - ) + self.assertEqual(len(messages), 6) self.assertEqual( - visualization_messages, + messages, [ + HumanMessage(content="Question 1"), VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 1"), + AssistantMessage(content="Summary 1"), + HumanMessage(content="Question 2"), VisualizationMessage(answer=AssistantTrendsQuery(series=[]), plan="Plan 2"), + AssistantMessage(content="Summary 2"), + ], + ) + + def test_joins_human_messages(self): + messages = filter_messages( + [ + HumanMessage(content="Question 1"), + HumanMessage(content="Question 2"), + ] + ) + self.assertEqual(len(messages), 1) + self.assertEqual( + messages, + [ + HumanMessage(content="Question 1\nQuestion 2"), ], ) diff --git a/ee/hogai/trends/nodes.py b/ee/hogai/trends/nodes.py index b6b33cf6d8354..e430b4036e043 100644 --- a/ee/hogai/trends/nodes.py +++ b/ee/hogai/trends/nodes.py @@ -6,12 +6,12 @@ from ee.hogai.taxonomy_agent.nodes import TaxonomyAgentPlannerNode, TaxonomyAgentPlannerToolsNode from ee.hogai.trends.prompts import REACT_SYSTEM_PROMPT, TRENDS_SYSTEM_PROMPT from ee.hogai.trends.toolkit import TRENDS_SCHEMA, TrendsTaxonomyAgentToolkit -from ee.hogai.utils import AssistantState +from ee.hogai.utils.types import AssistantState, PartialAssistantState from posthog.schema import AssistantTrendsQuery class TrendsPlannerNode(TaxonomyAgentPlannerNode): - def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: toolkit = TrendsTaxonomyAgentToolkit(self._team) prompt = ChatPromptTemplate.from_messages( [ @@ -23,7 +23,7 @@ def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: class TrendsPlannerToolsNode(TaxonomyAgentPlannerToolsNode): - def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: toolkit = TrendsTaxonomyAgentToolkit(self._team) return super()._run_with_toolkit(state, toolkit, config=config) @@ -36,7 +36,7 @@ class TrendsGeneratorNode(SchemaGeneratorNode[AssistantTrendsQuery]): OUTPUT_MODEL = TrendsSchemaGeneratorOutput OUTPUT_SCHEMA = TRENDS_SCHEMA - def run(self, state: AssistantState, config: RunnableConfig) -> AssistantState: + def run(self, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: prompt = 
ChatPromptTemplate.from_messages( [ ("system", TRENDS_SYSTEM_PROMPT), diff --git a/ee/hogai/trends/prompts.py b/ee/hogai/trends/prompts.py index 2ac9496480cdd..dcc1daeaa5a00 100644 --- a/ee/hogai/trends/prompts.py +++ b/ee/hogai/trends/prompts.py @@ -12,6 +12,8 @@ {{react_format}} +{{react_human_in_the_loop}} + Below you will find information on how to correctly discover the taxonomy of the user's data. diff --git a/ee/hogai/trends/test/test_nodes.py b/ee/hogai/trends/test/test_nodes.py index 44973b3195377..369ce8bc9b292 100644 --- a/ee/hogai/trends/test/test_nodes.py +++ b/ee/hogai/trends/test/test_nodes.py @@ -4,6 +4,7 @@ from langchain_core.runnables import RunnableLambda from ee.hogai.trends.nodes import TrendsGeneratorNode, TrendsSchemaGeneratorOutput +from ee.hogai.utils.types import AssistantState, PartialAssistantState from posthog.schema import ( AssistantTrendsQuery, HumanMessage, @@ -17,6 +18,7 @@ class TestTrendsGeneratorNode(ClickhouseTestMixin, APIBaseTest): maxDiff = None def setUp(self): + super().setUp() self.schema = AssistantTrendsQuery(series=[]) def test_node_runs(self): @@ -26,16 +28,16 @@ def test_node_runs(self): lambda _: TrendsSchemaGeneratorOutput(query=self.schema).model_dump() ) new_state = node.run( - { - "messages": [HumanMessage(content="Text")], - "plan": "Plan", - }, + AssistantState( + messages=[HumanMessage(content="Text")], + plan="Plan", + ), {}, ) self.assertEqual( new_state, - { - "messages": [VisualizationMessage(answer=self.schema, plan="Plan", done=True)], - "intermediate_steps": None, - }, + PartialAssistantState( + messages=[VisualizationMessage(answer=self.schema, plan="Plan", id=new_state.messages[0].id)], + intermediate_steps=None, + ), ) diff --git a/ee/hogai/trends/toolkit.py b/ee/hogai/trends/toolkit.py index d69830d2f2cd6..5fd7a35f0f18a 100644 --- a/ee/hogai/trends/toolkit.py +++ b/ee/hogai/trends/toolkit.py @@ -1,8 +1,6 @@ from ee.hogai.taxonomy_agent.toolkit import TaxonomyAgentToolkit, ToolkitTool -from ee.hogai.utils import dereference_schema -from posthog.schema import ( - AssistantTrendsQuery, -) +from ee.hogai.utils.helpers import dereference_schema +from posthog.schema import AssistantTrendsQuery class TrendsTaxonomyAgentToolkit(TaxonomyAgentToolkit): diff --git a/ee/hogai/utils.py b/ee/hogai/utils.py deleted file mode 100644 index 559a369df83c8..0000000000000 --- a/ee/hogai/utils.py +++ /dev/null @@ -1,117 +0,0 @@ -import operator -from abc import ABC, abstractmethod -from collections.abc import Sequence -from enum import StrEnum -from typing import Annotated, Optional, TypedDict, Union - -from jsonref import replace_refs -from langchain_core.agents import AgentAction -from langchain_core.messages import ( - HumanMessage as LangchainHumanMessage, - merge_message_runs, -) -from langchain_core.runnables import RunnableConfig -from langgraph.graph import END, START -from pydantic import BaseModel, Field - -from posthog.models.team.team import Team -from posthog.schema import ( - AssistantMessage, - FailureMessage, - HumanMessage, - ReasoningMessage, - RootAssistantMessage, - RouterMessage, - VisualizationMessage, -) - -AssistantMessageUnion = Union[ - AssistantMessage, HumanMessage, VisualizationMessage, FailureMessage, RouterMessage, ReasoningMessage -] - - -class Conversation(BaseModel): - messages: list[RootAssistantMessage] = Field(..., min_length=1, max_length=50) - session_id: str - - -class AssistantState(TypedDict, total=False): - messages: Annotated[Sequence[AssistantMessageUnion], operator.add] - intermediate_steps: 
Optional[list[tuple[AgentAction, Optional[str]]]] - plan: Optional[str] - - -class AssistantNodeName(StrEnum): - START = START - END = END - ROUTER = "router" - TRENDS_PLANNER = "trends_planner" - TRENDS_PLANNER_TOOLS = "trends_planner_tools" - TRENDS_GENERATOR = "trends_generator" - TRENDS_GENERATOR_TOOLS = "trends_generator_tools" - FUNNEL_PLANNER = "funnel_planner" - FUNNEL_PLANNER_TOOLS = "funnel_planner_tools" - FUNNEL_GENERATOR = "funnel_generator" - FUNNEL_GENERATOR_TOOLS = "funnel_generator_tools" - SUMMARIZER = "summarizer" - - -class AssistantNode(ABC): - _team: Team - - def __init__(self, team: Team): - self._team = team - - @abstractmethod - def run(cls, state: AssistantState, config: RunnableConfig) -> AssistantState: - raise NotImplementedError - - -def remove_line_breaks(line: str) -> str: - return line.replace("\n", " ") - - -def merge_human_messages(messages: list[LangchainHumanMessage]) -> list[LangchainHumanMessage]: - """ - Filters out duplicated human messages and merges them into one message. - """ - contents = set() - filtered_messages = [] - for message in messages: - if message.content in contents: - continue - contents.add(message.content) - filtered_messages.append(message) - return merge_message_runs(filtered_messages) - - -def filter_visualization_conversation( - messages: Sequence[AssistantMessageUnion], -) -> tuple[list[LangchainHumanMessage], list[VisualizationMessage]]: - """ - Splits, filters and merges the message history to be consumable by agents. Returns human and visualization messages. - """ - stack: list[LangchainHumanMessage] = [] - human_messages: list[LangchainHumanMessage] = [] - visualization_messages: list[VisualizationMessage] = [] - - for message in messages: - if isinstance(message, HumanMessage): - stack.append(LangchainHumanMessage(content=message.content)) - elif isinstance(message, VisualizationMessage) and message.answer: - if stack: - human_messages += merge_human_messages(stack) - stack = [] - visualization_messages.append(message) - - if stack: - human_messages += merge_human_messages(stack) - - return human_messages, visualization_messages - - -def dereference_schema(schema: dict) -> dict: - new_schema: dict = replace_refs(schema, proxies=False, lazy_load=False) - if "$defs" in new_schema: - new_schema.pop("$defs") - return new_schema diff --git a/ee/hogai/utils/__init__.py b/ee/hogai/utils/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/ee/hogai/utils/helpers.py b/ee/hogai/utils/helpers.py new file mode 100644 index 0000000000000..4fc8cf3b5d6a0 --- /dev/null +++ b/ee/hogai/utils/helpers.py @@ -0,0 +1,79 @@ +from collections.abc import Sequence +from typing import Optional, TypeVar, Union + +from jsonref import replace_refs +from langchain_core.messages import ( + HumanMessage as LangchainHumanMessage, + merge_message_runs, +) + +from posthog.schema import ( + AssistantMessage, + HumanMessage, + VisualizationMessage, +) + +from .types import AIMessageUnion, AssistantMessageUnion + + +def remove_line_breaks(line: str) -> str: + return line.replace("\n", " ") + + +def filter_messages( + messages: Sequence[AssistantMessageUnion], + entity_filter: Union[tuple[type[AIMessageUnion], ...], type[AIMessageUnion]] = ( + AssistantMessage, + VisualizationMessage, + ), +) -> list[AssistantMessageUnion]: + """ + Filters and merges the message history to be consumable by agents. Returns human and AI messages. 
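+    Messages that don't match `entity_filter` (e.g. failure and reasoning messages) are dropped,
+    and runs of consecutive human messages are merged into a single message.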
+ """ + stack: list[LangchainHumanMessage] = [] + filtered_messages: list[AssistantMessageUnion] = [] + + def _merge_stack(stack: list[LangchainHumanMessage]) -> list[HumanMessage]: + return [ + HumanMessage(content=langchain_message.content, id=langchain_message.id) + for langchain_message in merge_message_runs(stack) + ] + + for message in messages: + if isinstance(message, HumanMessage): + stack.append(LangchainHumanMessage(content=message.content, id=message.id)) + elif isinstance(message, entity_filter): + if stack: + filtered_messages += _merge_stack(stack) + stack = [] + filtered_messages.append(message) + + if stack: + filtered_messages += _merge_stack(stack) + + return filtered_messages + + +T = TypeVar("T", bound=AssistantMessageUnion) + + +def find_last_message_of_type(messages: Sequence[AssistantMessageUnion], message_type: type[T]) -> Optional[T]: + return next((msg for msg in reversed(messages) if isinstance(msg, message_type)), None) + + +def slice_messages_to_conversation_start( + messages: Sequence[AssistantMessageUnion], start_id: Optional[str] = None +) -> Sequence[AssistantMessageUnion]: + result = [] + for msg in messages: + result.append(msg) + if msg.id == start_id: + break + return result + + +def dereference_schema(schema: dict) -> dict: + new_schema: dict = replace_refs(schema, proxies=False, lazy_load=False) + if "$defs" in new_schema: + new_schema.pop("$defs") + return new_schema diff --git a/ee/hogai/utils/nodes.py b/ee/hogai/utils/nodes.py new file mode 100644 index 0000000000000..6a4358243b666 --- /dev/null +++ b/ee/hogai/utils/nodes.py @@ -0,0 +1,18 @@ +from abc import ABC, abstractmethod + +from langchain_core.runnables import RunnableConfig + +from posthog.models.team.team import Team + +from .types import AssistantState, PartialAssistantState + + +class AssistantNode(ABC): + _team: Team + + def __init__(self, team: Team): + self._team = team + + @abstractmethod + def run(cls, state: AssistantState, config: RunnableConfig) -> PartialAssistantState: + raise NotImplementedError diff --git a/ee/hogai/utils/state.py b/ee/hogai/utils/state.py new file mode 100644 index 0000000000000..3392f3362adb9 --- /dev/null +++ b/ee/hogai/utils/state.py @@ -0,0 +1,70 @@ +from typing import Any, Literal, TypedDict, TypeGuard, Union + +from langchain_core.messages import AIMessageChunk + +from ee.hogai.utils.types import AssistantNodeName, AssistantState, PartialAssistantState + +# A state update can have a partial state or a LangGraph's reserved dataclasses like Interrupt. +GraphValueUpdate = dict[AssistantNodeName, dict[Any, Any] | Any] + +GraphValueUpdateTuple = tuple[Literal["values"], GraphValueUpdate] + + +def is_value_update(update: list[Any]) -> TypeGuard[GraphValueUpdateTuple]: + """ + Transition between nodes. + + Returns: + PartialAssistantState, Interrupt, or other LangGraph reserved dataclasses. 
+ """ + return len(update) == 2 and update[0] == "updates" + + +def validate_value_update(update: GraphValueUpdate) -> dict[AssistantNodeName, PartialAssistantState | Any]: + validated_update = {} + for node_name, value in update.items(): + if isinstance(value, dict): + validated_update[node_name] = PartialAssistantState.model_validate(value) + else: + validated_update[node_name] = value + return validated_update + + +class LangGraphState(TypedDict): + langgraph_node: AssistantNodeName + + +GraphMessageUpdateTuple = tuple[Literal["messages"], tuple[Union[AIMessageChunk, Any], LangGraphState]] + + +def is_message_update(update: list[Any]) -> TypeGuard[GraphMessageUpdateTuple]: + """ + Streaming of messages. + """ + return len(update) == 2 and update[0] == "messages" + + +GraphStateUpdateTuple = tuple[Literal["updates"], dict[Any, Any]] + + +def is_state_update(update: list[Any]) -> TypeGuard[GraphStateUpdateTuple]: + """ + Update of the state. Returns a full state. + """ + return len(update) == 2 and update[0] == "values" + + +def validate_state_update(state_update: dict[Any, Any]) -> AssistantState: + return AssistantState.model_validate(state_update) + + +GraphTaskStartedUpdateTuple = tuple[Literal["debug"], tuple[Union[AIMessageChunk, Any], LangGraphState]] + + +def is_task_started_update( + update: list[Any], +) -> TypeGuard[GraphTaskStartedUpdateTuple]: + """ + Streaming of messages. + """ + return len(update) == 2 and update[0] == "debug" and update[1]["type"] == "task" diff --git a/ee/hogai/utils/types.py b/ee/hogai/utils/types.py new file mode 100644 index 0000000000000..2df027b6f85af --- /dev/null +++ b/ee/hogai/utils/types.py @@ -0,0 +1,52 @@ +import operator +from collections.abc import Sequence +from enum import StrEnum +from typing import Annotated, Optional, Union + +from langchain_core.agents import AgentAction +from langgraph.graph import END, START +from pydantic import BaseModel, Field + +from posthog.schema import ( + AssistantMessage, + FailureMessage, + HumanMessage, + ReasoningMessage, + RouterMessage, + VisualizationMessage, +) + +AIMessageUnion = Union[AssistantMessage, VisualizationMessage, FailureMessage, RouterMessage, ReasoningMessage] +AssistantMessageUnion = Union[HumanMessage, AIMessageUnion] + + +class _SharedAssistantState(BaseModel): + intermediate_steps: Optional[list[tuple[AgentAction, Optional[str]]]] = Field(default=None) + start_id: Optional[str] = Field(default=None) + """ + The ID of the message from which the conversation started. 
+ """ + plan: Optional[str] = Field(default=None) + + +class AssistantState(_SharedAssistantState): + messages: Annotated[Sequence[AssistantMessageUnion], operator.add] + + +class PartialAssistantState(_SharedAssistantState): + messages: Optional[Annotated[Sequence[AssistantMessageUnion], operator.add]] = Field(default=None) + + +class AssistantNodeName(StrEnum): + START = START + END = END + ROUTER = "router" + TRENDS_PLANNER = "trends_planner" + TRENDS_PLANNER_TOOLS = "trends_planner_tools" + TRENDS_GENERATOR = "trends_generator" + TRENDS_GENERATOR_TOOLS = "trends_generator_tools" + FUNNEL_PLANNER = "funnel_planner" + FUNNEL_PLANNER_TOOLS = "funnel_planner_tools" + FUNNEL_GENERATOR = "funnel_generator" + FUNNEL_GENERATOR_TOOLS = "funnel_generator_tools" + SUMMARIZER = "summarizer" diff --git a/ee/migrations/0018_conversation_conversationcheckpoint_and_more.py b/ee/migrations/0018_conversation_conversationcheckpoint_and_more.py new file mode 100644 index 0000000000000..ec48cc780ad57 --- /dev/null +++ b/ee/migrations/0018_conversation_conversationcheckpoint_and_more.py @@ -0,0 +1,147 @@ +# Generated by Django 4.2.15 on 2024-12-11 15:51 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion +import posthog.models.utils + + +class Migration(migrations.Migration): + dependencies = [ + ("posthog", "0528_project_field_in_taxonomy"), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("ee", "0017_accesscontrol_and_more"), + ] + + operations = [ + migrations.CreateModel( + name="Conversation", + fields=[ + ( + "id", + models.UUIDField( + default=posthog.models.utils.UUIDT, editable=False, primary_key=True, serialize=False + ), + ), + ("team", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="posthog.team")), + ("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="ConversationCheckpoint", + fields=[ + ( + "id", + models.UUIDField( + default=posthog.models.utils.UUIDT, editable=False, primary_key=True, serialize=False + ), + ), + ( + "checkpoint_ns", + models.TextField( + default="", + help_text='Checkpoint namespace. Denotes the path to the subgraph node the checkpoint originates from, separated by `|` character, e.g. `"child|grandchild"`. Defaults to "" (root graph).', + ), + ), + ("checkpoint", models.JSONField(help_text="Serialized checkpoint data.", null=True)), + ("metadata", models.JSONField(help_text="Serialized checkpoint metadata.", null=True)), + ( + "parent_checkpoint", + models.ForeignKey( + help_text="Parent checkpoint ID.", + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="children", + to="ee.conversationcheckpoint", + ), + ), + ( + "thread", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, related_name="checkpoints", to="ee.conversation" + ), + ), + ], + ), + migrations.CreateModel( + name="ConversationCheckpointWrite", + fields=[ + ( + "id", + models.UUIDField( + default=posthog.models.utils.UUIDT, editable=False, primary_key=True, serialize=False + ), + ), + ("task_id", models.UUIDField(help_text="Identifier for the task creating the checkpoint write.")), + ( + "idx", + models.IntegerField( + help_text="Index of the checkpoint write. It is an integer value where negative numbers are reserved for special cases, such as node interruption." 
+ ), + ), + ( + "channel", + models.TextField( + help_text="An arbitrary string defining the channel name. For example, it can be a node name or a reserved LangGraph's enum." + ), + ), + ("type", models.TextField(help_text="Type of the serialized blob. For example, `json`.", null=True)), + ("blob", models.BinaryField(null=True)), + ( + "checkpoint", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="writes", + to="ee.conversationcheckpoint", + ), + ), + ], + ), + migrations.CreateModel( + name="ConversationCheckpointBlob", + fields=[ + ( + "id", + models.UUIDField( + default=posthog.models.utils.UUIDT, editable=False, primary_key=True, serialize=False + ), + ), + ( + "channel", + models.TextField( + help_text="An arbitrary string defining the channel name. For example, it can be a node name or a reserved LangGraph's enum." + ), + ), + ("version", models.TextField(help_text="Monotonically increasing version of the channel.")), + ("type", models.TextField(help_text="Type of the serialized blob. For example, `json`.", null=True)), + ("blob", models.BinaryField(null=True)), + ( + "checkpoint", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="blobs", + to="ee.conversationcheckpoint", + ), + ), + ], + ), + migrations.AddConstraint( + model_name="conversationcheckpointwrite", + constraint=models.UniqueConstraint( + fields=("checkpoint_id", "task_id", "idx"), name="unique_checkpoint_write" + ), + ), + migrations.AddConstraint( + model_name="conversationcheckpointblob", + constraint=models.UniqueConstraint( + fields=("checkpoint_id", "channel", "version"), name="unique_checkpoint_blob" + ), + ), + migrations.AddConstraint( + model_name="conversationcheckpoint", + constraint=models.UniqueConstraint(fields=("id", "checkpoint_ns", "thread"), name="unique_checkpoint"), + ), + ] diff --git a/ee/migrations/max_migration.txt b/ee/migrations/max_migration.txt index 449d87290c304..fb889f1cc34cf 100644 --- a/ee/migrations/max_migration.txt +++ b/ee/migrations/max_migration.txt @@ -1 +1 @@ -0017_accesscontrol_and_more +0018_conversation_conversationcheckpoint_and_more diff --git a/ee/models/__init__.py b/ee/models/__init__.py index df7cfcba704e6..2067d11f7618f 100644 --- a/ee/models/__init__.py +++ b/ee/models/__init__.py @@ -1,3 +1,4 @@ +from .assistant import Conversation, ConversationCheckpoint, ConversationCheckpointBlob, ConversationCheckpointWrite from .dashboard_privilege import DashboardPrivilege from .event_definition import EnterpriseEventDefinition from .explicit_team_membership import ExplicitTeamMembership @@ -10,7 +11,11 @@ __all__ = [ "AccessControl", + "ConversationCheckpoint", + "ConversationCheckpointBlob", + "ConversationCheckpointWrite", "DashboardPrivilege", + "Conversation", "EnterpriseEventDefinition", "EnterprisePropertyDefinition", "ExplicitTeamMembership", diff --git a/ee/models/assistant.py b/ee/models/assistant.py new file mode 100644 index 0000000000000..390a7ab7a117f --- /dev/null +++ b/ee/models/assistant.py @@ -0,0 +1,83 @@ +from collections.abc import Iterable + +from django.db import models +from langgraph.checkpoint.serde.types import TASKS + +from posthog.models.team.team import Team +from posthog.models.user import User +from posthog.models.utils import UUIDModel + + +class Conversation(UUIDModel): + user = models.ForeignKey(User, on_delete=models.CASCADE) + team = models.ForeignKey(Team, on_delete=models.CASCADE) + + +class ConversationCheckpoint(UUIDModel): + thread = models.ForeignKey(Conversation, 
on_delete=models.CASCADE, related_name="checkpoints") + checkpoint_ns = models.TextField( + default="", + help_text='Checkpoint namespace. Denotes the path to the subgraph node the checkpoint originates from, separated by `|` character, e.g. `"child|grandchild"`. Defaults to "" (root graph).', + ) + parent_checkpoint = models.ForeignKey( + "self", null=True, on_delete=models.CASCADE, related_name="children", help_text="Parent checkpoint ID." + ) + checkpoint = models.JSONField(null=True, help_text="Serialized checkpoint data.") + metadata = models.JSONField(null=True, help_text="Serialized checkpoint metadata.") + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["id", "checkpoint_ns", "thread"], + name="unique_checkpoint", + ) + ] + + @property + def pending_sends(self) -> Iterable["ConversationCheckpointWrite"]: + if self.parent_checkpoint is None: + return [] + return self.parent_checkpoint.writes.filter(channel=TASKS).order_by("task_id", "idx") + + @property + def pending_writes(self) -> Iterable["ConversationCheckpointWrite"]: + return self.writes.order_by("idx", "task_id") + + +class ConversationCheckpointBlob(UUIDModel): + checkpoint = models.ForeignKey(ConversationCheckpoint, on_delete=models.CASCADE, related_name="blobs") + channel = models.TextField( + help_text="An arbitrary string defining the channel name. For example, it can be a node name or a reserved LangGraph's enum." + ) + version = models.TextField(help_text="Monotonically increasing version of the channel.") + type = models.TextField(null=True, help_text="Type of the serialized blob. For example, `json`.") + blob = models.BinaryField(null=True) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["checkpoint_id", "channel", "version"], + name="unique_checkpoint_blob", + ) + ] + + +class ConversationCheckpointWrite(UUIDModel): + checkpoint = models.ForeignKey(ConversationCheckpoint, on_delete=models.CASCADE, related_name="writes") + task_id = models.UUIDField(help_text="Identifier for the task creating the checkpoint write.") + idx = models.IntegerField( + help_text="Index of the checkpoint write. It is an integer value where negative numbers are reserved for special cases, such as node interruption." + ) + channel = models.TextField( + help_text="An arbitrary string defining the channel name. For example, it can be a node name or a reserved LangGraph's enum." + ) + type = models.TextField(null=True, help_text="Type of the serialized blob. 
For example, `json`.") + blob = models.BinaryField(null=True) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["checkpoint_id", "task_id", "idx"], + name="unique_checkpoint_write", + ) + ] diff --git a/ee/session_recordings/queries/test/__snapshots__/test_session_recording_list_from_filters.ambr b/ee/session_recordings/queries/test/__snapshots__/test_session_recording_list_from_filters.ambr deleted file mode 100644 index 389933177e2ca..0000000000000 --- a/ee/session_recordings/queries/test/__snapshots__/test_session_recording_list_from_filters.ambr +++ /dev/null @@ -1,1649 +0,0 @@ -# serializer version: 1 -# name: TestClickhouseSessionRecordingsListFromFilters.test_effect_of_poe_settings_on_query_generated_0_test_poe_v1_still_falls_back_to_person_subquery - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, %(hogql_val_0)s)) AS start_time, - max(toTimeZone(s.max_last_timestamp, %(hogql_val_1)s)) AS end_time, - dateDiff(%(hogql_val_2)s, start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, %(hogql_val_3)s)), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff(%(hogql_val_4)s, start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, %(hogql_val_5)s), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, %(hogql_val_6)s), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, %(hogql_val_7)s), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT events.`$session_id` AS session_id - FROM events - WHERE and(equals(events.team_id, 99999), notEmpty(events.`$session_id`), greaterOrEquals(toTimeZone(events.timestamp, %(hogql_val_8)s), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, %(hogql_val_9)s), now64(6, %(hogql_val_10)s)), greaterOrEquals(toTimeZone(events.timestamp, %(hogql_val_11)s), toDateTime64('2020-12-24 23:58:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, %(hogql_val_12)s), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), ifNull(equals(nullIf(nullIf(mat_pp_rgInternal, ''), 'null'), %(hogql_val_13)s), 0)) - GROUP BY events.`$session_id` - HAVING 1))) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 50000 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_effect_of_poe_settings_on_query_generated_1_test_poe_being_unavailable_we_fall_back_to_person_id_overrides - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - 
any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, %(hogql_val_0)s)) AS start_time, - max(toTimeZone(s.max_last_timestamp, %(hogql_val_1)s)) AS end_time, - dateDiff(%(hogql_val_2)s, start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, %(hogql_val_3)s)), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff(%(hogql_val_4)s, start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, %(hogql_val_5)s), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, %(hogql_val_6)s), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, %(hogql_val_7)s), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT events.`$session_id` AS session_id - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - LEFT JOIN - (SELECT argMax(replaceRegexpAll(nullIf(nullIf(JSONExtractRaw(person.properties, %(hogql_val_8)s), ''), 'null'), '^"|"$', ''), person.version) AS properties___rgInternal, person.id AS id - FROM person - WHERE equals(person.team_id, 99999) - GROUP BY person.id - HAVING and(ifNull(equals(argMax(person.is_deleted, person.version), 0), 0), ifNull(less(argMax(toTimeZone(person.created_at, %(hogql_val_9)s), person.version), plus(now64(6, %(hogql_val_10)s), toIntervalDay(1))), 0)) SETTINGS optimize_aggregation_in_order=1) AS events__person ON equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), events__person.id) - WHERE and(equals(events.team_id, 99999), notEmpty(events.`$session_id`), greaterOrEquals(toTimeZone(events.timestamp, %(hogql_val_11)s), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, %(hogql_val_12)s), now64(6, %(hogql_val_13)s)), greaterOrEquals(toTimeZone(events.timestamp, %(hogql_val_14)s), toDateTime64('2020-12-24 23:58:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, %(hogql_val_15)s), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), ifNull(equals(events__person.properties___rgInternal, 
%(hogql_val_16)s), 0))
-       GROUP BY events.`$session_id`
-       HAVING 1)))
-  GROUP BY s.session_id
-  HAVING 1
-  ORDER BY start_time DESC
-  LIMIT 50000
-  '''
-# ---
[This span of the diff removed twenty-seven more generated ClickHouse SQL snapshots, flattened into run-on lines by extraction; only their names are reproduced below, separated in the source file by "# ---" markers. Every deleted query has the same shape: SELECT per-session aggregates FROM session_replay_events AS s — start_time, end_time, duration, first_url, click/keypress/mouse-activity counts, active_seconds, inactive_seconds, console log/warn/error counts, an "ongoing" flag, and activity_score — filtered on team_id = 99999 and a min_first_timestamp window. The ".1" and poe_settings variants additionally restrict s.session_id through an events subquery that applies the person-property filter, either via person and person_distinct_id_overrides joins or via a materialized mat_pp_*/pmat_* column, depending on the PoE and materialization mode under test. Each query then does GROUP BY s.session_id, HAVING 1, ORDER BY start_time DESC, with LIMIT 50000 for the poe_settings variants, or LIMIT 51 OFFSET 0 plus SETTINGS readonly=2, max_execution_time=60, allow_experimental_object_type=1, format_csv_allow_double_quotes=0, max_ast_elements=4000000, max_expanded_ast_elements=4000000, max_bytes_before_external_group_by=0, allow_experimental_analyzer=0 for the rest. The poe_settings variants parameterize timezones and literals as %(hogql_val_N)s placeholders; the others inline 'UTC' and literal values. The final snapshot below is truncated mid-SETTINGS at the end of this span.]
-# name: TestClickhouseSessionRecordingsListFromFilters.test_effect_of_poe_settings_on_query_generated_2_test_poe_being_unavailable_we_fall_back_to_person_subquery_but_still_use_mat_props
-# name: TestClickhouseSessionRecordingsListFromFilters.test_effect_of_poe_settings_on_query_generated_3_test_allow_denormalised_props_fix_does_not_stop_all_poe_processing
-# name: TestClickhouseSessionRecordingsListFromFilters.test_effect_of_poe_settings_on_query_generated_4_test_poe_v2_available_person_properties_are_used_in_replay_listing
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_00_poe_v2_and_materialized_columns_allowed_with_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_00_poe_v2_and_materialized_columns_allowed_with_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_01_poe_v2_and_materialized_columns_allowed_without_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_01_poe_v2_and_materialized_columns_allowed_without_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_02_poe_v2_and_materialized_columns_off_with_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_02_poe_v2_and_materialized_columns_off_with_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_03_poe_v2_and_materialized_columns_off_without_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_03_poe_v2_and_materialized_columns_off_without_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_04_poe_off_and_materialized_columns_allowed_with_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_04_poe_off_and_materialized_columns_allowed_with_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_05_poe_off_and_materialized_columns_allowed_without_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_05_poe_off_and_materialized_columns_allowed_without_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_06_poe_off_and_materialized_columns_not_allowed_with_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_06_poe_off_and_materialized_columns_not_allowed_with_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_07_poe_off_and_materialized_columns_not_allowed_without_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_07_poe_off_and_materialized_columns_not_allowed_without_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_08_poe_v1_and_materialized_columns_allowed_with_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_08_poe_v1_and_materialized_columns_allowed_with_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_09_poe_v1_and_materialized_columns_allowed_without_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_09_poe_v1_and_materialized_columns_allowed_without_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_10_poe_v1_and_not_materialized_columns_not_allowed_with_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_10_poe_v1_and_not_materialized_columns_not_allowed_with_materialization.1
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_11_poe_v1_and_not_materialized_columns_not_allowed_without_materialization
-# name: TestClickhouseSessionRecordingsListFromFilters.test_event_filter_with_person_properties_materialized_11_poe_v1_and_not_materialized_columns_not_allowed_without_materialization.1
-  OFFSET 0 SETTINGS readonly=2,
max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_00_poe_v2_and_materialized_columns_allowed_with_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - WHERE and(equals(events.team_id, 99999), equals(events.person_id, '00000000-0000-0000-0000-000000000000'), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_01_poe_v2_and_materialized_columns_allowed_without_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS 
keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - WHERE and(equals(events.team_id, 99999), equals(events.person_id, '00000000-0000-0000-0000-000000000000'), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_02_poe_v2_and_materialized_columns_off_with_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), 
sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - WHERE and(equals(events.team_id, 99999), equals(events.person_id, '00000000-0000-0000-0000-000000000000'), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_03_poe_v2_and_materialized_columns_off_without_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - WHERE and(equals(events.team_id, 99999), equals(events.person_id, '00000000-0000-0000-0000-000000000000'), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), 
toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_04_poe_off_and_materialized_columns_allowed_with_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), 
toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_05_poe_off_and_materialized_columns_allowed_without_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), 
ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_06_poe_off_and_materialized_columns_not_allowed_with_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 
00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_07_poe_off_and_materialized_columns_not_allowed_without_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), 
toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_08_poe_v1_and_materialized_columns_allowed_with_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY 
start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_09_poe_v1_and_materialized_columns_allowed_without_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - 
allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_10_poe_v1_and_not_materialized_columns_not_allowed_with_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, 
- max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - allow_experimental_analyzer=0 - ''' -# --- -# name: TestClickhouseSessionRecordingsListFromFilters.test_person_id_filter_11_poe_v1_and_not_materialized_columns_not_allowed_without_materialization - ''' - SELECT s.session_id AS session_id, - any(s.team_id), - any(s.distinct_id), - min(toTimeZone(s.min_first_timestamp, 'UTC')) AS start_time, - max(toTimeZone(s.max_last_timestamp, 'UTC')) AS end_time, - dateDiff('SECOND', start_time, end_time) AS duration, - argMinMerge(s.first_url) AS first_url, - sum(s.click_count) AS click_count, - sum(s.keypress_count) AS keypress_count, - sum(s.mouse_activity_count) AS mouse_activity_count, - divide(sum(s.active_milliseconds), 1000) AS active_seconds, - minus(duration, active_seconds) AS inactive_seconds, - sum(s.console_log_count) AS console_log_count, - sum(s.console_warn_count) AS console_warn_count, - sum(s.console_error_count) AS console_error_count, - ifNull(greaterOrEquals(max(toTimeZone(s._timestamp, 'UTC')), toDateTime64('2021-01-01 13:41:23.000000', 6, 'UTC')), 0) AS ongoing, - round(multiply(divide(plus(plus(plus(divide(sum(s.active_milliseconds), 1000), sum(s.click_count)), sum(s.keypress_count)), sum(s.console_error_count)), plus(plus(plus(plus(sum(s.mouse_activity_count), dateDiff('SECOND', start_time, end_time)), sum(s.console_error_count)), sum(s.console_log_count)), sum(s.console_warn_count))), 100), 2) AS activity_score - FROM session_replay_events AS s - WHERE and(equals(s.team_id, 99999), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), 0), in(s.session_id, - (SELECT DISTINCT events.`$session_id` AS `$session_id` - FROM events - LEFT OUTER JOIN - (SELECT argMax(person_distinct_id_overrides.person_id, person_distinct_id_overrides.version) AS person_id, person_distinct_id_overrides.distinct_id AS distinct_id - FROM person_distinct_id_overrides - WHERE equals(person_distinct_id_overrides.team_id, 99999) - GROUP BY person_distinct_id_overrides.distinct_id - HAVING ifNull(equals(argMax(person_distinct_id_overrides.is_deleted, person_distinct_id_overrides.version), 0), 0) SETTINGS optimize_aggregation_in_order=1) AS events__override ON equals(events.distinct_id, events__override.distinct_id) - WHERE and(equals(events.team_id, 99999), ifNull(equals(if(not(empty(events__override.distinct_id)), events__override.person_id, events.person_id), '00000000-0000-0000-0000-000000000000'), 0), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-11 13:46:23.000000', 6, 'UTC')), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), lessOrEquals(toTimeZone(events.timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), notEmpty(events.`$session_id`)))), ifNull(greaterOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2020-12-25 00:00:00.000000', 6, 'UTC')), 0), ifNull(lessOrEquals(toTimeZone(s.min_first_timestamp, 'UTC'), toDateTime64('2021-01-01 13:46:23.000000', 6, 'UTC')), 0)) - GROUP BY s.session_id - HAVING 1 - ORDER BY start_time DESC - LIMIT 51 - OFFSET 0 SETTINGS readonly=2, - max_execution_time=60, - allow_experimental_object_type=1, - format_csv_allow_double_quotes=0, - max_ast_elements=4000000, - max_expanded_ast_elements=4000000, - max_bytes_before_external_group_by=0, - 
allow_experimental_analyzer=0 - ''' -# --- diff --git a/ee/session_recordings/queries/test/test_session_recording_list_from_filters.py b/ee/session_recordings/queries/test/test_session_recording_list_from_filters.py deleted file mode 100644 index 391d12071966f..0000000000000 --- a/ee/session_recordings/queries/test/test_session_recording_list_from_filters.py +++ /dev/null @@ -1,353 +0,0 @@ -import re -from itertools import product -from uuid import uuid4 - -from dateutil.relativedelta import relativedelta -from django.utils.timezone import now -from freezegun import freeze_time -from parameterized import parameterized - -from ee.clickhouse.materialized_columns.columns import materialize -from posthog.clickhouse.client import sync_execute -from posthog.hogql.ast import CompareOperation, And, SelectQuery -from posthog.hogql.base import Expr -from posthog.hogql.context import HogQLContext -from posthog.hogql.printer import print_ast -from posthog.models import Person -from posthog.models.filters import SessionRecordingsFilter -from posthog.schema import PersonsOnEventsMode -from posthog.session_recordings.queries.session_recording_list_from_filters import ( - SessionRecordingListFromFilters, -) -from posthog.session_recordings.queries.test.session_replay_sql import produce_replay_summary -from posthog.session_recordings.sql.session_replay_event_sql import TRUNCATE_SESSION_REPLAY_EVENTS_TABLE_SQL -from posthog.test.base import ( - APIBaseTest, - ClickhouseTestMixin, - QueryMatchingTest, - snapshot_clickhouse_queries, - _create_event, -) - - -# The HogQL pair of TestClickhouseSessionRecordingsListFromSessionReplay can be renamed when delete the old one -@freeze_time("2021-01-01T13:46:23") -class TestClickhouseSessionRecordingsListFromFilters(ClickhouseTestMixin, APIBaseTest, QueryMatchingTest): - def _print_query(self, query: SelectQuery) -> str: - return print_ast( - query, - HogQLContext(team_id=self.team.pk, enable_select_queries=True), - "clickhouse", - pretty=True, - ) - - def tearDown(self) -> None: - sync_execute(TRUNCATE_SESSION_REPLAY_EVENTS_TABLE_SQL()) - - @property - def base_time(self): - return (now() - relativedelta(hours=1)).replace(microsecond=0, second=0) - - def create_event( - self, - distinct_id, - timestamp, - team=None, - event_name="$pageview", - properties=None, - ): - if team is None: - team = self.team - if properties is None: - properties = {"$os": "Windows 95", "$current_url": "aloha.com/2"} - return _create_event( - team=team, - event=event_name, - timestamp=timestamp, - distinct_id=distinct_id, - properties=properties, - ) - - @parameterized.expand( - [ - [ - "test_poe_v1_still_falls_back_to_person_subquery", - True, - False, - False, - PersonsOnEventsMode.PERSON_ID_NO_OVERRIDE_PROPERTIES_ON_EVENTS, - ], - [ - "test_poe_being_unavailable_we_fall_back_to_person_id_overrides", - False, - False, - False, - PersonsOnEventsMode.PERSON_ID_OVERRIDE_PROPERTIES_JOINED, - ], - [ - "test_poe_being_unavailable_we_fall_back_to_person_subquery_but_still_use_mat_props", - False, - False, - False, - PersonsOnEventsMode.PERSON_ID_OVERRIDE_PROPERTIES_JOINED, - ], - [ - "test_allow_denormalised_props_fix_does_not_stop_all_poe_processing", - False, - True, - False, - PersonsOnEventsMode.PERSON_ID_OVERRIDE_PROPERTIES_ON_EVENTS, - ], - [ - "test_poe_v2_available_person_properties_are_used_in_replay_listing", - False, - True, - True, - PersonsOnEventsMode.PERSON_ID_OVERRIDE_PROPERTIES_ON_EVENTS, - ], - ] - ) - def test_effect_of_poe_settings_on_query_generated( - self, - _name: str, - 
poe_v1: bool, - poe_v2: bool, - allow_denormalized_props: bool, - expected_poe_mode: PersonsOnEventsMode, - ) -> None: - with self.settings( - PERSON_ON_EVENTS_OVERRIDE=poe_v1, - PERSON_ON_EVENTS_V2_OVERRIDE=poe_v2, - ALLOW_DENORMALIZED_PROPS_IN_LISTING=allow_denormalized_props, - ): - assert self.team.person_on_events_mode == expected_poe_mode - materialize("events", "rgInternal", table_column="person_properties") - - filter = SessionRecordingsFilter( - team=self.team, - data={ - "properties": [ - { - "key": "rgInternal", - "value": ["false"], - "operator": "exact", - "type": "person", - } - ] - }, - ) - session_recording_list_instance = SessionRecordingListFromFilters( - filter=filter, team=self.team, hogql_query_modifiers=None - ) - - hogql_parsed_select = session_recording_list_instance.get_query() - printed_query = self._print_query(hogql_parsed_select) - - person_filtering_expr = self._matching_person_filter_expr_from(hogql_parsed_select) - - self._assert_is_events_person_filter(person_filtering_expr) - - if poe_v1 or poe_v2: - # Property used directly from event (from materialized column) - assert "ifNull(equals(nullIf(nullIf(mat_pp_rgInternal, ''), 'null')" in printed_query - else: - # We get the person property value from the persons JOIN - assert re.search( - r"argMax\(replaceRegexpAll\(nullIf\(nullIf\(JSONExtractRaw\(person\.properties, %\(hogql_val_\d+\)s\), ''\), 'null'\), '^\"|\"\$', ''\), person\.version\) AS properties___rgInternal", - printed_query, - ) - # Then we actually filter on that property value - assert re.search( - r"ifNull\(equals\(events__person\.properties___rgInternal, %\(hogql_val_\d+\)s\), 0\)", - printed_query, - ) - self.assertQueryMatchesSnapshot(printed_query) - - def _assert_is_pdi_filter(self, person_filtering_expr: list[Expr]) -> None: - assert person_filtering_expr[0].right.select_from.table.chain == ["person_distinct_ids"] - assert person_filtering_expr[0].right.where.left.chain == ["person", "properties", "rgInternal"] - - def _assert_is_events_person_filter(self, person_filtering_expr: list[Expr]) -> None: - assert person_filtering_expr[0].right.select_from.table.chain == ["events"] - event_person_condition = [ - x - for x in person_filtering_expr[0].right.where.exprs - if isinstance(x, CompareOperation) and x.left.chain == ["person", "properties", "rgInternal"] - ] - assert len(event_person_condition) == 1 - - def _matching_person_filter_expr_from(self, hogql_parsed_select: SelectQuery) -> list[Expr]: - where_conditions: list[Expr] = hogql_parsed_select.where.exprs - ands = [x for x in where_conditions if isinstance(x, And)] - assert len(ands) == 1 - and_comparisons = [x for x in ands[0].exprs if isinstance(x, CompareOperation)] - assert len(and_comparisons) == 1 - assert isinstance(and_comparisons[0].right, SelectQuery) - return and_comparisons - - settings_combinations = [ - ["poe v2 and materialized columns allowed", False, True, True], - ["poe v2 and materialized columns off", False, True, False], - ["poe off and materialized columns allowed", False, False, True], - ["poe off and materialized columns not allowed", False, False, False], - ["poe v1 and materialized columns allowed", True, False, True], - ["poe v1 and not materialized columns not allowed", True, False, False], - ] - - # Options for "materialize person columns" - materialization_options = [ - [" with materialization", True], - [" without materialization", False], - ] - - # Expand the parameter list to the product of all combinations with "materialize person columns" - # e.g. 
[a, b] x [c, d] = [a, c], [a, d], [b, c], [b, d] - test_case_combinations = [ - [f"{name}{mat_option}", poe_v1, poe, mat_columns, mat_person] - for (name, poe_v1, poe, mat_columns), (mat_option, mat_person) in product( - settings_combinations, materialization_options - ) - ] - - @parameterized.expand(test_case_combinations) - @snapshot_clickhouse_queries - def test_event_filter_with_person_properties_materialized( - self, - _name: str, - poe1_enabled: bool, - poe2_enabled: bool, - allow_denormalised_props: bool, - materialize_person_props: bool, - ) -> None: - # KLUDGE: I couldn't figure out how to use @also_test_with_materialized_columns(person_properties=["email"]) - # KLUDGE: and the parameterized.expand decorator at the same time, so we generate test case combos - # KLUDGE: for materialization on and off to test both sides the way the decorator would have - if materialize_person_props: - materialize("events", "email", table_column="person_properties") - materialize("person", "email") - - with self.settings( - PERSON_ON_EVENTS_OVERRIDE=poe1_enabled, - PERSON_ON_EVENTS_V2_OVERRIDE=poe2_enabled, - ALLOW_DENORMALIZED_PROPS_IN_LISTING=allow_denormalised_props, - ): - user_one = "test_event_filter_with_person_properties-user" - user_two = "test_event_filter_with_person_properties-user2" - session_id_one = f"test_event_filter_with_person_properties-1-{str(uuid4())}" - session_id_two = f"test_event_filter_with_person_properties-2-{str(uuid4())}" - - Person.objects.create(team=self.team, distinct_ids=[user_one], properties={"email": "bla"}) - Person.objects.create(team=self.team, distinct_ids=[user_two], properties={"email": "bla2"}) - - self._add_replay_with_pageview(session_id_one, user_one) - produce_replay_summary( - distinct_id=user_one, - session_id=session_id_one, - first_timestamp=(self.base_time + relativedelta(seconds=30)), - team_id=self.team.id, - ) - self._add_replay_with_pageview(session_id_two, user_two) - produce_replay_summary( - distinct_id=user_two, - session_id=session_id_two, - first_timestamp=(self.base_time + relativedelta(seconds=30)), - team_id=self.team.id, - ) - - match_everyone_filter = SessionRecordingsFilter( - team=self.team, - data={"properties": []}, - ) - - session_recording_list_instance = SessionRecordingListFromFilters( - filter=match_everyone_filter, team=self.team, hogql_query_modifiers=None - ) - (session_recordings, _, _) = session_recording_list_instance.run() - - assert sorted([x["session_id"] for x in session_recordings]) == sorted([session_id_one, session_id_two]) - - match_bla_filter = SessionRecordingsFilter( - team=self.team, - data={ - "properties": [ - { - "key": "email", - "value": ["bla"], - "operator": "exact", - "type": "person", - } - ] - }, - ) - - session_recording_list_instance = SessionRecordingListFromFilters( - filter=match_bla_filter, team=self.team, hogql_query_modifiers=None - ) - (session_recordings, _, _) = session_recording_list_instance.run() - - assert len(session_recordings) == 1 - assert session_recordings[0]["session_id"] == session_id_one - - def _add_replay_with_pageview(self, session_id: str, user: str) -> None: - self.create_event( - user, - self.base_time, - properties={"$session_id": session_id, "$window_id": str(uuid4())}, - ) - produce_replay_summary( - distinct_id=user, - session_id=session_id, - first_timestamp=self.base_time, - team_id=self.team.id, - ) - - @parameterized.expand(test_case_combinations) - @snapshot_clickhouse_queries - def test_person_id_filter( - self, - _name: str, - poe2_enabled: bool, - 
poe1_enabled: bool, - allow_denormalised_props: bool, - materialize_person_props: bool, - ) -> None: - # KLUDGE: I couldn't figure out how to use @also_test_with_materialized_columns(person_properties=["email"]) - # KLUDGE: and the parameterized.expand decorator at the same time, so we generate test case combos - # KLUDGE: for materialization on and off to test both sides the way the decorator would have - if materialize_person_props: - # it shouldn't matter to this test whether any column is materialized - # but let's keep the tests in this file similar so we flush out any unexpected interactions - materialize("events", "email", table_column="person_properties") - materialize("person", "email") - - with self.settings( - PERSON_ON_EVENTS_OVERRIDE=poe1_enabled, - PERSON_ON_EVENTS_V2_OVERRIDE=poe2_enabled, - ALLOW_DENORMALIZED_PROPS_IN_LISTING=allow_denormalised_props, - ): - three_user_ids = ["person-1-distinct-1", "person-1-distinct-2", "person-2"] - session_id_one = f"test_person_id_filter-session-one" - session_id_two = f"test_person_id_filter-session-two" - session_id_three = f"test_person_id_filter-session-three" - - p = Person.objects.create( - team=self.team, - distinct_ids=[three_user_ids[0], three_user_ids[1]], - properties={"email": "bla"}, - ) - Person.objects.create( - team=self.team, - distinct_ids=[three_user_ids[2]], - properties={"email": "bla2"}, - ) - - self._add_replay_with_pageview(session_id_one, three_user_ids[0]) - self._add_replay_with_pageview(session_id_two, three_user_ids[1]) - self._add_replay_with_pageview(session_id_three, three_user_ids[2]) - - filter = SessionRecordingsFilter(team=self.team, data={"person_uuid": str(p.uuid)}) - session_recording_list_instance = SessionRecordingListFromFilters( - filter=filter, team=self.team, hogql_query_modifiers=None - ) - (session_recordings, _, _) = session_recording_list_instance.run() - assert sorted([r["session_id"] for r in session_recordings]) == sorted([session_id_two, session_id_one]) diff --git a/ee/session_recordings/session_recording_playlist.py b/ee/session_recordings/session_recording_playlist.py index a3dc50c1228f5..7b1a962b187b7 100644 --- a/ee/session_recordings/session_recording_playlist.py +++ b/ee/session_recordings/session_recording_playlist.py @@ -1,4 +1,3 @@ -import json from typing import Any, Optional import structlog @@ -13,7 +12,7 @@ from posthog.api.forbid_destroy_model import ForbidDestroyModel from posthog.api.routing import TeamAndOrgViewSetMixin from posthog.api.shared import UserBasicSerializer -from posthog.constants import SESSION_RECORDINGS_FILTER_IDS, AvailableFeature +from posthog.constants import AvailableFeature from posthog.models import ( SessionRecording, SessionRecordingPlaylist, @@ -27,7 +26,6 @@ changes_between, log_activity, ) -from posthog.models.filters.session_recordings_filter import SessionRecordingsFilter from posthog.models.team.team import check_is_feature_available_for_team from posthog.models.utils import UUIDT from posthog.rate_limit import ( @@ -37,7 +35,6 @@ from posthog.schema import RecordingsQuery from posthog.session_recordings.session_recording_api import ( list_recordings_response, - list_recordings, query_as_params_to_dict, list_recordings_from_query, ) @@ -230,19 +227,12 @@ def recordings(self, request: request.Request, *args: Any, **kwargs: Any) -> res .values_list("recording_id", flat=True) ) - use_query_type = (request.GET.get("as_query", "False")).lower() == "true" - - if use_query_type: - data_dict = query_as_params_to_dict(request.GET.dict()) - query 
diff --git a/ee/urls.py b/ee/urls.py
index 7c722bc31852f..91b58e0fcb238 100644
--- a/ee/urls.py
+++ b/ee/urls.py
@@ -6,11 +6,11 @@
 from django.urls.conf import path
 
 from ee.api import integration
-from .api.rbac import organization_resource_access, role
 from .api import (
     authentication,
     billing,
+    conversation,
     dashboard_collaborator,
     explicit_team_member,
     feature_flag_role_access,
@@ -19,18 +19,20 @@
     sentry_stats,
     subscription,
 )
+from .api.rbac import organization_resource_access, role
 from .session_recordings import session_recording_playlist
 
 
 def extend_api_router() -> None:
     from posthog.api import (
-        router as root_router,
-        register_grandfathered_environment_nested_viewset,
-        projects_router,
-        organizations_router,
-        project_feature_flags_router,
         environment_dashboards_router,
+        environments_router,
         legacy_project_dashboards_router,
+        organizations_router,
+        project_feature_flags_router,
+        projects_router,
+        register_grandfathered_environment_nested_viewset,
+        router as root_router,
     )
 
     root_router.register(r"billing", billing.BillingViewset, "billing")
@@ -93,6 +95,10 @@ def extend_api_router() -> None:
         ["project_id"],
     )
 
+    environments_router.register(
+        r"conversations", conversation.ConversationViewSet, "environment_conversations", ["team_id"]
+    )
+
 
 # The admin interface is disabled on self-hosted instances, as its misuse can be unsafe
 admin_urlpatterns = (
[~80 binary snapshot images under frontend/__snapshots__/ regenerated; the "Binary files a/... and b/... differ" stanzas are elided here. Affected stories: components-hogqleditor (hog-ql-editor, no-value, no-value-person-properties-disabled), lemon-ui-lemon-calendar and lemon-ui-lemon-calendar-select (custom-styles, default, day-of-week variants, hour, minute, multiple-months, past, upcoming, with-time-toggle, plus two new with-time-toggle-and-multiple-months images), posthog-3000-navigation, posthog-3000-sidebar (dashboards, feature-flags), scenes-app-insights funnel-top-to-bottom-breakdown-edit, scenes-app-max-ai (empty-thread-loading, generation-failure-thread, thread, thread-with-rate-limit, welcome-loading-suggestions), scenes-other-org-member-invites (admin, member, owner), and scenes-other-settings (organization, project, replay features, session-timeout variants, user, web-vitals); each in dark and light.]
diff --git a/frontend/src/layout/GlobalModals.tsx b/frontend/src/layout/GlobalModals.tsx
index 803bb2c5e8237..81bdae758064a 100644
--- a/frontend/src/layout/GlobalModals.tsx
+++ b/frontend/src/layout/GlobalModals.tsx
@@ -5,14 +5,12 @@ import { TimeSensitiveAuthenticationModal } from 'lib/components/TimeSensitiveAu
 import { UpgradeModal } from 'lib/components/UpgradeModal/UpgradeModal'
 import { TwoFactorSetupModal } from 'scenes/authentication/TwoFactorSetupModal'
 import { CreateOrganizationModal } from 'scenes/organization/CreateOrganizationModal'
-import { membersLogic } from 'scenes/organization/membersLogic'
 import { CreateEnvironmentModal } from 'scenes/project/CreateEnvironmentModal'
 import { CreateProjectModal } from 'scenes/project/CreateProjectModal'
 import { SessionPlayerModal } from 'scenes/session-recordings/player/modal/SessionPlayerModal'
 import { inviteLogic } from 'scenes/settings/organization/inviteLogic'
 import { InviteModal } from 'scenes/settings/organization/InviteModal'
 import { PreviewingCustomCssModal } from 'scenes/themes/PreviewingCustomCssModal'
-import { userLogic } from 'scenes/userLogic'
 
 import type { globalModalsLogicType } from './GlobalModalsType'
 
@@ -58,7 +56,6 @@ export function GlobalModals(): JSX.Element {
     useActions(globalModalsLogic)
     const { isInviteModalShown } = useValues(inviteLogic)
     const { hideInviteModal } = useActions(inviteLogic)
-    const { user } = useValues(userLogic)
 
     return (
         <>
@@ -71,17 +68,7 @@ export function GlobalModals(): JSX.Element {
-            {user && user.organization?.enforce_2fa && !user.is_2fa_enabled && (
-                <TwoFactorSetupModal
-                    onSuccess={() => {
-                        userLogic.actions.loadUser()
-                        membersLogic.actions.loadAllMembers()
-                    }}
-                    forceOpen
-                    closable={false}
-                    required={true}
-                />
-            )}
+            [replacement line garbled in extraction; the modal is apparently rendered without the inline enforce_2fa gate]
     )
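With the inline gating removed, GlobalModals no longer inspects `user.organization?.enforce_2fa`; the `two_factor_status` mock added in `frontend/src/mocks/handlers.ts` further down suggests the modal now derives its state from a dedicated endpoint. A hedged sketch of reading that endpoint; the response shape mirrors the mock, everything else is illustrative:

```ts
// Shape mirrors the mock added in frontend/src/mocks/handlers.ts; field semantics are assumptions.
interface TwoFactorStatus {
    is_enabled: boolean
    backup_codes: string[]
    method: 'TOTP' | null
}

async function fetchTwoFactorStatus(): Promise<TwoFactorStatus> {
    const res = await fetch('/api/users/@me/two_factor_status/')
    if (!res.ok) {
        throw new Error(`two_factor_status failed: ${res.status}`)
    }
    return (await res.json()) as TwoFactorStatus
}
```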
diff --git a/frontend/src/layout/navigation-3000/Navigation.scss b/frontend/src/layout/navigation-3000/Navigation.scss
index df5f78ab272c6..42bb779a54d82 100644
--- a/frontend/src/layout/navigation-3000/Navigation.scss
+++ b/frontend/src/layout/navigation-3000/Navigation.scss
@@ -175,7 +175,7 @@
 .Sidebar3000 {
     --sidebar-slider-padding: 0.125rem;
     --sidebar-horizontal-padding: 0.5rem;
-    --sidebar-row-height: 2.5rem;
+    --sidebar-row-height: 3rem;
     --sidebar-background: var(--bg-3000);
 
     position: relative;
@@ -533,8 +533,6 @@
     position: relative;
     display: flex;
-    flex-direction: column;
-    justify-content: center;
     width: 100%;
     height: 100%;
     color: inherit;
@@ -549,7 +547,9 @@
 }
 
 .SidebarListItem__link {
+    flex-direction: column;
     row-gap: 1px;
+    justify-content: center;
     padding: 0 var(--sidebar-horizontal-padding) 0 var(--sidebar-list-item-inset);
     color: inherit !important; // Disable link color
 
     .SidebarListItem[aria-disabled='true'] & {
@@ -558,17 +558,33 @@
 }
 
 .SidebarListItem__button {
+    flex-direction: row;
+    gap: 0.25rem;
     row-gap: 1px;
+    align-items: center;
     padding: 0 var(--sidebar-horizontal-padding) 0 var(--sidebar-list-item-inset);
+    font-size: 1.125rem; // Make icons bigger
     color: inherit !important; // Disable link color
     cursor: pointer;
 
     &:hover {
         background: var(--border-3000);
     }
+
+    .SidebarListItem__icon {
+        flex-shrink: 0;
+    }
+
+    .SidebarListItem__name {
+        overflow: hidden;
+        text-overflow: ellipsis;
+    }
 }
 
 .SidebarListItem__rename {
+    flex-direction: column;
+    justify-content: center;
+
     // Pseudo-elements don't work on inputs, so we use a wrapper div
     background: var(--bg-light);
diff --git a/frontend/src/layout/navigation-3000/components/SidebarList.tsx b/frontend/src/layout/navigation-3000/components/SidebarList.tsx
index 2b63b9a61e9c6..65cd05d65c4d4 100644
--- a/frontend/src/layout/navigation-3000/components/SidebarList.tsx
+++ b/frontend/src/layout/navigation-3000/components/SidebarList.tsx
@@ -232,7 +232,8 @@ function SidebarListItem({ item, validateName, active, style }: SidebarListItemP
     if (isItemClickable(item)) {
         content = (
[element markup garbled in extraction; recoverable change: the bare {item.name} child is replaced by an icon element gated on {item.icon} plus a name element around {item.name}, carrying the new SidebarListItem__icon and SidebarListItem__name classes styled in Navigation.scss above]
         )
     } else if (!save || (!isItemTentative(item) && newName === null)) {
diff --git a/frontend/src/layout/navigation-3000/sidepanel/panels/SidePanelSupport.tsx b/frontend/src/layout/navigation-3000/sidepanel/panels/SidePanelSupport.tsx
index d2bbaca004f2a..7701538ffd36c 100644
--- a/frontend/src/layout/navigation-3000/sidepanel/panels/SidePanelSupport.tsx
+++ b/frontend/src/layout/navigation-3000/sidepanel/panels/SidePanelSupport.tsx
@@ -18,6 +18,8 @@ import { LemonBanner, LemonButton, Link } from '@posthog/lemon-ui'
 import { useActions, useValues } from 'kea'
 import { SupportForm } from 'lib/components/Support/SupportForm'
 import { getPublicSupportSnippet, supportLogic } from 'lib/components/Support/supportLogic'
+import { FEATURE_FLAGS } from 'lib/constants'
+import { featureFlagLogic } from 'lib/logic/featureFlagLogic'
 import React from 'react'
 import { billingLogic } from 'scenes/billing/billingLogic'
 import { organizationLogic } from 'scenes/organizationLogic'
@@ -95,8 +97,14 @@
     )
 }
 
+// In order to set these, turn on the `support-message-override` feature flag.
+const SUPPORT_MESSAGE_OVERRIDE_TITLE = '🎄 🎅 Support during the holidays 🎁 ⛄'
+const SUPPORT_MESSAGE_OVERRIDE_BODY =
+    "We're offering reduced support while we celebrate the holidays. Responses may be slower than normal over the holiday period (23rd December to the 6th January), and between the 25th and 27th of December we'll only be responding to critical issues. Thanks for your patience!"
+
 const SupportFormBlock = ({ onCancel }: { onCancel: () => void }): JSX.Element => {
     const { supportPlans, hasSupportAddonPlan } = useValues(billingLogic)
+    const { featureFlags } = useValues(featureFlagLogic)
 
     return (
@@ -123,36 +131,46 @@ const SupportFormBlock = ({ onCancel }: { onCancel: () => void }): JSX.Element =
                 Cancel
[hunk body garbled in extraction; recoverable change: the block that previously always rendered "Avg support response times", an "Explore options" billing link, and the {supportPlans?.map((plan) => ...)} list (marking "(your plan)" next to the current plan, or next to the addon plan when hasSupportAddonPlan, and showing each plan's SUPPORT_RESPONSE_TIME feature note) is now wrapped in a conditional: when featureFlags[FEATURE_FLAGS.SUPPORT_MESSAGE_OVERRIDE] is on, SUPPORT_MESSAGE_OVERRIDE_TITLE and SUPPORT_MESSAGE_OVERRIDE_BODY render instead; otherwise the original block renders unchanged, keeping both "If placing a support message..." guidance comments]
         )
 }
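The hunk above swaps hard-coded support copy for a flag-gated pair of constants. The gating pattern, reduced to its essentials; the component and constants below are illustrative stand-ins, not the file's real markup:

```tsx
import { useValues } from 'kea'
import { FEATURE_FLAGS } from 'lib/constants'
import { featureFlagLogic } from 'lib/logic/featureFlagLogic'

// Illustrative constants; the real ones are defined in SidePanelSupport.tsx above.
const OVERRIDE_TITLE = 'Support during the holidays'
const DEFAULT_TITLE = 'Avg support response times'

export function SupportNoticeTitle(): JSX.Element {
    const { featureFlags } = useValues(featureFlagLogic)
    // Same gate as the hunk above: override copy renders only while the flag is on.
    return <strong>{featureFlags[FEATURE_FLAGS.SUPPORT_MESSAGE_OVERRIDE] ? OVERRIDE_TITLE : DEFAULT_TITLE}</strong>
}
```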
diff --git a/frontend/src/layout/navigation-3000/types.ts b/frontend/src/layout/navigation-3000/types.ts
index 3f79f6dbda42f..a941e7dfaad74 100644
--- a/frontend/src/layout/navigation-3000/types.ts
+++ b/frontend/src/layout/navigation-3000/types.ts
@@ -151,4 +151,5 @@ export interface TentativeListItem {
 export interface ButtonListItem extends BasicListItem {
     key: '__button__'
     onClick: () => void
+    icon?: JSX.Element
 }
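`ButtonListItem` gaining an optional `icon` is what the `SidebarListItem__icon`/`__name` styling earlier is for. A hedged sketch of constructing such an item; `IconPlus`, the import alias, and the assumption that `name` is the only other field needed from `BasicListItem` are all illustrative:

```tsx
import { IconPlus } from '@posthog/icons'
import type { ButtonListItem } from '~/layout/navigation-3000/types'

// Hypothetical sidebar button row using the new optional icon.
const newPlaylistButton: ButtonListItem = {
    key: '__button__',
    name: 'New playlist',
    icon: <IconPlus />, // rendered in .SidebarListItem__icon; the name truncates via .SidebarListItem__name
    onClick: () => {
        console.log('create a playlist')
    },
}
```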
diff --git a/frontend/src/lib/api.ts b/frontend/src/lib/api.ts
index 37d394a7fa483..f1497f937c334 100644
--- a/frontend/src/lib/api.ts
+++ b/frontend/src/lib/api.ts
@@ -845,9 +845,9 @@ class ApiRequest {
         return apiRequest
     }
 
-    // Chat
-    public chat(teamId?: TeamType['id']): ApiRequest {
-        return this.environmentsDetail(teamId).addPathComponent('query').addPathComponent('chat')
+    // Conversations
+    public conversations(teamId?: TeamType['id']): ApiRequest {
+        return this.environmentsDetail(teamId).addPathComponent('conversations')
     }
 
     // Notebooks
@@ -2547,12 +2547,10 @@ const api = {
         })
     },
 
-    chatURL: (): string => {
-        return new ApiRequest().chat().assembleFullUrl()
-    },
-
-    async chat(data: any): Promise<Response> {
-        return await api.createResponse(this.chatURL(), data)
+    conversations: {
+        async create(data: { content: string; conversation?: string | null }): Promise<Response> {
+            return api.createResponse(new ApiRequest().conversations().assembleFullUrl(), data)
+        },
     },
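Callers of the old `api.chat(data)` move to the namespaced `api.conversations.create`, which posts to the `environments/:team_id/conversations` route registered in `ee/urls.py` above. A usage sketch; the streaming read is an assumption, since the diff only shows that a raw `Response` is returned:

```ts
import api from 'lib/api'

// Start a conversation, or continue one by passing its id back.
async function askMax(content: string, conversationId?: string | null): Promise<void> {
    const response = await api.conversations.create({ content, conversation: conversationId ?? null })
    // Assumption: the endpoint streams; consume the body incrementally.
    const reader = response.body?.getReader()
    const decoder = new TextDecoder()
    while (reader) {
        const { done, value } = await reader.read()
        if (done) {
            break
        }
        console.log(decoder.decode(value))
    }
}
```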
diff --git a/frontend/src/lib/components/HogQLEditor/HogQLEditor.tsx b/frontend/src/lib/components/HogQLEditor/HogQLEditor.tsx
index 9d0cf1f42b893..95b809878a98d 100644
--- a/frontend/src/lib/components/HogQLEditor/HogQLEditor.tsx
+++ b/frontend/src/lib/components/HogQLEditor/HogQLEditor.tsx
@@ -58,7 +58,7 @@ export function HogQLEditor({
             {placeholder ??
                 (metadataSource && isActorsQuery(metadataSource)
                     ? "Enter HogQL expression, such as:\n- properties.$geoip_country_name\n- toInt(properties.$browser_version) * 10\n- concat(properties.name, ' <', properties.email, '>')\n- is_identified ? 'user' : 'anon'"
-                    : "Enter HogQL Expression, such as:\n- properties.$current_url\n- person.properties.$geoip_country_name\n- toInt(properties.`Long Field Name`) * 10\n- concat(event, ' ', distinct_id)\n- if(1 < 2, 'small', 'large')")}
+                    : "Enter HogQL Expression, such as:\n- properties.$current_url\n- person.properties.$geoip_country_name\n- pdi.person.properties.email\n- toInt(properties.`Long Field Name`) * 10\n- concat(event, ' ', distinct_id)\n- if(1 < 2, 'small', 'large')")}
diff --git a/frontend/src/lib/components/VersionChecker/versionCheckerLogic.test.ts b/frontend/src/lib/components/VersionChecker/versionCheckerLogic.test.ts
[index line and first hunk header lost in extraction; the path is inferred from the versionCheckerLogic.ts diff below]
             { versionCount: 1, expectation: null },
             {
                 versionCount: 11,
+                expectation: null,
+            },
+            {
+                versionCount: 51,
                 expectation: {
                     latestUsedVersion: '1.0.0',
-                    latestAvailableVersion: '1.0.10',
-                    numVersionsBehind: 10,
-                    level: 'info',
+                    latestAvailableVersion: '1.0.50',
+                    numVersionsBehind: 50,
+                    level: 'error',
                 },
             },
             {
-                versionCount: 15,
+                minorUsedVersion: 40,
+                versionCount: 1,
                 expectation: {
                     latestUsedVersion: '1.0.0',
-                    latestAvailableVersion: '1.0.14',
-                    numVersionsBehind: 14,
-                    level: 'info',
+                    latestAvailableVersion: '1.40.0',
+                    numVersionsBehind: 40,
+                    level: 'warning',
                 },
             },
             {
-                versionCount: 25,
+                majorUsedVersion: 2,
+                versionCount: 1,
                 expectation: {
                     latestUsedVersion: '1.0.0',
-                    latestAvailableVersion: '1.0.24',
-                    numVersionsBehind: 24,
-                    level: 'error',
+                    latestAvailableVersion: '2.0.0',
+                    numVersionsBehind: 1,
+                    level: 'info',
                 },
             },
         ])('return a version warning if diff is great enough', async (options) => {
             // TODO: How do we clear the persisted value?
             const versionsList = Array.from({ length: options.versionCount }, (_, i) => ({
-                version: `1.0.${i}`,
+                version: `${options.majorUsedVersion || 1}.${options.minorUsedVersion || 0}.${i}`,
             })).reverse()
 
             useMockedVersions(
@@ -143,13 +149,14 @@
                 },
                 {
                     usedVersions: [
-                        { version: '1.80.0', timestamp: '2023-01-01T12:00:00Z' },
-                        { version: '1.83.1-beta', timestamp: '2023-01-01T10:00:00Z' },
-                        { version: '1.84.0-delta', timestamp: '2023-01-01T08:00:00Z' },
+                        { version: '1.40.0', timestamp: '2023-01-01T12:00:00Z' },
+                        { version: '1.41.1-beta', timestamp: '2023-01-01T10:00:00Z' },
+                        { version: '1.42.0', timestamp: '2023-01-01T08:00:00Z' },
+                        { version: '1.42.0-delta', timestamp: '2023-01-01T08:00:00Z' },
                     ],
                     expectation: {
-                        latestUsedVersion: '1.84.0-delta',
-                        numVersionsBehind: 1,
+                        latestUsedVersion: '1.42.0',
+                        numVersionsBehind: 42,
                         latestAvailableVersion: '1.84.0',
                         level: 'warning',
                     },
diff --git a/frontend/src/lib/components/VersionChecker/versionCheckerLogic.ts b/frontend/src/lib/components/VersionChecker/versionCheckerLogic.ts
index 7ffecbbf89c82..4c6067adf4afc 100644
--- a/frontend/src/lib/components/VersionChecker/versionCheckerLogic.ts
+++ b/frontend/src/lib/components/VersionChecker/versionCheckerLogic.ts
@@ -174,6 +174,7 @@
         if (!warning && sdkVersions && latestAvailableVersion) {
             const diff = diffVersions(latestAvailableVersion, latestUsedVersion)
+
             if (diff && diff.diff > 0) {
                 // there's a difference between the latest used version and the latest available version
@@ -188,18 +189,14 @@
                 }
 
                 let level: 'warning' | 'info' | 'error' | undefined
-                if (diff.kind === 'major' || numVersionsBehind >= 20) {
+                if (diff.kind === 'major') {
+                    level = 'info' // it is desirable to be on the latest major version, but not critical
+                } else if (diff.kind === 'minor') {
+                    level = numVersionsBehind >= 40 ? 'warning' : undefined
+                }
+
+                if (level === undefined && numVersionsBehind >= 50) {
                     level = 'error'
-                } else if (diff.kind === 'minor' && diff.diff >= 15) {
-                    level = 'warning'
-                } else if ((diff.kind === 'minor' && diff.diff >= 10) || numVersionsBehind >= 10) {
-                    level = 'info'
-                } else if (latestUsedVersion.extra) {
-                    // if we have an extra (alpha/beta/rc/etc.) version, we should always show a warning if they aren't on the latest
-                    level = 'warning'
-                } else {
-                    // don't warn for a small number of patch versions behind
-                    level = undefined
                 }
 
                 // we check if there is a "latest user version string" to avoid returning odd data in unexpected cases
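The rewritten level logic is easier to sanity-check against the test fixtures above: being a major version behind is merely `info`, 40+ minor versions behind is `warning`, and only 50+ releases behind otherwise escalates to `error`. A standalone restatement of that decision table; the `kind`/`numVersionsBehind` shape mirrors what `diffVersions` appears to return in this file:

```ts
type VersionDiffKind = 'major' | 'minor' | 'patch'

// Mirrors the new logic: majors are informational, minors warn at 40+,
// and anything else 50+ versions behind is an error.
function warningLevel(kind: VersionDiffKind, numVersionsBehind: number): 'info' | 'warning' | 'error' | undefined {
    let level: 'info' | 'warning' | 'error' | undefined
    if (kind === 'major') {
        level = 'info'
    } else if (kind === 'minor') {
        level = numVersionsBehind >= 40 ? 'warning' : undefined
    }
    if (level === undefined && numVersionsBehind >= 50) {
        level = 'error'
    }
    return level
}

// e.g. the fixture above: latest used 1.42.0 vs latest available 1.84.0
// is a minor diff, 42 versions behind -> 'warning'
console.assert(warningLevel('minor', 42) === 'warning')
console.assert(warningLevel('major', 1) === 'info')
console.assert(warningLevel('patch', 50) === 'error')
```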
diff --git a/frontend/src/lib/constants.tsx b/frontend/src/lib/constants.tsx
index 4f5dab68b9942..d41a232518b18 100644
--- a/frontend/src/lib/constants.tsx
+++ b/frontend/src/lib/constants.tsx
@@ -198,19 +198,14 @@ export const FEATURE_FLAGS = {
     SETTINGS_BOUNCE_RATE_PAGE_VIEW_MODE: 'settings-bounce-rate-page-view-mode', // owner: @robbie-c
     ONBOARDING_DASHBOARD_TEMPLATES: 'onboarding-dashboard-templates', // owner: @raquelmsmith
     MULTIPLE_BREAKDOWNS: 'multiple-breakdowns', // owner: @skoob13 #team-product-analytics
-    WEB_ANALYTICS_LIVE_USER_COUNT: 'web-analytics-live-user-count', // owner: @robbie-c
     SETTINGS_SESSION_TABLE_VERSION: 'settings-session-table-version', // owner: @robbie-c
     INSIGHT_FUNNELS_USE_UDF: 'insight-funnels-use-udf', // owner: @aspicer #team-product-analytics
     INSIGHT_FUNNELS_USE_UDF_TRENDS: 'insight-funnels-use-udf-trends', // owner: @aspicer #team-product-analytics
     FIRST_TIME_FOR_USER_MATH: 'first-time-for-user-math', // owner: @skoob13 #team-product-analytics
     MULTITAB_EDITOR: 'multitab-editor', // owner: @EDsCODE #team-data-warehouse
-    WEB_ANALYTICS_REPLAY: 'web-analytics-replay', // owner: @robbie-c
     BATCH_EXPORTS_POSTHOG_HTTP: 'posthog-http-batch-exports',
     EXPERIMENT_MAKE_DECISION: 'experiment-make-decision', // owner: @jurajmajerik #team-feature-success
     DATA_MODELING: 'data-modeling', // owner: @EDsCODE #team-data-warehouse
-    WEB_ANALYTICS_CONVERSION_GOALS: 'web-analytics-conversion-goals', // owner: @robbie-c
-    WEB_ANALYTICS_LAST_CLICK: 'web-analytics-last-click', // owner: @robbie-c
-    WEB_ANALYTICS_LCP_SCORE: 'web-analytics-lcp-score', // owner: @robbie-c
     HEDGEHOG_SKIN_SPIDERHOG: 'hedgehog-skin-spiderhog', // owner: @benjackwhite
     INSIGHT_VARIABLES: 'insight_variables', // owner: @Gilbert09 #team-data-warehouse
     WEB_EXPERIMENTS: 'web-experiments', // owner: @team-feature-success
@@ -225,19 +220,18 @@ export const FEATURE_FLAGS = {
     BILLING_TRIAL_FLOW: 'billing-trial-flow', // owner: @zach
     EDIT_DWH_SOURCE_CONFIG: 'edit_dwh_source_config', // owner: @Gilbert09 #team-data-warehouse
     AI_SURVEY_RESPONSE_SUMMARY: 'ai-survey-response-summary', // owner: @pauldambra
-    CUSTOM_CHANNEL_TYPE_RULES: 'custom-channel-type-rules', // owner: @robbie-c #team-web-analytics
     SELF_SERVE_CREDIT_OVERRIDE: 'self-serve-credit-override', // owner: @zach
     FEATURE_MANAGEMENT_UI: 'feature-management-ui', // owner: @haven #team-feature-flags
     CUSTOM_CSS_THEMES: 'custom-css-themes', // owner: @daibhin
     METALYTICS: 'metalytics', // owner: @surbhi
     EXPERIMENTS_MULTIPLE_METRICS: 'experiments-multiple-metrics', // owner: @jurajmajerik #team-experiments
-    WEB_ANALYTICS_WARN_CUSTOM_EVENT_NO_SESSION: 'web-analytics-warn-custom-event-no-session', // owner: @robbie-c #team-web-analytics
     REMOTE_CONFIG: 'remote-config', // owner: @benjackwhite
     SITE_DESTINATIONS: 'site-destinations', // owner: @mariusandra #team-cdp
     SITE_APP_FUNCTIONS: 'site-app-functions', // owner: @mariusandra #team-cdp
     HOG_TRANSFORMATIONS: 'hog-transformations', // owner: #team-cdp
     REPLAY_HOGQL_FILTERS: 'replay-hogql-filters', // owner: @pauldambra #team-replay
     REPLAY_LIST_RECORDINGS_AS_QUERY: 'replay-list-recordings-as-query', // owner: @pauldambra #team-replay
+    SUPPORT_MESSAGE_OVERRIDE: 'support-message-override', // owner: @abigail
     BILLING_SKIP_FORECASTING: 'billing-skip-forecasting', // owner: @zach
     EXPERIMENT_STATS_V2: 'experiment-stats-v2', // owner: @danielbachhuber #team-experiments
     WEB_ANALYTICS_PERIOD_COMPARISON: 'web-analytics-period-comparison', // owner: @rafaeelaudibert #team-web-analytics
diff --git a/frontend/src/lib/lemon-ui/LemonButton/LemonButton.scss b/frontend/src/lib/lemon-ui/LemonButton/LemonButton.scss
index c7b7a7e40f3e1..6cebd0b3a7c8a 100644
--- a/frontend/src/lib/lemon-ui/LemonButton/LemonButton.scss
+++ b/frontend/src/lib/lemon-ui/LemonButton/LemonButton.scss
@@ -73,7 +73,6 @@
     user-select: none;
     background: none;
     border-radius: var(--radius);
-    outline: none;
     transition: var(--lemon-button-transition);
 
     .font-normal,
diff --git a/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.scss b/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.scss
index b97f0e30bcc3f..cf41641c4c8ea 100644
--- a/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.scss
+++ b/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.scss
@@ -1,4 +1,5 @@
 .LemonCalendar {
+    --lemon-calendar-month-height: 305px;
     --lemon-calendar-row-gap: 2px;
     --lemon-calendar-day-width: 40px;
     --lemon-calendar-today-radius: 2px;
@@ -7,6 +8,9 @@
     // Tricky: needs to match the equivalent height button from LemonButton.scss
     --lemon-calendar-time-button-height: 2.3125rem;
 
+    // Force a fixed month height so that switching to longer months doesn't change the height
+    height: var(--lemon-calendar-month-height);
+
     .LemonCalendar__month {
         width: 100%;
     }
@@ -23,6 +27,10 @@
 }
 
 .LemonCalendar__month tr {
+    &.LemonCalendar__month-header {
+        height: var(--lemon-calendar-time-button-height);
+    }
+
     .LemonButton {
         &.rounded-none {
             border-radius: 0;
diff --git a/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.tsx b/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.tsx
index f955b57a5de06..632cc6c2af229 100644
--- a/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.tsx
+++ b/frontend/src/lib/lemon-ui/LemonCalendar/LemonCalendar.tsx
@@ -81,7 +81,7 @@ export const LemonCalendar = forwardRef(function LemonCalendar(
     return (
[-/+ pair garbled in extraction: the wrapping element's JSX attributes change on this line]
         {showLeftMonth && (
diff --git a/frontend/src/lib/monaco/CodeEditor.tsx b/frontend/src/lib/monaco/CodeEditor.tsx
[file header and first hunk header lost in extraction; the path is inferred from the codeEditorLogic.tsx diff below, and the Record generics are restored by assumption]
     sourceQuery?: AnyDataNode
     globals?: Record<string, any>
     schema?: Record<string, any> | null
-
+    onMetadata?: (metadata: HogQLMetadataResponse) => void
     onError?: (error: string | null, isValidView: boolean) => void
 }
 
 let codeEditorIndex = 0
@@ -121,6 +121,7 @@ export function CodeEditor({
     sourceQuery,
     schema,
     onError,
+    onMetadata,
     ...editorProps
 }: CodeEditorProps): JSX.Element {
     const { isDarkModeOn } = useValues(themeLogic)
@@ -140,6 +141,7 @@
         monaco: monaco,
         editor: editor,
         onError,
+        onMetadata,
     })
 
     useMountedLogic(builtCodeEditorLogic)
diff --git a/frontend/src/lib/monaco/codeEditorLogic.tsx b/frontend/src/lib/monaco/codeEditorLogic.tsx
index 63290fa0012b7..42e95f25209a4 100644
--- a/frontend/src/lib/monaco/codeEditorLogic.tsx
+++ b/frontend/src/lib/monaco/codeEditorLogic.tsx
@@ -50,6 +50,7 @@ export interface CodeEditorLogicProps {
     globals?: Record<string, any>
     multitab?: boolean
     onError?: (error: string | null, isValidView: boolean) => void
+    onMetadata?: (metadata: HogQLMetadataResponse) => void
 }
 
 export const codeEditorLogic = kea<codeEditorLogicType>([
@@ -100,6 +101,7 @@
                     variables,
                 })
                 breakpoint()
+                props.onMetadata?.(response)
                 return [query, response]
             },
         },
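The new `onMetadata` prop surfaces the HogQL metadata response that `codeEditorLogic` already fetches for validation. A hedged usage sketch; the import paths, `language`, and `value` props are assumptions, while `table_names` is the field this PR adds to `HogQLMetadataResponse` in `schema.json` below:

```tsx
import { useState } from 'react'
import { CodeEditor } from 'lib/monaco/CodeEditor'
import type { HogQLMetadataResponse } from '~/queries/schema'

export function QueryEditorWithTables(): JSX.Element {
    const [tables, setTables] = useState<string[]>([])
    return (
        <>
            <CodeEditor
                language="hogQL"
                value="select event from events"
                // Fires whenever the logic fetches fresh metadata for the current query.
                onMetadata={(metadata: HogQLMetadataResponse) => setTables(metadata.table_names ?? [])}
            />
            <div>Tables referenced: {tables.join(', ')}</div>
        </>
    )
}
```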
4 : 6 return ( <> diff --git a/frontend/src/queries/schema.json b/frontend/src/queries/schema.json index c2d5aadc147bb..b81e4669af38a 100644 --- a/frontend/src/queries/schema.json +++ b/frontend/src/queries/schema.json @@ -546,22 +546,6 @@ }, "type": "object" }, - "AssistantCompareFilter": { - "additionalProperties": false, - "properties": { - "compare": { - "default": false, - "description": "Whether to compare the current date range to a previous date range.", - "type": "boolean" - }, - "compare_to": { - "default": "-7d", - "description": "The date range to compare to. The value is a relative date. Examples of relative dates are: `-1y` for 1 year ago, `-14m` for 14 months ago, `-100w` for 100 weeks ago, `-14d` for 14 days ago, `-30h` for 30 hours ago.", - "type": "string" - } - }, - "type": "object" - }, "AssistantDateTimePropertyFilter": { "additionalProperties": false, "properties": { @@ -585,7 +569,7 @@ "type": "string" }, "AssistantEventType": { - "enum": ["status", "message"], + "enum": ["status", "message", "conversation"], "type": "string" }, "AssistantFunnelsBreakdownFilter": { @@ -742,7 +726,7 @@ "description": "Breakdown the chart by a property" }, "dateRange": { - "$ref": "#/definitions/AssistantInsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -1043,27 +1027,11 @@ } ] }, - "AssistantInsightDateRange": { - "additionalProperties": false, - "properties": { - "date_from": { - "default": "-7d", - "description": "Start date. The value can be:\n- a relative date. Examples of relative dates are: `-1y` for 1 year ago, `-14m` for 14 months ago, `-1w` for 1 week ago, `-14d` for 14 days ago, `-30h` for 30 hours ago.\n- an absolute ISO 8601 date string. a constant `yStart` for the current year start. a constant `mStart` for the current month start. a constant `dStart` for the current day start. Prefer using relative dates.", - "type": ["string", "null"] - }, - "date_to": { - "default": null, - "description": "Right boundary of the date range. Use `null` for the current date. You can not use relative dates here.", - "type": ["string", "null"] - } - }, - "type": "object" - }, "AssistantInsightsQueryBase": { "additionalProperties": false, "properties": { "dateRange": { - "$ref": "#/definitions/AssistantInsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -1092,9 +1060,8 @@ "content": { "type": "string" }, - "done": { - "description": "We only need this \"done\" value to tell when the particular message is finished during its streaming. 
It won't be necessary when we optimize streaming to NOT send the entire message every time a character is added.", - "type": "boolean" + "id": { + "type": "string" }, "type": { "const": "ai", @@ -1365,7 +1332,7 @@ "description": "Compare to date range" }, "dateRange": { - "$ref": "#/definitions/AssistantInsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -1469,6 +1436,15 @@ ], "type": "string" }, + "BaseAssistantMessage": { + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + } + }, + "type": "object" + }, "BaseMathType": { "enum": [ "total", @@ -3803,9 +3779,12 @@ "additionalProperties": false, "properties": { "compare": { + "default": false, + "description": "Whether to compare the current date range to a previous date range.", "type": "boolean" }, "compare_to": { + "description": "The date range to compare to. The value is a relative date. Examples of relative dates are: `-1y` for 1 year ago, `-14m` for 14 months ago, `-100w` for 100 weeks ago, `-14d` for 14 days ago, `-30h` for 30 hours ago.", "type": "string" } }, @@ -6220,16 +6199,15 @@ "content": { "type": "string" }, - "done": { - "const": true, - "type": "boolean" + "id": { + "type": "string" }, "type": { "const": "ai/failure", "type": "string" } }, - "required": ["type", "done"], + "required": ["type"], "type": "object" }, "FeaturePropertyFilter": { @@ -6873,7 +6851,7 @@ "description": "Breakdown of the events and actions" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -7265,6 +7243,12 @@ "query": { "type": "string" }, + "table_names": { + "items": { + "type": "string" + }, + "type": "array" + }, "warnings": { "items": { "$ref": "#/definitions/HogQLNotice" @@ -7553,17 +7537,15 @@ "content": { "type": "string" }, - "done": { - "const": true, - "description": "Human messages are only appended when done.", - "type": "boolean" + "id": { + "type": "string" }, "type": { "const": "human", "type": "string" } }, - "required": ["type", "content", "done"], + "required": ["type", "content"], "type": "object" }, "InsightActorsQuery": { @@ -7786,24 +7768,6 @@ }, "type": "object" }, - "InsightDateRange": { - "additionalProperties": false, - "properties": { - "date_from": { - "default": "-7d", - "type": ["string", "null"] - }, - "date_to": { - "type": ["string", "null"] - }, - "explicitDate": { - "default": false, - "description": "Whether the date_from and date_to should be used verbatim. 
Disables rounding to the start and end of period.", - "type": ["boolean", "null"] - } - }, - "type": "object" - }, "InsightFilter": { "anyOf": [ { @@ -7966,7 +7930,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -8023,7 +7987,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -8080,7 +8044,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -8137,7 +8101,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -8194,7 +8158,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -8304,7 +8268,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -8713,7 +8677,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -9426,6 +9390,12 @@ "query": { "type": "string" }, + "table_names": { + "items": { + "type": "string" + }, + "type": "array" + }, "warnings": { "items": { "$ref": "#/definitions/HogQLNotice" @@ -11163,9 +11133,8 @@ "content": { "type": "string" }, - "done": { - "const": true, - "type": "boolean" + "id": { + "type": "string" }, "substeps": { "items": { @@ -11178,7 +11147,7 @@ "type": "string" } }, - "required": ["type", "content", "done"], + "required": ["type", "content"], "type": "object" }, "RecordingOrder": { @@ -11481,7 +11450,7 @@ "description": "Groups aggregation" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -11625,17 +11594,15 @@ "content": { "type": "string" }, - "done": { - "const": true, - "description": "Router messages are not streamed, so they can only be done.", - "type": "boolean" + "id": { + "type": "string" }, "type": { "const": "ai/router", "type": "string" } }, - "required": ["type", "content", "done"], + "required": ["type", "content"], "type": "object" }, "SamplingRate": { @@ -12153,7 +12120,7 @@ "description": "Compare to date range" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -12704,7 +12671,7 @@ "description": "Whether we should be comparing against a specific conversion goal" }, "dateRange": { - "$ref": "#/definitions/InsightDateRange", + "$ref": "#/definitions/DateRange", "description": "Date range for the query" }, "filterTestAccounts": { @@ -12815,8 +12782,11 @@ } ] }, - "done": { - "type": "boolean" + "id": { + "type": "string" + }, + "initiator": { + "type": "string" }, "plan": { "type": "string" @@ -12895,6 
+12865,9 @@ "WebExternalClicksTableQuery": { "additionalProperties": false, "properties": { + "compareFilter": { + "$ref": "#/definitions/CompareFilter" + }, "conversionGoal": { "anyOf": [ { @@ -13008,6 +12981,9 @@ "WebGoalsQuery": { "additionalProperties": false, "properties": { + "compareFilter": { + "$ref": "#/definitions/CompareFilter" + }, "conversionGoal": { "anyOf": [ { @@ -13148,14 +13124,7 @@ "additionalProperties": false, "properties": { "compareFilter": { - "anyOf": [ - { - "$ref": "#/definitions/CompareFilter" - }, - { - "type": "null" - } - ] + "$ref": "#/definitions/CompareFilter" }, "conversionGoal": { "anyOf": [ @@ -13287,14 +13256,7 @@ "$ref": "#/definitions/WebStatsBreakdown" }, "compareFilter": { - "anyOf": [ - { - "$ref": "#/definitions/CompareFilter" - }, - { - "type": "null" - } - ] + "$ref": "#/definitions/CompareFilter" }, "conversionGoal": { "anyOf": [ diff --git a/frontend/src/queries/schema.ts b/frontend/src/queries/schema.ts index 9f8f101449564..7375910003a3f 100644 --- a/frontend/src/queries/schema.ts +++ b/frontend/src/queries/schema.ts @@ -367,6 +367,7 @@ export interface HogQLMetadataResponse { warnings: HogQLNotice[] notices: HogQLNotice[] query_status?: never + table_names?: string[] } export type AutocompleteCompletionItemKind = @@ -819,7 +820,7 @@ interface InsightVizNodeViewProps { /** Base class for insight query nodes. Should not be used directly. */ export interface InsightsQueryBase> extends Node { /** Date range for the query */ - dateRange?: InsightDateRange + dateRange?: DateRange /** * Exclude internal and test users by applying the respective filters * @@ -1004,31 +1005,11 @@ export type AssistantGroupPropertyFilter = AssistantBasePropertyFilter & { export type AssistantPropertyFilter = AssistantGenericPropertyFilter | AssistantGroupPropertyFilter -export interface AssistantInsightDateRange { - /** - * Start date. The value can be: - * - a relative date. Examples of relative dates are: `-1y` for 1 year ago, `-14m` for 14 months ago, `-1w` for 1 week ago, `-14d` for 14 days ago, `-30h` for 30 hours ago. - * - an absolute ISO 8601 date string. - * a constant `yStart` for the current year start. - * a constant `mStart` for the current month start. - * a constant `dStart` for the current day start. - * Prefer using relative dates. - * @default -7d - */ - date_from?: string | null - - /** - * Right boundary of the date range. Use `null` for the current date. You can not use relative dates here. - * @default null - */ - date_to?: string | null -} - export interface AssistantInsightsQueryBase { /** * Date range for the query */ - dateRange?: AssistantInsightDateRange + dateRange?: DateRange /** * Exclude internal and test users by applying the respective filters @@ -1171,7 +1152,7 @@ export interface AssistantTrendsFilter { yAxisScaleType?: TrendsFilterLegacy['y_axis_scale_type'] } -export interface AssistantCompareFilter { +export interface CompareFilter { /** * Whether to compare the current date range to a previous date range. * @default false @@ -1180,7 +1161,6 @@ export interface AssistantCompareFilter { /** * The date range to compare to. The value is a relative date. Examples of relative dates are: `-1y` for 1 year ago, `-14m` for 14 months ago, `-100w` for 100 weeks ago, `-14d` for 14 days ago, `-30h` for 30 hours ago. 
- * @default -7d */ compare_to?: string } @@ -1789,6 +1769,7 @@ interface WebAnalyticsQueryBase> extends DataNode< dateRange?: DateRange properties: WebAnalyticsPropertyFilters conversionGoal?: WebAnalyticsConversionGoal | null + compareFilter?: CompareFilter sampling?: { enabled?: boolean forceSamplingRate?: SamplingRate @@ -1800,7 +1781,6 @@ interface WebAnalyticsQueryBase> extends DataNode< export interface WebOverviewQuery extends WebAnalyticsQueryBase { kind: NodeKind.WebOverviewQuery - compareFilter?: CompareFilter | null includeLCPScore?: boolean } @@ -1852,7 +1832,6 @@ export enum WebStatsBreakdown { export interface WebStatsTableQuery extends WebAnalyticsQueryBase { kind: NodeKind.WebStatsTableQuery breakdownBy: WebStatsBreakdown - compareFilter?: CompareFilter | null includeScrollDepth?: boolean // automatically sets includeBounceRate to true includeBounceRate?: boolean doPathCleaning?: boolean @@ -2321,17 +2300,6 @@ export interface DateRange { explicitDate?: boolean | null } -export interface InsightDateRange { - /** @default -7d */ - date_from?: string | null - date_to?: string | null - /** Whether the date_from and date_to should be used verbatim. Disables - * rounding to the start and end of period. - * @default false - * */ - explicitDate?: boolean | null -} - export type MultipleBreakdownType = Extract export interface Breakdown { @@ -2358,11 +2326,6 @@ export interface BreakdownFilter { breakdown_hide_other_aggregation?: boolean | null // hides the "other" field for trends } -export interface CompareFilter { - compare?: boolean - compare_to?: string -} - // TODO: Rename to `DashboardFilters` for consistency with `HogQLFilters` export interface DashboardFilter { date_from?: string | null @@ -2489,48 +2452,41 @@ export enum AssistantMessageType { Router = 'ai/router', } -export interface HumanMessage { +export interface BaseAssistantMessage { + id?: string +} + +export interface HumanMessage extends BaseAssistantMessage { type: AssistantMessageType.Human content: string - /** Human messages are only appended when done. */ - done: true } -export interface AssistantMessage { +export interface AssistantMessage extends BaseAssistantMessage { type: AssistantMessageType.Assistant content: string - /** - * We only need this "done" value to tell when the particular message is finished during its streaming. - * It won't be necessary when we optimize streaming to NOT send the entire message every time a character is added. - */ - done?: boolean } -export interface ReasoningMessage { +export interface ReasoningMessage extends BaseAssistantMessage { type: AssistantMessageType.Reasoning content: string substeps?: string[] - done: true } -export interface VisualizationMessage { +export interface VisualizationMessage extends BaseAssistantMessage { type: AssistantMessageType.Visualization plan?: string answer?: AssistantTrendsQuery | AssistantFunnelsQuery - done?: boolean + initiator?: string } -export interface FailureMessage { +export interface FailureMessage extends BaseAssistantMessage { type: AssistantMessageType.Failure content?: string - done: true } -export interface RouterMessage { +export interface RouterMessage extends BaseAssistantMessage { type: AssistantMessageType.Router content: string - /** Router messages are not streamed, so they can only be done. 
*/ - done: true } export type RootAssistantMessage = @@ -2544,6 +2500,7 @@ export type RootAssistantMessage = export enum AssistantEventType { Status = 'status', Message = 'message', + Conversation = 'conversation', } export enum AssistantGenerationStatusType { diff --git a/frontend/src/scenes/authentication/TwoFactorSetupModal.tsx b/frontend/src/scenes/authentication/TwoFactorSetupModal.tsx index 8da04b39ed0bd..ae63d8649e87d 100644 --- a/frontend/src/scenes/authentication/TwoFactorSetupModal.tsx +++ b/frontend/src/scenes/authentication/TwoFactorSetupModal.tsx @@ -1,35 +1,25 @@ import { useActions, useValues } from 'kea' import { LemonBanner } from 'lib/lemon-ui/LemonBanner' import { LemonModal } from 'lib/lemon-ui/LemonModal' +import { membersLogic } from 'scenes/organization/membersLogic' +import { userLogic } from 'scenes/userLogic' import { twoFactorLogic } from './twoFactorLogic' import { TwoFactorSetup } from './TwoFactorSetup' -interface TwoFactorSetupModalProps { - onSuccess: () => void - closable?: boolean - required?: boolean - forceOpen?: boolean -} - -export function TwoFactorSetupModal({ - onSuccess, - closable = true, - required = false, - forceOpen = false, -}: TwoFactorSetupModalProps): JSX.Element { - const { isTwoFactorSetupModalOpen } = useValues(twoFactorLogic) - const { toggleTwoFactorSetupModal } = useActions(twoFactorLogic) +export function TwoFactorSetupModal(): JSX.Element { + const { isTwoFactorSetupModalOpen, forceOpenTwoFactorSetupModal } = useValues(twoFactorLogic) + const { closeTwoFactorSetupModal } = useActions(twoFactorLogic) return ( toggleTwoFactorSetupModal(false) : undefined} - closable={closable} + isOpen={isTwoFactorSetupModalOpen || forceOpenTwoFactorSetupModal} + onClose={!forceOpenTwoFactorSetupModal ? () => closeTwoFactorSetupModal() : undefined} + closable={!forceOpenTwoFactorSetupModal} >
    - {required && ( + {forceOpenTwoFactorSetupModal && ( Your organization requires you to set up 2FA. @@ -37,10 +27,9 @@ export function TwoFactorSetupModal({

    Use an authenticator app like Google Authenticator or 1Password to scan the QR code below.

    { - toggleTwoFactorSetupModal(false) - if (onSuccess) { - onSuccess() - } + closeTwoFactorSetupModal() + userLogic.actions.loadUser() + membersLogic.actions.loadAllMembers() }} />
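With its props removed, the setup modal above is driven entirely by twoFactorLogic, whose diff follows. Below is a minimal sketch of a call site under the new contract — the Setup2FAButton component is hypothetical and not part of this diff; only the openTwoFactorSetupModal action comes from this change:

import { useActions } from 'kea'
import { LemonButton } from '@posthog/lemon-ui'
import { twoFactorLogic } from 'scenes/authentication/twoFactorLogic'

// Hypothetical call site: any component can now open the modal via the logic's
// actions instead of rendering it with onSuccess/closable/required props.
export function Setup2FAButton(): JSX.Element {
    const { openTwoFactorSetupModal } = useActions(twoFactorLogic)
    // Passing `true` here would force-open the modal (non-closable, no onClose),
    // which is what the logic itself does on mount when the organization
    // enforces 2FA and the user hasn't enabled it yet.
    return (
        <LemonButton type="primary" onClick={() => openTwoFactorSetupModal()}>
            Set up 2FA
        </LemonButton>
    )
}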
    diff --git a/frontend/src/scenes/authentication/twoFactorLogic.ts b/frontend/src/scenes/authentication/twoFactorLogic.ts index 43d31a7f4d189..37c331b809868 100644 --- a/frontend/src/scenes/authentication/twoFactorLogic.ts +++ b/frontend/src/scenes/authentication/twoFactorLogic.ts @@ -4,7 +4,9 @@ import { forms } from 'kea-forms' import { loaders } from 'kea-loaders' import api from 'lib/api' import { featureFlagLogic } from 'lib/logic/featureFlagLogic' +import { membersLogic } from 'scenes/organization/membersLogic' import { preflightLogic } from 'scenes/PreflightCheck/preflightLogic' +import { userLogic } from 'scenes/userLogic' import type { twoFactorLogicType } from './twoFactorLogicType' @@ -26,7 +28,8 @@ export const twoFactorLogic = kea([ path(['scenes', 'authentication', 'loginLogic']), props({} as TwoFactorLogicProps), connect({ - values: [preflightLogic, ['preflight'], featureFlagLogic, ['featureFlags']], + values: [preflightLogic, ['preflight'], featureFlagLogic, ['featureFlags'], userLogic, ['user']], + actions: [userLogic, ['loadUser'], membersLogic, ['loadAllMembers']], }), actions({ setGeneralError: (code: string, detail: string) => ({ code, detail }), @@ -34,16 +37,24 @@ export const twoFactorLogic = kea([ loadStatus: true, generateBackupCodes: true, disable2FA: true, - toggleTwoFactorSetupModal: (open: boolean) => ({ open }), + openTwoFactorSetupModal: (forceOpen?: boolean) => ({ forceOpen }), + closeTwoFactorSetupModal: true, toggleDisable2FAModal: (open: boolean) => ({ open }), toggleBackupCodesModal: (open: boolean) => ({ open }), - startSetup: true, }), reducers({ isTwoFactorSetupModalOpen: [ false, { - toggleTwoFactorSetupModal: (_, { open }) => open, + openTwoFactorSetupModal: () => true, + closeTwoFactorSetupModal: () => false, + }, + ], + forceOpenTwoFactorSetupModal: [ + false, + { + openTwoFactorSetupModal: (_, { forceOpen }) => !!forceOpen, + closeTwoFactorSetupModal: () => false, }, ], isDisable2FAModalOpen: [ @@ -89,11 +100,9 @@ export const twoFactorLogic = kea([ startSetup: [ {}, { - toggleTwoFactorSetupModal: async ({ open }, breakpoint) => { - if (open) { - breakpoint() - await api.get('api/users/@me/two_factor_start_setup/') - } + openTwoFactorSetupModal: async (_, breakpoint) => { + breakpoint() + await api.get('api/users/@me/two_factor_start_setup/') return { status: 'completed' } }, }, @@ -144,6 +153,10 @@ export const twoFactorLogic = kea([ await api.create('api/users/@me/two_factor_disable/') lemonToast.success('2FA disabled successfully') actions.loadStatus() + + // Refresh user and members + actions.loadUser() + actions.loadAllMembers() } catch (e) { const { code, detail } = e as Record actions.setGeneralError(code, detail) @@ -153,19 +166,17 @@ export const twoFactorLogic = kea([ generateBackupCodesSuccess: () => { lemonToast.success('Backup codes generated successfully') }, - toggleTwoFactorSetupModal: ({ open }) => { - if (!open) { - // Clear the form when closing the modal - actions.resetToken() - } - }, - startSetup: async () => { - await api.get('api/users/@me/two_factor_start_setup/') + closeTwoFactorSetupModal: () => { + // Clear the form when closing the modal + actions.resetToken() }, })), - afterMount(({ actions }) => { - actions.startSetup() + afterMount(({ actions, values }) => { actions.loadStatus() + + if (values.user && values.user.organization?.enforce_2fa && !values.user.is_2fa_enabled) { + actions.openTwoFactorSetupModal(true) + } }), ]) diff --git a/frontend/src/scenes/data-warehouse/editor/OutputPane.tsx 
b/frontend/src/scenes/data-warehouse/editor/OutputPane.tsx index f3ac96bb2d949..2fd8adf883adb 100644 --- a/frontend/src/scenes/data-warehouse/editor/OutputPane.tsx +++ b/frontend/src/scenes/data-warehouse/editor/OutputPane.tsx @@ -31,13 +31,14 @@ import { ChartDisplayType, ExporterFormat } from '~/types' import { dataWarehouseViewsLogic } from '../saved_queries/dataWarehouseViewsLogic' import { multitabEditorLogic } from './multitabEditorLogic' import { outputPaneLogic, OutputTab } from './outputPaneLogic' +import { InfoTab } from './OutputPaneTabs/InfoTab' export function OutputPane(): JSX.Element { const { activeTab } = useValues(outputPaneLogic) const { setActiveTab } = useActions(outputPaneLogic) const { variablesForInsight } = useValues(variablesLogic) - const { editingView, sourceQuery, exportContext, isValidView, error } = useValues(multitabEditorLogic) + const { editingView, sourceQuery, exportContext, isValidView, error, editorKey } = useValues(multitabEditorLogic) const { saveAsInsight, saveAsView, setSourceQuery, runQuery } = useActions(multitabEditorLogic) const { isDarkModeOn } = useValues(themeLogic) const { response, responseLoading, responseError, queryId, pollResponse } = useValues(dataNodeLogic) @@ -90,6 +91,10 @@ export function OutputPane(): JSX.Element { key: OutputTab.Visualization, label: 'Visualization', }, + { + key: OutputTab.Info, + label: 'Info', + }, ]} />
    @@ -151,7 +156,7 @@ export function OutputPane(): JSX.Element {
    -
    +
    @@ -294,6 +300,7 @@ const Content = ({ saveAsInsight, queryId, pollResponse, + editorKey, }: any): JSX.Element | null => { if (activeTab === OutputTab.Results) { if (responseError) { @@ -310,7 +317,9 @@ const Content = ({ return responseLoading ? ( ) : !response ? ( - Query results will appear here +
    + Query results will appear here +
    ) : (
- Query be results will be visualized here +
    + Query results will be visualized here +
    ) : (
    + +
    + ) + } + return null } diff --git a/frontend/src/scenes/data-warehouse/editor/OutputPaneTabs/InfoTab.tsx b/frontend/src/scenes/data-warehouse/editor/OutputPaneTabs/InfoTab.tsx new file mode 100644 index 0000000000000..1c3bbe26558cc --- /dev/null +++ b/frontend/src/scenes/data-warehouse/editor/OutputPaneTabs/InfoTab.tsx @@ -0,0 +1,111 @@ +import { LemonButton, LemonTag, Tooltip } from '@posthog/lemon-ui' +import { useActions, useValues } from 'kea' +import { LemonTable } from 'lib/lemon-ui/LemonTable' +import { humanFriendlyDetailedTime } from 'lib/utils' + +import { multitabEditorLogic } from '../multitabEditorLogic' +import { infoTabLogic } from './infoTabLogic' + +interface InfoTabProps { + codeEditorKey: string +} + +export function InfoTab({ codeEditorKey }: InfoTabProps): JSX.Element { + const { sourceTableItems } = useValues(infoTabLogic({ codeEditorKey: codeEditorKey })) + const { editingView, isEditingMaterializedView } = useValues(multitabEditorLogic) + const { runDataWarehouseSavedQuery } = useActions(multitabEditorLogic) + + return ( +
    +
    +
    +

    Materialization

    + BETA +
    +
    + {isEditingMaterializedView ? ( +
    + {editingView?.last_run_at ? ( + `Last run at ${humanFriendlyDetailedTime(editingView.last_run_at)}` + ) : ( +
    + Materialization scheduled +
    + )} + editingView && runDataWarehouseSavedQuery(editingView.id)} + className="mt-2" + type="secondary" + > + Run now + +
    + ) : ( +
    +

    + Materialized views are a way to pre-compute data in your data warehouse. This allows you + to run queries faster and more efficiently. +

    + editingView && runDataWarehouseSavedQuery(editingView.id)} + type="primary" + disabledReason={editingView ? undefined : 'You must save the view first'} + > + Materialize + +
    + )} +
    +
    +
    +

    Dependencies

    +

    + Dependencies are tables that this query uses. See when a source or materialized table was last run. +

    +
    + name, + }, + { + key: 'Type', + title: 'Type', + render: (_, { type }) => type, + }, + { + key: 'Status', + title: 'Status', + render: (_, { type, status }) => { + if (type === 'source') { + return ( + + N/A + + ) + } + return status + }, + }, + { + key: 'Last run at', + title: 'Last run at', + render: (_, { type, last_run_at }) => { + if (type === 'source') { + return ( + + N/A + + ) + } + return humanFriendlyDetailedTime(last_run_at) + }, + }, + ]} + dataSource={sourceTableItems} + /> +
    + ) +} diff --git a/frontend/src/scenes/data-warehouse/editor/OutputPaneTabs/infoTabLogic.ts b/frontend/src/scenes/data-warehouse/editor/OutputPaneTabs/infoTabLogic.ts new file mode 100644 index 0000000000000..4510e80db6693 --- /dev/null +++ b/frontend/src/scenes/data-warehouse/editor/OutputPaneTabs/infoTabLogic.ts @@ -0,0 +1,63 @@ +import { connect, kea, key, path, props, selectors } from 'kea' +import { databaseTableListLogic } from 'scenes/data-management/database/databaseTableListLogic' +import { dataWarehouseViewsLogic } from 'scenes/data-warehouse/saved_queries/dataWarehouseViewsLogic' + +import { multitabEditorLogic } from '../multitabEditorLogic' +import type { infoTabLogicType } from './infoTabLogicType' + +export interface InfoTableRow { + name: string + type: 'source' | 'table' + status?: string + last_run_at?: string +} + +export interface InfoTabLogicProps { + codeEditorKey: string +} + +export const infoTabLogic = kea([ + path(['data-warehouse', 'editor', 'outputPaneTabs', 'infoTabLogic']), + props({} as InfoTabLogicProps), + key((props) => props.codeEditorKey), + connect((props: InfoTabLogicProps) => ({ + values: [ + multitabEditorLogic({ key: props.codeEditorKey }), + ['metadata'], + databaseTableListLogic, + ['posthogTablesMap', 'dataWarehouseTablesMap'], + dataWarehouseViewsLogic, + ['dataWarehouseSavedQueryMap'], + ], + })), + selectors({ + sourceTableItems: [ + (s) => [s.metadata, s.dataWarehouseSavedQueryMap], + (metadata, dataWarehouseSavedQueryMap) => { + if (!metadata) { + return [] + } + return ( + metadata.table_names?.map((table_name) => { + const view = dataWarehouseSavedQueryMap[table_name] + if (view) { + return { + name: table_name, + type: 'table', + status: view.status, + last_run_at: view.last_run_at || 'never', + } + } + + return { + name: table_name, + type: 'source', + status: undefined, + last_run_at: undefined, + } + }) || [] + ) + }, + ], + }), +]) diff --git a/frontend/src/scenes/data-warehouse/editor/QueryWindow.tsx b/frontend/src/scenes/data-warehouse/editor/QueryWindow.tsx index 02c2457a0381e..7bfbe9310d7e8 100644 --- a/frontend/src/scenes/data-warehouse/editor/QueryWindow.tsx +++ b/frontend/src/scenes/data-warehouse/editor/QueryWindow.tsx @@ -36,7 +36,8 @@ export function QueryWindow(): JSX.Element { }) const { allTabs, activeModelUri, queryInput, editingView, sourceQuery } = useValues(logic) - const { selectTab, deleteTab, createTab, setQueryInput, runQuery, setError, setIsValidView } = useActions(logic) + const { selectTab, deleteTab, createTab, setQueryInput, runQuery, setError, setIsValidView, setMetadata } = + useActions(logic) return (
    @@ -51,7 +52,9 @@ export function QueryWindow(): JSX.Element {
    {editingView && (
    - Editing view "{editingView.name}" + + Editing {editingView.status ? 'materialized view' : 'view'} "{editingView.name}" +
    )} { + setMetadata(metadata) + }, }} /> diff --git a/frontend/src/scenes/data-warehouse/editor/editorSidebarLogic.ts b/frontend/src/scenes/data-warehouse/editor/editorSidebarLogic.tsx similarity index 63% rename from frontend/src/scenes/data-warehouse/editor/editorSidebarLogic.ts rename to frontend/src/scenes/data-warehouse/editor/editorSidebarLogic.tsx index cfd559e59506a..c45ea5559fb5a 100644 --- a/frontend/src/scenes/data-warehouse/editor/editorSidebarLogic.ts +++ b/frontend/src/scenes/data-warehouse/editor/editorSidebarLogic.tsx @@ -1,9 +1,9 @@ +import { Tooltip } from '@posthog/lemon-ui' import Fuse from 'fuse.js' import { connect, kea, path, selectors } from 'kea' import { router } from 'kea-router' import { subscriptions } from 'kea-subscriptions' -import { FEATURE_FLAGS } from 'lib/constants' -import { featureFlagLogic } from 'lib/logic/featureFlagLogic' +import { IconCalculate, IconClipboardEdit } from 'lib/lemon-ui/icons' import { databaseTableListLogic } from 'scenes/data-management/database/databaseTableListLogic' import { sceneLogic } from 'scenes/sceneLogic' import { Scene } from 'scenes/sceneTypes' @@ -42,20 +42,6 @@ const savedQueriesfuse = new Fuse([], { includeMatches: true, }) -const nonMaterializedViewsfuse = new Fuse([], { - keys: [{ name: 'name', weight: 2 }], - threshold: 0.3, - ignoreLocation: true, - includeMatches: true, -}) - -const materializedViewsfuse = new Fuse([], { - keys: [{ name: 'name', weight: 2 }], - threshold: 0.3, - ignoreLocation: true, - includeMatches: true, -}) - export const editorSidebarLogic = kea([ path(['data-warehouse', 'editor', 'editorSidebarLogic']), connect({ @@ -66,8 +52,6 @@ export const editorSidebarLogic = kea([ ['dataWarehouseSavedQueries', 'dataWarehouseSavedQueryMapById', 'dataWarehouseSavedQueriesLoading'], databaseTableListLogic, ['posthogTables', 'dataWarehouseTables', 'databaseLoading', 'views', 'viewsMapById'], - featureFlagLogic, - ['featureFlags'], ], actions: [ editorSceneLogic, @@ -86,19 +70,13 @@ export const editorSidebarLogic = kea([ s.relevantPosthogTables, s.relevantDataWarehouseTables, s.databaseLoading, - s.relevantNonMaterializedViews, - s.relevantMaterializedViews, - s.featureFlags, ], ( relevantSavedQueries, dataWarehouseSavedQueriesLoading, relevantPosthogTables, relevantDataWarehouseTables, - databaseLoading, - relevantNonMaterializedViews, - relevantMaterializedViews, - featureFlags + databaseLoading ) => [ { key: 'data-warehouse-sources', @@ -163,13 +141,19 @@ export const editorSidebarLogic = kea([ key: 'data-warehouse-views', noun: ['view', 'views'], loading: dataWarehouseSavedQueriesLoading, - items: (featureFlags[FEATURE_FLAGS.DATA_MODELING] - ? relevantNonMaterializedViews - : relevantSavedQueries - ).map(([savedQuery, matches]) => ({ + items: relevantSavedQueries.map(([savedQuery, matches]) => ({ key: savedQuery.id, name: savedQuery.name, url: '', + icon: savedQuery.status ? ( + + + + ) : ( + + + + ), searchMatch: matches ? { matchingFields: matches.map((match) => match.key), @@ -195,16 +179,6 @@ export const editorSidebarLogic = kea([ actions.toggleJoinTableModal() }, }, - ...(featureFlags[FEATURE_FLAGS.DATA_MODELING] && !savedQuery.status - ? [ - { - label: 'Materialize', - onClick: () => { - actions.runDataWarehouseSavedQuery(savedQuery.id) - }, - }, - ] - : []), { label: 'Delete', status: 'danger', @@ -215,63 +189,6 @@ export const editorSidebarLogic = kea([ ], })), } as SidebarCategory, - ...(featureFlags[FEATURE_FLAGS.DATA_MODELING] - ? 
[ - { - key: 'data-warehouse-materialized-views', - noun: ['materialized view', 'materialized views'], - loading: dataWarehouseSavedQueriesLoading, - items: relevantMaterializedViews.map(([materializedView, matches]) => ({ - key: materializedView.id, - name: materializedView.name, - url: '', - searchMatch: matches - ? { - matchingFields: matches.map((match) => match.key), - nameHighlightRanges: matches.find((match) => match.key === 'name')?.indices, - } - : null, - onClick: () => { - actions.selectSchema(materializedView) - }, - menuItems: [ - { - label: 'Edit view definition', - onClick: () => { - multitabEditorLogic({ - key: `hogQLQueryEditor/${router.values.location.pathname}`, - }).actions.createTab(materializedView.query.query, materializedView) - }, - }, - { - label: 'Add join', - onClick: () => { - actions.selectSourceTable(materializedView.name) - actions.toggleJoinTableModal() - }, - }, - ...(featureFlags[FEATURE_FLAGS.DATA_MODELING] && materializedView.status - ? [ - { - label: 'Run', - onClick: () => { - actions.runDataWarehouseSavedQuery(materializedView.id) - }, - }, - ] - : []), - { - label: 'Delete', - status: 'danger', - onClick: () => { - actions.deleteDataWarehouseSavedQuery(materializedView.id) - }, - }, - ], - })), - }, - ] - : []), ], ], nonMaterializedViews: [ @@ -327,28 +244,6 @@ export const editorSidebarLogic = kea([ return dataWarehouseSavedQueries.map((savedQuery) => [savedQuery, null]) }, ], - relevantNonMaterializedViews: [ - (s) => [s.nonMaterializedViews, navigation3000Logic.selectors.searchTerm], - (nonMaterializedViews, searchTerm): [DataWarehouseSavedQuery, FuseSearchMatch[] | null][] => { - if (searchTerm) { - return nonMaterializedViewsfuse - .search(searchTerm) - .map((result) => [result.item, result.matches as FuseSearchMatch[]]) - } - return nonMaterializedViews.map((view) => [view, null]) - }, - ], - relevantMaterializedViews: [ - (s) => [s.materializedViews, navigation3000Logic.selectors.searchTerm], - (materializedViews, searchTerm): [DataWarehouseSavedQuery, FuseSearchMatch[] | null][] => { - if (searchTerm) { - return materializedViewsfuse - .search(searchTerm) - .map((result) => [result.item, result.matches as FuseSearchMatch[]]) - } - return materializedViews.map((view) => [view, null]) - }, - ], })), subscriptions({ dataWarehouseTables: (dataWarehouseTables) => { diff --git a/frontend/src/scenes/data-warehouse/editor/multitabEditorLogic.tsx b/frontend/src/scenes/data-warehouse/editor/multitabEditorLogic.tsx index 740ea33aced83..94995a446ae2d 100644 --- a/frontend/src/scenes/data-warehouse/editor/multitabEditorLogic.tsx +++ b/frontend/src/scenes/data-warehouse/editor/multitabEditorLogic.tsx @@ -48,7 +48,12 @@ export const multitabEditorLogic = kea([ connect({ actions: [ dataWarehouseViewsLogic, - ['deleteDataWarehouseSavedQuerySuccess', 'createDataWarehouseSavedQuerySuccess'], + [ + 'loadDataWarehouseSavedQueriesSuccess', + 'deleteDataWarehouseSavedQuerySuccess', + 'createDataWarehouseSavedQuerySuccess', + 'runDataWarehouseSavedQuery', + ], ], }), actions({ @@ -66,13 +71,13 @@ export const multitabEditorLogic = kea([ initialize: true, saveAsView: true, saveAsViewSubmit: (name: string) => ({ name }), - setMetadata: (query: string, metadata: HogQLMetadataResponse) => ({ query, metadata }), saveAsInsight: true, saveAsInsightSubmit: (name: string) => ({ name }), setCacheLoading: (loading: boolean) => ({ loading }), setError: (error: string | null) => ({ error }), setIsValidView: (isValidView: boolean) => ({ isValidView }), setSourceQuery: 
(sourceQuery: DataVisualizationNode) => ({ sourceQuery }), + setMetadata: (metadata: HogQLMetadataResponse) => ({ metadata }), editView: (query: string, view: DataWarehouseSavedQuery) => ({ query, view }), }), propsChanged(({ actions, props }, oldProps) => { @@ -80,7 +85,7 @@ export const multitabEditorLogic = kea([ actions.initialize() } }), - reducers({ + reducers(({ props }) => ({ cacheLoading: [ true, { @@ -149,7 +154,14 @@ export const multitabEditorLogic = kea([ setIsValidView: (_, { isValidView }) => isValidView, }, ], - }), + metadata: [ + null as HogQLMetadataResponse | null, + { + setMetadata: (_, { metadata }) => metadata, + }, + ], + editorKey: [props.key], + })), listeners(({ values, props, actions, asyncActions }) => ({ editView: ({ query, view }) => { const maybeExistingTab = values.allTabs.find((tab) => tab.view?.id === view.id) @@ -388,6 +400,15 @@ export const multitabEditorLogic = kea([ router.actions.push(urls.insightView(insight.short_id)) }, + loadDataWarehouseSavedQueriesSuccess: ({ dataWarehouseSavedQueries }) => { + // keep tab views up to date + const newTabs = values.allTabs.map((tab) => ({ + ...tab, + view: dataWarehouseSavedQueries.find((v) => v.id === tab.view?.id), + })) + actions.setTabs(newTabs) + actions.updateState() + }, deleteDataWarehouseSavedQuerySuccess: ({ payload: viewId }) => { const tabToRemove = values.allTabs.find((tab) => tab.view?.id === viewId) if (tabToRemove) { @@ -412,7 +433,7 @@ export const multitabEditorLogic = kea([ lemonToast.success('View updated') }, })), - subscriptions(({ props, actions }) => ({ + subscriptions(({ props, actions, values }) => ({ activeModelUri: (activeModelUri) => { if (props.monaco) { const _model = props.monaco.editor.getModel(activeModelUri.uri) @@ -421,6 +442,11 @@ export const multitabEditorLogic = kea([ actions.runQuery(undefined, true) } }, + allTabs: () => { + // keep selected tab up to date + const activeTab = values.allTabs.find((tab) => tab.uri.path === values.activeModelUri?.uri.path) + activeTab && actions.selectTab(activeTab) + }, })), selectors({ exportContext: [ @@ -435,5 +461,11 @@ export const multitabEditorLogic = kea([ } as ExportContext }, ], + isEditingMaterializedView: [ + (s) => [s.editingView], + (editingView) => { + return !!editingView?.status + }, + ], }), ]) diff --git a/frontend/src/scenes/data-warehouse/editor/outputPaneLogic.ts b/frontend/src/scenes/data-warehouse/editor/outputPaneLogic.ts index 659c79b440635..4e06f611dc49d 100644 --- a/frontend/src/scenes/data-warehouse/editor/outputPaneLogic.ts +++ b/frontend/src/scenes/data-warehouse/editor/outputPaneLogic.ts @@ -5,6 +5,7 @@ import type { outputPaneLogicType } from './outputPaneLogicType' export enum OutputTab { Results = 'results', Visualization = 'visualization', + Info = 'info', } export const outputPaneLogic = kea([ diff --git a/frontend/src/scenes/data-warehouse/saved_queries/dataWarehouseViewsLogic.tsx b/frontend/src/scenes/data-warehouse/saved_queries/dataWarehouseViewsLogic.tsx index d66a0285526ba..ae61570189150 100644 --- a/frontend/src/scenes/data-warehouse/saved_queries/dataWarehouseViewsLogic.tsx +++ b/frontend/src/scenes/data-warehouse/saved_queries/dataWarehouseViewsLogic.tsx @@ -70,8 +70,13 @@ export const dataWarehouseViewsLogic = kea([ actions.loadDatabase() }, runDataWarehouseSavedQuery: async ({ viewId }) => { - await api.dataWarehouseSavedQueries.run(viewId) - actions.loadDataWarehouseSavedQueries() + try { + await api.dataWarehouseSavedQueries.run(viewId) + lemonToast.success('Materialization started') + 
actions.loadDataWarehouseSavedQueries() + } catch (error) { + lemonToast.error(`Failed to run materialization`) + } }, })), selectors({ @@ -92,6 +97,17 @@ export const dataWarehouseViewsLogic = kea([ ) }, ], + dataWarehouseSavedQueryMap: [ + (s) => [s.dataWarehouseSavedQueries], + (dataWarehouseSavedQueries) => { + return ( + dataWarehouseSavedQueries?.reduce((acc, cur) => { + acc[cur.name] = cur + return acc + }, {} as Record) ?? {} + ) + }, + ], }), events(({ actions, cache }) => ({ afterMount: () => { diff --git a/frontend/src/scenes/experiments/ExperimentView/DataCollectionCalculator.tsx b/frontend/src/scenes/experiments/ExperimentView/DataCollectionCalculator.tsx index 95938242c143d..e7797f03de7ba 100644 --- a/frontend/src/scenes/experiments/ExperimentView/DataCollectionCalculator.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/DataCollectionCalculator.tsx @@ -3,10 +3,10 @@ import { LemonBanner, LemonInput, Link, Tooltip } from '@posthog/lemon-ui' import { BindLogic, useActions, useValues } from 'kea' import { LemonSlider } from 'lib/lemon-ui/LemonSlider' import { humanFriendlyNumber } from 'lib/utils' -import { insightDataLogic } from 'scenes/insights/insightDataLogic' import { insightLogic } from 'scenes/insights/insightLogic' import { Query } from '~/queries/Query/Query' +import { ExperimentFunnelsQuery, ExperimentTrendsQuery, NodeKind } from '~/queries/schema' import { ExperimentIdType, InsightType } from '~/types' import { MetricInsightId } from '../constants' @@ -120,7 +120,16 @@ export function DataCollectionCalculator({ experimentId }: ExperimentCalculatorP syncWithUrl: false, }) const { insightProps } = useValues(insightLogicInstance) - const { query } = useValues(insightDataLogic(insightProps)) + let query = null + if (experiment.metrics.length > 0) { + query = { + kind: NodeKind.InsightVizNode, + source: + metricType === InsightType.FUNNELS + ? 
(experiment.metrics[0] as ExperimentFunnelsQuery).funnels_query + : (experiment.metrics[0] as ExperimentTrendsQuery).count_query, + } + } const funnelConversionRate = conversionMetrics?.totalRate * 100 || 0 diff --git a/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx b/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx index 52a189c4c324a..5474962ec738b 100644 --- a/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx +++ b/frontend/src/scenes/experiments/ExperimentView/SecondaryMetricsTable.tsx @@ -2,19 +2,13 @@ import { IconInfo, IconPencil, IconPlus } from '@posthog/icons' import { LemonButton, LemonTable, LemonTableColumns, Tooltip } from '@posthog/lemon-ui' import { useActions, useValues } from 'kea' import { EntityFilterInfo } from 'lib/components/EntityFilterInfo' -import { FEATURE_FLAGS } from 'lib/constants' import { IconAreaChart } from 'lib/lemon-ui/icons' import { capitalizeFirstLetter, humanFriendlyNumber } from 'lib/utils' import { useState } from 'react' import { Experiment, InsightType } from '~/types' -import { - experimentLogic, - getDefaultFilters, - getDefaultFunnelsMetric, - TabularSecondaryMetricResults, -} from '../experimentLogic' +import { experimentLogic, getDefaultFunnelsMetric, TabularSecondaryMetricResults } from '../experimentLogic' import { SecondaryMetricChartModal } from '../Metrics/SecondaryMetricChartModal' import { SecondaryMetricModal } from '../Metrics/SecondaryMetricModal' import { VariantTag } from './components' @@ -39,7 +33,6 @@ export function SecondaryMetricsTable({ experimentId }: { experimentId: Experime credibleIntervalForVariant, experimentMathAggregationForTrends, getHighestProbabilityVariant, - featureFlags, } = useValues(experimentLogic({ experimentId })) const { loadExperiment } = useActions(experimentLogic({ experimentId })) @@ -64,13 +57,7 @@ export function SecondaryMetricsTable({ experimentId }: { experimentId: Experime setModalMetricIdx(null) } - // :FLAG: CLEAN UP AFTER MIGRATION - let metrics - if (featureFlags[FEATURE_FLAGS.EXPERIMENTS_HOGQL]) { - metrics = experiment.metrics_secondary - } else { - metrics = experiment.secondary_metrics - } + const metrics = experiment.metrics_secondary const columns: LemonTableColumns = [ { @@ -339,7 +326,7 @@ const AddSecondaryMetricButton = ({ metrics: any openEditModal: (metricIdx: number) => void }): JSX.Element => { - const { experiment, featureFlags } = useValues(experimentLogic({ experimentId })) + const { experiment } = useValues(experimentLogic({ experimentId })) const { setExperiment } = useActions(experimentLogic({ experimentId })) return ( { - // :FLAG: CLEAN UP AFTER MIGRATION - if (featureFlags[FEATURE_FLAGS.EXPERIMENTS_HOGQL]) { - const newMetricsSecondary = [...experiment.metrics_secondary, getDefaultFunnelsMetric()] - setExperiment({ - metrics_secondary: newMetricsSecondary, - }) - openEditModal(newMetricsSecondary.length - 1) - } else { - const newSecondaryMetrics = [ - ...experiment.secondary_metrics, - { - name: '', - filters: getDefaultFilters(InsightType.FUNNELS, undefined), - }, - ] - setExperiment({ - secondary_metrics: newSecondaryMetrics, - }) - openEditModal(newSecondaryMetrics.length - 1) - } + const newMetricsSecondary = [...experiment.metrics_secondary, getDefaultFunnelsMetric()] + setExperiment({ + metrics_secondary: newMetricsSecondary, + }) + openEditModal(newMetricsSecondary.length - 1) }} disabledReason={ metrics.length >= MAX_SECONDARY_METRICS diff --git 
a/frontend/src/scenes/funnels/funnelDataLogic.ts b/frontend/src/scenes/funnels/funnelDataLogic.ts index 0a48c272e929b..54ed637e4d2bd 100644 --- a/frontend/src/scenes/funnels/funnelDataLogic.ts +++ b/frontend/src/scenes/funnels/funnelDataLogic.ts @@ -169,6 +169,7 @@ export const funnelDataLogic = kea([ if ( // TODO: Ideally we don't check filters anymore, but tests are still using this insightData?.filters?.insight !== InsightType.FUNNELS && + querySource && querySource?.kind !== NodeKind.FunnelsQuery ) { return [] @@ -275,6 +276,7 @@ export const funnelDataLogic = kea([ if ( // TODO: Ideally we don't check filters anymore, but tests are still using this insightData?.filters?.insight !== InsightType.FUNNELS && + querySource && querySource?.kind !== NodeKind.FunnelsQuery ) { return false diff --git a/frontend/src/scenes/max/Intro.tsx b/frontend/src/scenes/max/Intro.tsx index c43cd86b53d2a..97f4f9fbfdc56 100644 --- a/frontend/src/scenes/max/Intro.tsx +++ b/frontend/src/scenes/max/Intro.tsx @@ -3,6 +3,7 @@ import { LemonButton, Popover } from '@posthog/lemon-ui' import { useActions, useValues } from 'kea' import { HedgehogBuddy } from 'lib/components/HedgehogBuddy/HedgehogBuddy' import { hedgehogBuddyLogic } from 'lib/components/HedgehogBuddy/hedgehogBuddyLogic' +import { uuid } from 'lib/utils' import { useMemo, useState } from 'react' import { maxGlobalLogic } from './maxGlobalLogic' @@ -19,13 +20,13 @@ export function Intro(): JSX.Element { const { hedgehogConfig } = useValues(hedgehogBuddyLogic) const { acceptDataProcessing } = useActions(maxGlobalLogic) const { dataProcessingAccepted } = useValues(maxGlobalLogic) - const { sessionId } = useValues(maxLogic) + const { conversation } = useValues(maxLogic) const [hedgehogDirection, setHedgehogDirection] = useState<'left' | 'right'>('right') const headline = useMemo(() => { - return HEADLINES[parseInt(sessionId.split('-').at(-1) as string, 16) % HEADLINES.length] - }, []) + return HEADLINES[parseInt((conversation?.id || uuid()).split('-').at(-1) as string, 16) % HEADLINES.length] + }, [conversation?.id]) return ( <> diff --git a/frontend/src/scenes/max/Max.stories.tsx b/frontend/src/scenes/max/Max.stories.tsx index bec5a519de8e0..51dc03ab0cb5c 100644 --- a/frontend/src/scenes/max/Max.stories.tsx +++ b/frontend/src/scenes/max/Max.stories.tsx @@ -6,7 +6,13 @@ import { projectLogic } from 'scenes/projectLogic' import { mswDecorator, useStorybookMocks } from '~/mocks/browser' -import { chatResponseChunk, failureChunk, generationFailureChunk } from './__mocks__/chatResponse.mocks' +import { + chatResponseChunk, + CONVERSATION_ID, + failureChunk, + generationFailureChunk, + humanMessage, +} from './__mocks__/chatResponse.mocks' import { MaxInstance } from './Max' import { maxGlobalLogic } from './maxGlobalLogic' import { maxLogic } from './maxLogic' @@ -16,7 +22,7 @@ const meta: Meta = { decorators: [ mswDecorator({ post: { - '/api/environments/:team_id/query/chat/': (_, res, ctx) => res(ctx.text(chatResponseChunk)), + '/api/environments/:team_id/conversations/': (_, res, ctx) => res(ctx.text(chatResponseChunk)), }, }), ], @@ -28,10 +34,7 @@ const meta: Meta = { } export default meta -// The session ID is hard-coded here, as it's used for randomizing the welcome headline -const SESSION_ID = 'b1b4b3b4-1b3b-4b3b-1b3b4b3b4b3b' - -const Template = ({ sessionId: SESSION_ID }: { sessionId: string }): JSX.Element => { +const Template = ({ conversationId: CONVERSATION_ID }: { conversationId: string }): JSX.Element => { const { acceptDataProcessing } = 
useActions(maxGlobalLogic) useEffect(() => { @@ -40,7 +43,7 @@ const Template = ({ sessionId: SESSION_ID }: { sessionId: string }): JSX.Element return (
    - +
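// Sketch (assumption: a story-level override, not part of this diff): with chat
// streaming moved from `/api/environments/:team_id/query/chat/` to
// `/api/environments/:team_id/conversations/`, an individual story can still
// swap the streamed response per scenario with the helpers imported above, e.g.:
//
//     useStorybookMocks({
//         post: {
//             '/api/environments/:team_id/conversations/': (_, res, ctx) =>
//                 res(ctx.text(generationFailureChunk)),
//         },
//     })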
    @@ -69,7 +72,7 @@ export const Welcome: StoryFn = () => { acceptDataProcessing(false) }, []) - return