diff --git a/bin/copy-posthog-js b/bin/copy-posthog-js index f57e2841bb784..24bc26c761f1b 100755 --- a/bin/copy-posthog-js +++ b/bin/copy-posthog-js @@ -6,6 +6,7 @@ set -e cp node_modules/posthog-js/dist/array.js* frontend/dist/ cp node_modules/posthog-js/dist/array.full.js* frontend/dist/ +cp node_modules/posthog-js/dist/array.full.es5.js* frontend/dist/ cp node_modules/posthog-js/dist/recorder.js* frontend/dist/ cp node_modules/posthog-js/dist/recorder-v2.js* frontend/dist/ cp node_modules/posthog-js/dist/surveys.js* frontend/dist/ diff --git a/cypress/e2e/alerts.cy.ts b/cypress/e2e/alerts.cy.ts index b55f1e09b9494..bd6ca01bcb734 100644 --- a/cypress/e2e/alerts.cy.ts +++ b/cypress/e2e/alerts.cy.ts @@ -16,7 +16,8 @@ describe('Alerts', () => { const createAlert = ( name: string = 'Alert name', lowerThreshold: string = '100', - upperThreshold: string = '200' + upperThreshold: string = '200', + condition?: string ): void => { cy.get('[data-attr=more-button]').click() cy.contains('Manage alerts').click() @@ -24,6 +25,13 @@ describe('Alerts', () => { cy.get('[data-attr=alertForm-name]').clear().type(name) cy.get('[data-attr=subscribed-users').click().type('{downarrow}{enter}') + + if (condition) { + cy.get('[data-attr=alertForm-condition').click() + cy.contains(condition).click() + cy.contains('%').click() + } + cy.get('[data-attr=alertForm-lower-threshold').clear().type(lowerThreshold) cy.get('[data-attr=alertForm-upper-threshold').clear().type(upperThreshold) cy.contains('Create alert').click() @@ -39,7 +47,6 @@ describe('Alerts', () => { cy.get('[data-attr=insight-edit-button]').click() cy.get('[data-attr=chart-filter]').click() cy.contains(displayType).click() - cy.get('.insight-empty-state').should('not.exist') cy.get('[data-attr=insight-save-button]').contains('Save').click() cy.url().should('not.include', '/edit') } @@ -69,7 +76,7 @@ describe('Alerts', () => { }) it('Should warn about an alert deletion', () => { - setInsightDisplayTypeAndSave('Number') + setInsightDisplayTypeAndSave('Area chart') createAlert('Alert to be deleted because of a changed insight') @@ -90,4 +97,28 @@ describe('Alerts', () => { cy.contains('Manage alerts').click() cy.contains('Alert to be deleted because of a changed insight').should('not.exist') }) + + it('Should allow create and delete a relative alert', () => { + cy.get('[data-attr=more-button]').click() + // Alerts should be disabled for trends represented with graphs + cy.get('[data-attr=manage-alerts-button]').should('have.attr', 'aria-disabled', 'true') + + setInsightDisplayTypeAndSave('Bar chart') + + createAlert('Alert name', '10', '20', 'increases by') + cy.reload() + + // Check the alert has the same values as when it was created + cy.get('[data-attr=more-button]').click() + cy.contains('Manage alerts').click() + cy.get('[data-attr=alert-list-item]').contains('Alert name').click() + cy.get('[data-attr=alertForm-name]').should('have.value', 'Alert name') + cy.get('[data-attr=alertForm-lower-threshold').should('have.value', '10') + cy.get('[data-attr=alertForm-upper-threshold').should('have.value', '20') + cy.contains('Delete alert').click() + cy.wait(2000) + + cy.reload() + cy.contains('Alert name').should('not.exist') + }) }) diff --git a/cypress/e2e/notebooks-insights.ts b/cypress/e2e/notebooks-insights.ts new file mode 100644 index 0000000000000..0b007744576c6 --- /dev/null +++ b/cypress/e2e/notebooks-insights.ts @@ -0,0 +1,18 @@ +import { insight, savedInsights } from '../productAnalytics' + +describe('Notebooks', () => { + beforeEach(() => { + 
cy.clickNavMenu('notebooks') + cy.location('pathname').should('include', '/notebooks') + }) + ;['SQL', 'TRENDS', 'FUNNELS', 'RETENTION', 'PATHS', 'STICKINESS', 'LIFECYCLE'].forEach((insightType) => { + it(`Can add a ${insightType} insight`, () => { + savedInsights.createNewInsightOfType(insightType) + insight.editName(`${insightType} Insight`) + insight.save() + cy.get('[data-attr="notebooks-add-button"]').click() + cy.get('[data-attr="notebooks-select-button-create"]').click() + cy.get('.ErrorBoundary').should('not.exist') + }) + }) +}) diff --git a/ee/clickhouse/queries/experiments/funnel_experiment_result.py b/ee/clickhouse/queries/experiments/funnel_experiment_result.py index 20f9631a4dc4f..e311657cc52c7 100644 --- a/ee/clickhouse/queries/experiments/funnel_experiment_result.py +++ b/ee/clickhouse/queries/experiments/funnel_experiment_result.py @@ -8,7 +8,7 @@ from posthog.constants import ExperimentNoResultsErrorKeys from posthog.hogql_queries.experiments import CONTROL_VARIANT_KEY -from posthog.hogql_queries.experiments.funnel_statistics import ( +from posthog.hogql_queries.experiments.funnels_statistics import ( are_results_significant, calculate_credible_intervals, calculate_probabilities, diff --git a/ee/clickhouse/queries/experiments/test_funnel_experiment_result.py b/ee/clickhouse/queries/experiments/test_funnel_experiment_result.py index 374720c3141f1..55fca255ed9ca 100644 --- a/ee/clickhouse/queries/experiments/test_funnel_experiment_result.py +++ b/ee/clickhouse/queries/experiments/test_funnel_experiment_result.py @@ -4,13 +4,13 @@ from flaky import flaky -from posthog.hogql_queries.experiments.funnel_statistics import ( +from posthog.hogql_queries.experiments.funnels_statistics import ( are_results_significant, calculate_expected_loss, calculate_probabilities, calculate_credible_intervals as calculate_funnel_credible_intervals, ) -from posthog.schema import ExperimentSignificanceCode, ExperimentVariantFunnelResult +from posthog.schema import ExperimentSignificanceCode, ExperimentVariantFunnelsBaseStats Probability = float @@ -25,7 +25,7 @@ def logbeta(x: int, y: int) -> float: def calculate_probability_of_winning_for_target( - target_variant: ExperimentVariantFunnelResult, other_variants: list[ExperimentVariantFunnelResult] + target_variant: ExperimentVariantFunnelsBaseStats, other_variants: list[ExperimentVariantFunnelsBaseStats] ) -> Probability: """ Calculates the probability of winning for target variant. 
@@ -146,8 +146,8 @@ def probability_D_beats_A_B_and_C( @flaky(max_runs=10, min_passes=1) class TestFunnelExperimentCalculator(unittest.TestCase): def test_calculate_results(self): - variant_test = ExperimentVariantFunnelResult(key="A", success_count=100, failure_count=10) - variant_control = ExperimentVariantFunnelResult(key="B", success_count=100, failure_count=18) + variant_test = ExperimentVariantFunnelsBaseStats(key="A", success_count=100, failure_count=10) + variant_control = ExperimentVariantFunnelsBaseStats(key="B", success_count=100, failure_count=18) _, probability = calculate_probabilities(variant_control, [variant_test]) self.assertAlmostEqual(probability, 0.918, places=2) @@ -164,8 +164,8 @@ def test_calculate_results(self): self.assertAlmostEqual(credible_intervals[variant_test.key][1], 0.9494, places=3) def test_simulation_result_is_close_to_closed_form_solution(self): - variant_test = ExperimentVariantFunnelResult(key="A", success_count=100, failure_count=10) - variant_control = ExperimentVariantFunnelResult(key="B", success_count=100, failure_count=18) + variant_test = ExperimentVariantFunnelsBaseStats(key="A", success_count=100, failure_count=10) + variant_control = ExperimentVariantFunnelsBaseStats(key="B", success_count=100, failure_count=18) _, probability = calculate_probabilities(variant_control, [variant_test]) self.assertAlmostEqual(probability, 0.918, places=1) @@ -174,9 +174,9 @@ def test_simulation_result_is_close_to_closed_form_solution(self): self.assertAlmostEqual(probability, alternative_probability, places=1) def test_calculate_results_for_two_test_variants(self): - variant_test_1 = ExperimentVariantFunnelResult(key="A", success_count=100, failure_count=10) - variant_test_2 = ExperimentVariantFunnelResult(key="B", success_count=100, failure_count=3) - variant_control = ExperimentVariantFunnelResult(key="C", success_count=100, failure_count=18) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=100, failure_count=10) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="B", success_count=100, failure_count=3) + variant_control = ExperimentVariantFunnelsBaseStats(key="C", success_count=100, failure_count=18) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2]) self.assertAlmostEqual(sum(probabilities), 1) @@ -210,9 +210,9 @@ def test_calculate_results_for_two_test_variants(self): self.assertAlmostEqual(credible_intervals[variant_test_2.key][1], 0.9894, places=3) def test_calculate_results_for_two_test_variants_almost_equal(self): - variant_test_1 = ExperimentVariantFunnelResult(key="A", success_count=120, failure_count=60) - variant_test_2 = ExperimentVariantFunnelResult(key="B", success_count=110, failure_count=52) - variant_control = ExperimentVariantFunnelResult(key="C", success_count=130, failure_count=65) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=120, failure_count=60) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="B", success_count=110, failure_count=52) + variant_control = ExperimentVariantFunnelsBaseStats(key="C", success_count=130, failure_count=65) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2]) self.assertAlmostEqual(sum(probabilities), 1) @@ -245,8 +245,8 @@ def test_calculate_results_for_two_test_variants_almost_equal(self): self.assertAlmostEqual(credible_intervals[variant_test_2.key][1], 0.7460, places=3) def test_absolute_loss_less_than_one_percent_but_not_significant(self): - variant_test_1 
= ExperimentVariantFunnelResult(key="A", success_count=286, failure_count=2014) - variant_control = ExperimentVariantFunnelResult(key="B", success_count=267, failure_count=2031) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=286, failure_count=2014) + variant_control = ExperimentVariantFunnelsBaseStats(key="B", success_count=267, failure_count=2031) probabilities = calculate_probabilities(variant_control, [variant_test_1]) self.assertAlmostEqual(sum(probabilities), 1) @@ -267,10 +267,10 @@ def test_absolute_loss_less_than_one_percent_but_not_significant(self): self.assertAlmostEqual(credible_intervals[variant_test_1.key][1], 0.1384, places=3) def test_calculate_results_for_three_test_variants(self): - variant_test_1 = ExperimentVariantFunnelResult(key="A", success_count=100, failure_count=10) - variant_test_2 = ExperimentVariantFunnelResult(key="B", success_count=100, failure_count=3) - variant_test_3 = ExperimentVariantFunnelResult(key="C", success_count=100, failure_count=30) - variant_control = ExperimentVariantFunnelResult(key="D", success_count=100, failure_count=18) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=100, failure_count=10) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="B", success_count=100, failure_count=3) + variant_test_3 = ExperimentVariantFunnelsBaseStats(key="C", success_count=100, failure_count=30) + variant_control = ExperimentVariantFunnelsBaseStats(key="D", success_count=100, failure_count=18) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2, variant_test_3]) self.assertAlmostEqual(sum(probabilities), 1) @@ -313,10 +313,10 @@ def test_calculate_results_for_three_test_variants(self): self.assertAlmostEqual(credible_intervals[variant_test_3.key][1], 0.8332, places=3) def test_calculate_results_for_three_test_variants_almost_equal(self): - variant_test_1 = ExperimentVariantFunnelResult(key="A", success_count=120, failure_count=60) - variant_test_2 = ExperimentVariantFunnelResult(key="B", success_count=110, failure_count=52) - variant_test_3 = ExperimentVariantFunnelResult(key="C", success_count=100, failure_count=46) - variant_control = ExperimentVariantFunnelResult(key="D", success_count=130, failure_count=65) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=120, failure_count=60) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="B", success_count=110, failure_count=52) + variant_test_3 = ExperimentVariantFunnelsBaseStats(key="C", success_count=100, failure_count=46) + variant_control = ExperimentVariantFunnelsBaseStats(key="D", success_count=130, failure_count=65) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2, variant_test_3]) self.assertAlmostEqual(sum(probabilities), 1) @@ -357,10 +357,10 @@ def test_calculate_results_for_three_test_variants_almost_equal(self): self.assertAlmostEqual(credible_intervals[variant_test_3.key][1], 0.7547, places=3) def test_calculate_results_for_three_test_variants_much_better_than_control(self): - variant_test_1 = ExperimentVariantFunnelResult(key="A", success_count=130, failure_count=60) - variant_test_2 = ExperimentVariantFunnelResult(key="B", success_count=135, failure_count=62) - variant_test_3 = ExperimentVariantFunnelResult(key="C", success_count=132, failure_count=60) - variant_control = ExperimentVariantFunnelResult(key="D", success_count=80, failure_count=65) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=130, 
failure_count=60) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="B", success_count=135, failure_count=62) + variant_test_3 = ExperimentVariantFunnelsBaseStats(key="C", success_count=132, failure_count=60) + variant_control = ExperimentVariantFunnelsBaseStats(key="D", success_count=80, failure_count=65) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2, variant_test_3]) self.assertAlmostEqual(sum(probabilities), 1) @@ -392,14 +392,14 @@ def test_calculate_results_for_three_test_variants_much_better_than_control(self self.assertAlmostEqual(credible_intervals[variant_test_3.key][1], 0.7488, places=3) def test_calculate_results_for_seven_test_variants(self): - variant_test_1 = ExperimentVariantFunnelResult(key="A", success_count=100, failure_count=17) - variant_test_2 = ExperimentVariantFunnelResult(key="B", success_count=100, failure_count=16) - variant_test_3 = ExperimentVariantFunnelResult(key="C", success_count=100, failure_count=30) - variant_test_4 = ExperimentVariantFunnelResult(key="D", success_count=100, failure_count=31) - variant_test_5 = ExperimentVariantFunnelResult(key="E", success_count=100, failure_count=29) - variant_test_6 = ExperimentVariantFunnelResult(key="F", success_count=100, failure_count=32) - variant_test_7 = ExperimentVariantFunnelResult(key="G", success_count=100, failure_count=33) - variant_control = ExperimentVariantFunnelResult(key="H", success_count=100, failure_count=18) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="A", success_count=100, failure_count=17) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="B", success_count=100, failure_count=16) + variant_test_3 = ExperimentVariantFunnelsBaseStats(key="C", success_count=100, failure_count=30) + variant_test_4 = ExperimentVariantFunnelsBaseStats(key="D", success_count=100, failure_count=31) + variant_test_5 = ExperimentVariantFunnelsBaseStats(key="E", success_count=100, failure_count=29) + variant_test_6 = ExperimentVariantFunnelsBaseStats(key="F", success_count=100, failure_count=32) + variant_test_7 = ExperimentVariantFunnelsBaseStats(key="G", success_count=100, failure_count=33) + variant_control = ExperimentVariantFunnelsBaseStats(key="H", success_count=100, failure_count=18) probabilities = calculate_probabilities( variant_control, @@ -487,8 +487,8 @@ def test_calculate_results_for_seven_test_variants(self): self.assertAlmostEqual(credible_intervals[variant_test_7.key][1], 0.8174, places=3) def test_calculate_results_control_is_significant(self): - variant_test = ExperimentVariantFunnelResult(key="test", success_count=100, failure_count=18) - variant_control = ExperimentVariantFunnelResult(key="control", success_count=100, failure_count=10) + variant_test = ExperimentVariantFunnelsBaseStats(key="test", success_count=100, failure_count=18) + variant_control = ExperimentVariantFunnelsBaseStats(key="control", success_count=100, failure_count=10) probabilities = calculate_probabilities(variant_control, [variant_test]) @@ -507,13 +507,13 @@ def test_calculate_results_control_is_significant(self): self.assertAlmostEqual(credible_intervals[variant_test.key][1], 0.9010, places=3) def test_calculate_results_many_variants_control_is_significant(self): - variant_test_1 = ExperimentVariantFunnelResult(key="test_1", success_count=100, failure_count=20) - variant_test_2 = ExperimentVariantFunnelResult(key="test_2", success_count=100, failure_count=21) - variant_test_3 = ExperimentVariantFunnelResult(key="test_3", success_count=100, failure_count=22) - 
variant_test_4 = ExperimentVariantFunnelResult(key="test_4", success_count=100, failure_count=23) - variant_test_5 = ExperimentVariantFunnelResult(key="test_5", success_count=100, failure_count=24) - variant_test_6 = ExperimentVariantFunnelResult(key="test_6", success_count=100, failure_count=25) - variant_control = ExperimentVariantFunnelResult(key="control", success_count=100, failure_count=10) + variant_test_1 = ExperimentVariantFunnelsBaseStats(key="test_1", success_count=100, failure_count=20) + variant_test_2 = ExperimentVariantFunnelsBaseStats(key="test_2", success_count=100, failure_count=21) + variant_test_3 = ExperimentVariantFunnelsBaseStats(key="test_3", success_count=100, failure_count=22) + variant_test_4 = ExperimentVariantFunnelsBaseStats(key="test_4", success_count=100, failure_count=23) + variant_test_5 = ExperimentVariantFunnelsBaseStats(key="test_5", success_count=100, failure_count=24) + variant_test_6 = ExperimentVariantFunnelsBaseStats(key="test_6", success_count=100, failure_count=25) + variant_control = ExperimentVariantFunnelsBaseStats(key="control", success_count=100, failure_count=10) variants_test = [ variant_test_1, diff --git a/ee/clickhouse/queries/experiments/test_trend_experiment_result.py b/ee/clickhouse/queries/experiments/test_trend_experiment_result.py index 4799e3026d624..de983e6f1496c 100644 --- a/ee/clickhouse/queries/experiments/test_trend_experiment_result.py +++ b/ee/clickhouse/queries/experiments/test_trend_experiment_result.py @@ -4,13 +4,13 @@ from flaky import flaky -from posthog.hogql_queries.experiments.trend_statistics import ( +from posthog.hogql_queries.experiments.trends_statistics import ( are_results_significant, calculate_credible_intervals, calculate_p_value, calculate_probabilities, ) -from posthog.schema import ExperimentSignificanceCode, ExperimentVariantTrendBaseStats +from posthog.schema import ExperimentSignificanceCode, ExperimentVariantTrendsBaseStats Probability = float @@ -23,7 +23,7 @@ def logbeta(x: float, y: float) -> float: # Helper function to calculate probability using a different method than the one used in actual code # calculation: https://www.evanmiller.org/bayesian-ab-testing.html#count_ab def calculate_probability_of_winning_for_target_count_data( - target_variant: ExperimentVariantTrendBaseStats, other_variants: list[ExperimentVariantTrendBaseStats] + target_variant: ExperimentVariantTrendsBaseStats, other_variants: list[ExperimentVariantTrendsBaseStats] ) -> Probability: """ Calculates the probability of winning for target variant. 
@@ -97,8 +97,8 @@ def probability_C_beats_A_and_B_count_data( @flaky(max_runs=10, min_passes=1) class TestTrendExperimentCalculator(unittest.TestCase): def test_calculate_results(self): - variant_control = ExperimentVariantTrendBaseStats(key="A", count=20, exposure=1, absolute_exposure=200) - variant_test = ExperimentVariantTrendBaseStats(key="B", count=30, exposure=1, absolute_exposure=200) + variant_control = ExperimentVariantTrendsBaseStats(key="A", count=20, exposure=1, absolute_exposure=200) + variant_test = ExperimentVariantTrendsBaseStats(key="B", count=30, exposure=1, absolute_exposure=200) probabilities = calculate_probabilities(variant_control, [variant_test]) self.assertAlmostEqual(probabilities[1], 0.92, places=1) @@ -117,8 +117,8 @@ def test_calculate_results(self): self.assertAlmostEqual(credible_intervals[variant_test.key][1], 0.2141, places=3) def test_calculate_results_small_numbers(self): - variant_control = ExperimentVariantTrendBaseStats(key="A", count=2, exposure=1, absolute_exposure=200) - variant_test = ExperimentVariantTrendBaseStats(key="B", count=1, exposure=1, absolute_exposure=200) + variant_control = ExperimentVariantTrendsBaseStats(key="A", count=2, exposure=1, absolute_exposure=200) + variant_test = ExperimentVariantTrendsBaseStats(key="B", count=1, exposure=1, absolute_exposure=200) probabilities = calculate_probabilities(variant_control, [variant_test]) self.assertAlmostEqual(probabilities[1], 0.31, places=1) @@ -145,9 +145,9 @@ def test_calculate_count_data_probability(self): self.assertAlmostEqual(probability, probability2) def test_calculate_results_with_three_variants(self): - variant_control = ExperimentVariantTrendBaseStats(key="A", count=20, exposure=1, absolute_exposure=200) - variant_test_1 = ExperimentVariantTrendBaseStats(key="B", count=26, exposure=1, absolute_exposure=200) - variant_test_2 = ExperimentVariantTrendBaseStats(key="C", count=19, exposure=1, absolute_exposure=200) + variant_control = ExperimentVariantTrendsBaseStats(key="A", count=20, exposure=1, absolute_exposure=200) + variant_test_1 = ExperimentVariantTrendsBaseStats(key="B", count=26, exposure=1, absolute_exposure=200) + variant_test_2 = ExperimentVariantTrendsBaseStats(key="C", count=19, exposure=1, absolute_exposure=200) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2]) self.assertAlmostEqual(probabilities[0], 0.16, places=1) @@ -171,9 +171,9 @@ def test_calculate_results_with_three_variants(self): self.assertAlmostEqual(credible_intervals[variant_test_2.key][1], 0.1484, places=3) def test_calculate_significance_when_target_variants_underperform(self): - variant_control = ExperimentVariantTrendBaseStats(key="A", count=250, exposure=1, absolute_exposure=200) - variant_test_1 = ExperimentVariantTrendBaseStats(key="B", count=180, exposure=1, absolute_exposure=200) - variant_test_2 = ExperimentVariantTrendBaseStats(key="C", count=50, exposure=1, absolute_exposure=200) + variant_control = ExperimentVariantTrendsBaseStats(key="A", count=250, exposure=1, absolute_exposure=200) + variant_test_1 = ExperimentVariantTrendsBaseStats(key="B", count=180, exposure=1, absolute_exposure=200) + variant_test_2 = ExperimentVariantTrendsBaseStats(key="C", count=50, exposure=1, absolute_exposure=200) # in this case, should choose B as best test variant p_value = calculate_p_value(variant_control, [variant_test_1, variant_test_2]) @@ -187,7 +187,7 @@ def test_calculate_significance_when_target_variants_underperform(self): self.assertEqual(significant, 
ExperimentSignificanceCode.LOW_WIN_PROBABILITY) # new B variant is worse, such that control probability ought to be high enough - variant_test_1 = ExperimentVariantTrendBaseStats(key="B", count=100, exposure=1, absolute_exposure=200) + variant_test_1 = ExperimentVariantTrendsBaseStats(key="B", count=100, exposure=1, absolute_exposure=200) significant, p_value = are_results_significant( variant_control, [variant_test_1, variant_test_2], [0.95, 0.03, 0.02] @@ -204,9 +204,9 @@ def test_calculate_significance_when_target_variants_underperform(self): self.assertAlmostEqual(credible_intervals[variant_test_2.key][1], 0.3295, places=3) def test_results_with_different_exposures(self): - variant_control = ExperimentVariantTrendBaseStats(key="A", count=50, exposure=1.3, absolute_exposure=260) - variant_test_1 = ExperimentVariantTrendBaseStats(key="B", count=30, exposure=1.8, absolute_exposure=360) - variant_test_2 = ExperimentVariantTrendBaseStats(key="C", count=20, exposure=0.7, absolute_exposure=140) + variant_control = ExperimentVariantTrendsBaseStats(key="A", count=50, exposure=1.3, absolute_exposure=260) + variant_test_1 = ExperimentVariantTrendsBaseStats(key="B", count=30, exposure=1.8, absolute_exposure=360) + variant_test_2 = ExperimentVariantTrendsBaseStats(key="C", count=20, exposure=0.7, absolute_exposure=140) probabilities = calculate_probabilities(variant_control, [variant_test_1, variant_test_2]) # a is control self.assertAlmostEqual(probabilities[0], 0.86, places=1) diff --git a/ee/clickhouse/queries/experiments/trend_experiment_result.py b/ee/clickhouse/queries/experiments/trend_experiment_result.py index 89aae12d05895..0971120f2366a 100644 --- a/ee/clickhouse/queries/experiments/trend_experiment_result.py +++ b/ee/clickhouse/queries/experiments/trend_experiment_result.py @@ -17,7 +17,7 @@ UNIQUE_USERS, ExperimentNoResultsErrorKeys, ) -from posthog.hogql_queries.experiments.trend_statistics import ( +from posthog.hogql_queries.experiments.trends_statistics import ( are_results_significant, calculate_credible_intervals, calculate_probabilities, diff --git a/ee/clickhouse/views/groups.py b/ee/clickhouse/views/groups.py index bfbb375e70990..4970a770854a2 100644 --- a/ee/clickhouse/views/groups.py +++ b/ee/clickhouse/views/groups.py @@ -177,38 +177,32 @@ def property_definitions(self, request: request.Request, **kw): return response.Response(group_type_index_to_properties) - @extend_schema( - parameters=[ - OpenApiParameter( - "group_type_index", - OpenApiTypes.INT, - description="Specify the group type to find property values of", - required=True, - ), - OpenApiParameter( - "key", - OpenApiTypes.STR, - description="Specify the property key to find values for", - required=True, - ), - ] - ) @action(methods=["GET"], detail=False) def property_values(self, request: request.Request, **kw): - rows = sync_execute( - f""" - SELECT {trim_quotes_expr("tupleElement(keysAndValues, 2)")} as value + value_filter = request.GET.get("value") + + query = f""" + SELECT {trim_quotes_expr("tupleElement(keysAndValues, 2)")} as value, count(*) as count FROM groups ARRAY JOIN JSONExtractKeysAndValuesRaw(group_properties) as keysAndValues - WHERE team_id = %(team_id)s AND group_type_index = %(group_type_index)s AND tupleElement(keysAndValues, 1) = %(key)s - GROUP BY tupleElement(keysAndValues, 2) - ORDER BY value ASC - """, - { - "team_id": self.team.pk, - "group_type_index": request.GET["group_type_index"], - "key": request.GET["key"], - }, - ) + WHERE team_id = %(team_id)s + AND group_type_index = 
%(group_type_index)s + AND tupleElement(keysAndValues, 1) = %(key)s + {f"AND {trim_quotes_expr('tupleElement(keysAndValues, 2)')} ILIKE %(value_filter)s" if value_filter else ""} + GROUP BY value + ORDER BY count DESC, value ASC + LIMIT 20 + """ + + params = { + "team_id": self.team.pk, + "group_type_index": request.GET["group_type_index"], + "key": request.GET["key"], + } + + if value_filter: + params["value_filter"] = f"%{value_filter}%" + + rows = sync_execute(query, params) - return response.Response([{"name": name[0]} for name in rows]) + return response.Response([{"name": name, "count": count} for name, count in rows]) diff --git a/ee/clickhouse/views/test/test_clickhouse_groups.py b/ee/clickhouse/views/test/test_clickhouse_groups.py index 10e064095c421..22e0d6e21b5ae 100644 --- a/ee/clickhouse/views/test/test_clickhouse_groups.py +++ b/ee/clickhouse/views/test/test_clickhouse_groups.py @@ -309,17 +309,71 @@ def test_property_values(self): group_key="org:6", properties={"industry": "technology"}, ) + create_group( + team_id=self.team.pk, + group_type_index=0, + group_key="org:7", + properties={"industry": "finance-technology"}, + ) create_group( team_id=self.team.pk, group_type_index=1, group_key="org:1", properties={"industry": "finance"}, ) + + # Test without query parameter response_data = self.client.get( f"/api/projects/{self.team.id}/groups/property_values/?key=industry&group_type_index=0" ).json() + self.assertEqual(len(response_data), 3) + self.assertEqual( + response_data, + [ + {"name": "finance", "count": 1}, + {"name": "finance-technology", "count": 1}, + {"name": "technology", "count": 1}, + ], + ) + + # Test with query parameter + response_data = self.client.get( + f"/api/projects/{self.team.id}/groups/property_values/?key=industry&group_type_index=0&value=fin" + ).json() + self.assertEqual(len(response_data), 2) + self.assertEqual(response_data, [{"name": "finance", "count": 1}, {"name": "finance-technology", "count": 1}]) + + # Test with query parameter - case insensitive + response_data = self.client.get( + f"/api/projects/{self.team.id}/groups/property_values/?key=industry&group_type_index=0&value=TECH" + ).json() self.assertEqual(len(response_data), 2) - self.assertEqual(response_data, [{"name": "finance"}, {"name": "technology"}]) + self.assertEqual( + response_data, [{"name": "finance-technology", "count": 1}, {"name": "technology", "count": 1}] + ) + + # Test with query parameter - no matches + response_data = self.client.get( + f"/api/projects/{self.team.id}/groups/property_values/?key=industry&group_type_index=0&value=healthcare" + ).json() + self.assertEqual(len(response_data), 0) + self.assertEqual(response_data, []) + + # Test with query parameter - exact match + response_data = self.client.get( + f"/api/projects/{self.team.id}/groups/property_values/?key=industry&group_type_index=0&value=technology" + ).json() + self.assertEqual(len(response_data), 2) + self.assertEqual( + response_data, [{"name": "finance-technology", "count": 1}, {"name": "technology", "count": 1}] + ) + + # Test with different group_type_index + response_data = self.client.get( + f"/api/projects/{self.team.id}/groups/property_values/?key=industry&group_type_index=1&value=fin" + ).json() + self.assertEqual(len(response_data), 1) + self.assertEqual(response_data, [{"name": "finance", "count": 1}]) def test_empty_property_values(self): create_group( diff --git a/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--dark.png 
b/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--dark.png index 0d5d3ebcc36d0..98d4a882ab5d5 100644 Binary files a/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--dark.png and b/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--dark.png differ diff --git a/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--light.png b/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--light.png index 7f58cbcc0104b..5336f30852c7d 100644 Binary files a/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--light.png and b/frontend/__snapshots__/scenes-app-experiments--experiment-not-found--light.png differ diff --git a/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png b/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png index 74e8409c16b8b..78ea79ea3f745 100644 Binary files a/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png and b/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-breakdown-edit--light.png differ diff --git a/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-edit--dark.png b/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-edit--dark.png index e86df67429be0..ebceafa1ecce5 100644 Binary files a/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-edit--dark.png and b/frontend/__snapshots__/scenes-app-insights--funnel-top-to-bottom-edit--dark.png differ diff --git a/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--dark.png b/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--dark.png index 8ef8f708a388a..b8b549ad31a31 100644 Binary files a/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--dark.png and b/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--dark.png differ diff --git a/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--light.png b/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--light.png index 3110451381835..7df6f432cbc78 100644 Binary files a/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--light.png and b/frontend/__snapshots__/scenes-app-pipeline--pipeline-node-new-hog-function--light.png differ diff --git a/frontend/__snapshots__/scenes-app-sidepanels--side-panel-docs--light.png b/frontend/__snapshots__/scenes-app-sidepanels--side-panel-docs--light.png index a4ecd58d0b242..ca494887f274e 100644 Binary files a/frontend/__snapshots__/scenes-app-sidepanels--side-panel-docs--light.png and b/frontend/__snapshots__/scenes-app-sidepanels--side-panel-docs--light.png differ diff --git a/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--dark.png b/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--dark.png index 2a139e2120b78..e97345976d3d7 100644 Binary files a/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--dark.png and b/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--dark.png differ diff --git a/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--light.png b/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--light.png index 8de50aeef6858..a61ad424b9c7f 100644 Binary files 
a/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--light.png and b/frontend/__snapshots__/scenes-other-settings--settings-project-with-replay-features--light.png differ diff --git a/frontend/public/services/airtable.png b/frontend/public/services/airtable.png new file mode 100644 index 0000000000000..4d496006b1cc2 Binary files /dev/null and b/frontend/public/services/airtable.png differ diff --git a/frontend/src/layout.ejs b/frontend/src/layout.ejs index fe1f77f7731cb..b9649a9934ad4 100644 --- a/frontend/src/layout.ejs +++ b/frontend/src/layout.ejs @@ -7,7 +7,7 @@ {% include "head.html" %} <%= htmlWebpackPlugin.tags.headTags %><%/* This adds the main.css file! */%> diff --git a/frontend/src/layout.html b/frontend/src/layout.html index 688b942f5b217..a07ce3034f1a8 100644 --- a/frontend/src/layout.html +++ b/frontend/src/layout.html @@ -7,7 +7,7 @@ {% include "head.html" %} route.includes(redirectRoute)) + + if (shouldRedirectToHome) { + return urls.project(team.id) // Go to project home + } + return urls.project(team.id, route) }, [location.pathname]) diff --git a/frontend/src/lib/api.ts b/frontend/src/lib/api.ts index a8e6024171bee..5796e0e2e56df 100644 --- a/frontend/src/lib/api.ts +++ b/frontend/src/lib/api.ts @@ -16,6 +16,7 @@ import { DatabaseSerializedFieldType, ErrorTrackingGroup, HogCompileResponse, + HogQLVariable, QuerySchema, QueryStatusResponse, RecordingsQuery, @@ -962,7 +963,8 @@ const api = { shortId: InsightModel['short_id'], basic?: boolean, refresh?: RefreshType, - filtersOverride?: DashboardFilter | null + filtersOverride?: DashboardFilter | null, + variablesOverride?: Record | null ): Promise>> { return new ApiRequest() .insights() @@ -972,6 +974,7 @@ const api = { basic, refresh, filters_override: filtersOverride, + variables_override: variablesOverride, }) ) .get() @@ -2429,7 +2432,8 @@ const api = { queryId?: string, refresh?: boolean, async?: boolean, - filtersOverride?: DashboardFilter | null + filtersOverride?: DashboardFilter | null, + variablesOverride?: Record | null ): Promise< T extends { [response: string]: any } ? T['response'] extends infer P | undefined @@ -2440,7 +2444,13 @@ const api = { const refreshParam: RefreshType | undefined = refresh && async ? 'force_async' : async ? 
'async' : refresh return await new ApiRequest().query().create({ ...options, - data: { query, client_query_id: queryId, refresh: refreshParam, filters_override: filtersOverride }, + data: { + query, + client_query_id: queryId, + refresh: refreshParam, + filters_override: filtersOverride, + variables_override: variablesOverride, + }, }) }, diff --git a/frontend/src/lib/components/Alerts/SnoozeButton.tsx b/frontend/src/lib/components/Alerts/SnoozeButton.tsx new file mode 100644 index 0000000000000..28516638f209c --- /dev/null +++ b/frontend/src/lib/components/Alerts/SnoozeButton.tsx @@ -0,0 +1,43 @@ +import { dayjs } from 'lib/dayjs' +import { formatDate } from 'lib/utils' + +import { DateFilter } from '../DateFilter/DateFilter' + +const DATETIME_FORMAT = 'MMM D - HH:mm' + +interface SnoozeButtonProps { + onChange: (snoonzeUntil: string) => void + value?: string +} + +export function SnoozeButton({ onChange, value }: SnoozeButtonProps): JSX.Element { + return ( + { + snoozeUntil && onChange(snoozeUntil) + }} + placeholder="Snooze until" + max={31} + isFixedDateMode + showRollingRangePicker={false} + allowedRollingDateOptions={['days', 'weeks', 'months', 'years']} + showCustom + dateOptions={[ + { + key: 'Tomorrow', + values: ['+1d'], + getFormattedDate: (date: dayjs.Dayjs): string => formatDate(date.add(1, 'd'), DATETIME_FORMAT), + defaultInterval: 'day', + }, + { + key: 'One week from now', + values: ['+1w'], + getFormattedDate: (date: dayjs.Dayjs): string => formatDate(date.add(1, 'w'), DATETIME_FORMAT), + defaultInterval: 'day', + }, + ]} + size="medium" + /> + ) +} diff --git a/frontend/src/lib/components/Alerts/alertFormLogic.ts b/frontend/src/lib/components/Alerts/alertFormLogic.ts index 4230dc9238d01..3c0ab234a8ae1 100644 --- a/frontend/src/lib/components/Alerts/alertFormLogic.ts +++ b/frontend/src/lib/components/Alerts/alertFormLogic.ts @@ -3,7 +3,7 @@ import { forms } from 'kea-forms' import api from 'lib/api' import { lemonToast } from 'lib/lemon-ui/LemonToast/LemonToast' -import { AlertCalculationInterval } from '~/queries/schema' +import { AlertCalculationInterval, AlertConditionType, InsightThresholdType } from '~/queries/schema' import { QueryBasedInsightModel } from '~/types' import type { alertFormLogicType } from './alertFormLogicType' @@ -11,7 +11,7 @@ import { AlertType, AlertTypeWrite } from './types' export type AlertFormType = Pick< AlertType, - 'name' | 'enabled' | 'created_at' | 'threshold' | 'subscribed_users' | 'checks' | 'config' + 'name' | 'enabled' | 'created_at' | 'threshold' | 'condition' | 'subscribed_users' | 'checks' | 'config' > & { id?: AlertType['id'] created_by?: AlertType['created_by'] | null @@ -31,6 +31,8 @@ export const alertFormLogic = kea([ actions({ deleteAlert: true, + snoozeAlert: (snoozeUntil: string) => ({ snoozeUntil }), + clearSnooze: true, }), forms(({ props }) => ({ @@ -47,10 +49,9 @@ export const alertFormLogic = kea([ type: 'TrendsAlertConfig', series_index: 0, }, - threshold: { - configuration: { - absoluteThreshold: {}, - }, + threshold: { configuration: { type: InsightThresholdType.ABSOLUTE, bounds: {} } }, + condition: { + type: AlertConditionType.ABSOLUTE_VALUE, }, subscribed_users: [], checks: [], @@ -61,12 +62,17 @@ export const alertFormLogic = kea([ name: !name ? 
'You need to give your alert a name' : undefined, }), submit: async (alert) => { - const payload: Partial = { + const payload: AlertTypeWrite = { ...alert, subscribed_users: alert.subscribed_users?.map(({ id }) => id), insight: props.insightId, } + // absolute value alert can only have absolute threshold + if (payload.condition.type === AlertConditionType.ABSOLUTE_VALUE) { + payload.threshold.configuration.type = InsightThresholdType.ABSOLUTE + } + try { if (alert.id === undefined) { const updatedAlert: AlertType = await api.alerts.create(payload) @@ -101,5 +107,21 @@ export const alertFormLogic = kea([ await api.alerts.delete(values.alertForm.id) props.onEditSuccess() }, + snoozeAlert: async ({ snoozeUntil }) => { + // resolution only allowed on created alert (which will have alertId) + if (!values.alertForm.id) { + throw new Error("Cannot resolve alert that doesn't exist") + } + await api.alerts.update(values.alertForm.id, { snoozed_until: snoozeUntil }) + props.onEditSuccess() + }, + clearSnooze: async () => { + // resolution only allowed on created alert (which will have alertId) + if (!values.alertForm.id) { + throw new Error("Cannot resolve alert that doesn't exist") + } + await api.alerts.update(values.alertForm.id, { snoozed_until: null }) + props.onEditSuccess() + }, })), ]) diff --git a/frontend/src/lib/components/Alerts/insightAlertsLogic.ts b/frontend/src/lib/components/Alerts/insightAlertsLogic.ts index dd6a09a29d08c..6bca4dc317fa1 100644 --- a/frontend/src/lib/components/Alerts/insightAlertsLogic.ts +++ b/frontend/src/lib/components/Alerts/insightAlertsLogic.ts @@ -3,7 +3,7 @@ import { loaders } from 'kea-loaders' import api from 'lib/api' import { insightVizDataLogic } from 'scenes/insights/insightVizDataLogic' -import { GoalLine } from '~/queries/schema' +import { GoalLine, InsightThresholdType } from '~/queries/schema' import { getBreakdown, isInsightVizNode, isTrendsQuery } from '~/queries/utils' import { InsightLogicProps } from '~/types' @@ -65,21 +65,27 @@ export const insightAlertsLogic = kea([ (s) => [s.alerts], (alerts: AlertType[]): GoalLine[] => alerts.flatMap((alert) => { - const thresholds = [] + if ( + alert.threshold.configuration.type !== InsightThresholdType.ABSOLUTE || + !alert.threshold.configuration.bounds + ) { + return [] + } - const absoluteThreshold = alert.threshold.configuration.absoluteThreshold + const bounds = alert.threshold.configuration.bounds - if (absoluteThreshold?.upper !== undefined) { + const thresholds = [] + if (bounds?.upper !== undefined) { thresholds.push({ label: `${alert.name} Upper Threshold`, - value: absoluteThreshold?.upper, + value: bounds?.upper, }) } - if (absoluteThreshold?.lower !== undefined) { + if (bounds?.lower !== undefined) { thresholds.push({ label: `${alert.name} Lower Threshold`, - value: absoluteThreshold?.lower, + value: bounds?.lower, }) } diff --git a/frontend/src/lib/components/Alerts/types.ts b/frontend/src/lib/components/Alerts/types.ts index 864c2a2321909..4641d7fe0728f 100644 --- a/frontend/src/lib/components/Alerts/types.ts +++ b/frontend/src/lib/components/Alerts/types.ts @@ -12,6 +12,7 @@ export type AlertConfig = TrendsAlertConfig export interface AlertTypeBase { name: string condition: AlertCondition + threshold: { configuration: InsightThreshold } enabled: boolean insight: QueryBasedInsightModel config: AlertConfig @@ -20,6 +21,7 @@ export interface AlertTypeBase { export interface AlertTypeWrite extends Omit { subscribed_users: number[] insight: number + snoozed_until?: string | null } export 
interface AlertCheck { @@ -33,7 +35,7 @@ export interface AlertCheck { export interface AlertType extends AlertTypeBase { id: string subscribed_users: UserBasicType[] - threshold: { configuration: InsightThreshold } + condition: AlertCondition created_by: UserBasicType created_at: string state: AlertState @@ -41,4 +43,5 @@ export interface AlertType extends AlertTypeBase { last_checked_at: string checks: AlertCheck[] calculation_interval: AlertCalculationInterval + snoozed_until?: string } diff --git a/frontend/src/lib/components/Alerts/views/EditAlertModal.tsx b/frontend/src/lib/components/Alerts/views/EditAlertModal.tsx index 9a0c568bda465..b3c63ea6973e6 100644 --- a/frontend/src/lib/components/Alerts/views/EditAlertModal.tsx +++ b/frontend/src/lib/components/Alerts/views/EditAlertModal.tsx @@ -1,22 +1,24 @@ -import { LemonBanner, LemonCheckbox, LemonInput, LemonSelect, SpinnerOverlay } from '@posthog/lemon-ui' +import { LemonCheckbox, LemonInput, LemonSegmentedButton, LemonSelect, SpinnerOverlay } from '@posthog/lemon-ui' import { useActions, useValues } from 'kea' import { Form, Group } from 'kea-forms' import { AlertStateIndicator } from 'lib/components/Alerts/views/ManageAlertsModal' import { MemberSelectMultiple } from 'lib/components/MemberSelectMultiple' import { TZLabel } from 'lib/components/TZLabel' import { UserActivityIndicator } from 'lib/components/UserActivityIndicator/UserActivityIndicator' +import { dayjs } from 'lib/dayjs' import { IconChevronLeft } from 'lib/lemon-ui/icons' import { LemonButton } from 'lib/lemon-ui/LemonButton' import { LemonField } from 'lib/lemon-ui/LemonField' import { LemonModal } from 'lib/lemon-ui/LemonModal' -import { alphabet } from 'lib/utils' +import { alphabet, formatDate } from 'lib/utils' import { trendsDataLogic } from 'scenes/trends/trendsDataLogic' -import { AlertCalculationInterval } from '~/queries/schema' +import { AlertCalculationInterval, AlertConditionType, AlertState, InsightThresholdType } from '~/queries/schema' import { InsightShortId, QueryBasedInsightModel } from '~/types' import { alertFormLogic } from '../alertFormLogic' import { alertLogic } from '../alertLogic' +import { SnoozeButton } from '../SnoozeButton' import { AlertType } from '../types' export function AlertStateTable({ alert }: { alert: AlertType }): JSX.Element | null { @@ -27,7 +29,8 @@ export function AlertStateTable({ alert }: { alert: AlertType }): JSX.Element | return (

- Current status {alert.state}
+ Current status - {alert.state}
+ {alert.snoozed_until && ` until ${formatDate(dayjs(alert?.snoozed_until), 'MMM D, HH:mm')}`}{' '}

@@ -78,11 +81,11 @@ export function EditAlertModal({ const formLogicProps = { alert, insightId, onEditSuccess } const formLogic = alertFormLogic(formLogicProps) const { alertForm, isAlertFormSubmitting, alertFormChanged } = useValues(formLogic) - const { deleteAlert } = useActions(formLogic) + const { deleteAlert, snoozeAlert, clearSnooze } = useActions(formLogic) const { setAlertFormValue } = useActions(formLogic) const trendsLogic = trendsDataLogic({ dashboardItemId: insightShortId }) - const { alertSeries, breakdownFilter } = useValues(trendsLogic) + const { alertSeries, isNonTimeSeriesDisplay } = useValues(trendsLogic) const creatingNewAlert = alertForm.id === undefined @@ -107,112 +110,220 @@ export function EditAlertModal({ -
- {alert?.created_by ? ( - - ) : null} - - - - - - - - - - {breakdownFilter && ( - - - Alerts on insights with breakdowns alert when any of the breakdown values - breaches the threshold - - - )} - - - - ({ - label: `${alphabet[index]} - ${event}`, - value: index, - }))} - /> - - - - - ['hourly', 'daily'].includes(interval)) - .map((interval) => ({ - label: interval, - value: interval, - }))} - /> - - - - - - +
+
+
+ + - - + - - +
+ {alert?.created_by ? (
+ ) : null}
- u.id) ?? []} - idKey="id" - onChange={(value) => setAlertFormValue('subscribed_users', value)} - /> +
+

Definition

+
+
+
When
+ + + ({ + label: `${alphabet[index]} - ${event}`, + value: index, + }))} + /> + + + + + + + +
+
+
less than
+
+
+ setAlertFormValue('threshold', {
+ configuration: {
+ type: alertForm.threshold.configuration.type,
+ bounds: {
+ ...alertForm.threshold.configuration.bounds,
+ lower:
+ value &&
+ alertForm.threshold.configuration.type ===
+ InsightThresholdType.PERCENTAGE
+ ? value / 100
+ : value,
+ },
+ },
+ })
+ }
+ />
+
+
or more than
+
+
+ setAlertFormValue('threshold', {
+ configuration: {
+ type: alertForm.threshold.configuration.type,
+ bounds: {
+ ...alertForm.threshold.configuration.bounds,
+ upper:
+ value &&
+ alertForm.threshold.configuration.type ===
+ InsightThresholdType.PERCENTAGE
+ ? value / 100
+ : value,
+ },
+ },
+ })
+ }
+ />
+
+ {alertForm.condition.type !== AlertConditionType.ABSOLUTE_VALUE && (
+
+
+
+
+ )}
+
+
+ {alertForm.condition.type === AlertConditionType.ABSOLUTE_VALUE
+ ? 'check'
+ : 'compare'}
+
+ ({
+ label: interval,
+ value: interval,
+ }))}
+ />
+
+
and notify
+
+ u.id) ?? []}
+ idKey="id"
+ onChange={(value) => setAlertFormValue('subscribed_users', value)}
+ />
+
+
+
{alert && }
- {!creatingNewAlert ? (
-
- Delete alert
-
- ) : null}
+
+ {!creatingNewAlert ? (
+
+ Delete alert
+
+ ) : null}
+ {!creatingNewAlert && alert?.state === AlertState.FIRING ? (
+
+ ) : null}
+ {!creatingNewAlert && alert?.state === AlertState.SNOOZED ? (
+
+ Clear snooze
+
+ ) : null}
+
- - Cancel - - -
- ) : ( + return alert.state === AlertState.FIRING ? ( + ) : ( + + + ) } @@ -32,7 +32,9 @@ interface AlertListItemProps { } export function AlertListItem({ alert, onClick }: AlertListItemProps): JSX.Element { - const absoluteThreshold = alert.threshold?.configuration?.absoluteThreshold + const bounds = alert.threshold?.configuration?.bounds + const isPercentage = alert.threshold?.configuration.type === InsightThresholdType.PERCENTAGE + return (
@@ -42,9 +44,11 @@ export function AlertListItem({ alert, onClick }: AlertListItemProps): JSX.Eleme {alert.enabled ? (
- {absoluteThreshold?.lower && `Low ${absoluteThreshold.lower}`}
- {absoluteThreshold?.lower && absoluteThreshold?.upper ? ' · ' : ''}
- {absoluteThreshold?.upper && `High ${absoluteThreshold.upper}`}
+ {bounds?.lower &&
+ `Low ${isPercentage ? bounds.lower * 100 : bounds.lower}${isPercentage ? '%' : ''}`}
+ {bounds?.lower && bounds?.upper ? ' · ' : ''}
+ {bounds?.upper &&
+ `High ${isPercentage ? bounds.upper * 100 : bounds.upper}${isPercentage ? '%' : ''}`}
) : (
Disabled
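
Not part of the diff — a minimal TypeScript sketch of the percentage round-trip the alert form and alert list changes above rely on. The enum and interface here are simplified stand-ins for the schema types referenced in the diff (InsightThresholdType, threshold bounds), not the real `~/queries/schema` definitions; the point is only that percentage bounds are stored as fractions and rendered as `value * 100` with a `%` suffix.

```typescript
// Hypothetical, simplified mirrors of the schema types used in the diff above.
enum InsightThresholdType {
    ABSOLUTE = 'absolute',
    PERCENTAGE = 'percentage',
}

interface InsightThreshold {
    type: InsightThresholdType
    bounds?: { lower?: number; upper?: number }
}

// A value typed into the form as "20" is divided by 100 before saving
// when the threshold type is PERCENTAGE (as in the onChange handlers above)...
function toStoredBound(type: InsightThresholdType, inputValue?: number): number | undefined {
    if (inputValue === undefined) {
        return undefined
    }
    return type === InsightThresholdType.PERCENTAGE ? inputValue / 100 : inputValue
}

// ...and multiplied by 100 again (with a '%' suffix) when rendered in the alert list.
function formatBound(type: InsightThresholdType, storedValue?: number): string {
    if (storedValue === undefined) {
        return ''
    }
    return type === InsightThresholdType.PERCENTAGE ? `${storedValue * 100}%` : `${storedValue}`
}

const threshold: InsightThreshold = {
    type: InsightThresholdType.PERCENTAGE,
    bounds: { lower: toStoredBound(InsightThresholdType.PERCENTAGE, 10) }, // user typed "10"
}
console.log(formatBound(threshold.type, threshold.bounds?.lower)) // "10%"
```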
diff --git a/frontend/src/lib/components/Cards/InsightCard/InsightCard.tsx b/frontend/src/lib/components/Cards/InsightCard/InsightCard.tsx index 474665182957f..a029e3271651a 100644 --- a/frontend/src/lib/components/Cards/InsightCard/InsightCard.tsx +++ b/frontend/src/lib/components/Cards/InsightCard/InsightCard.tsx @@ -11,6 +11,7 @@ import { insightLogic } from 'scenes/insights/insightLogic' import { ErrorBoundary } from '~/layout/ErrorBoundary' import { themeLogic } from '~/layout/navigation-3000/themeLogic' import { Query } from '~/queries/Query/Query' +import { HogQLVariable } from '~/queries/schema' import { DashboardBasicType, DashboardPlacement, @@ -60,6 +61,8 @@ export interface InsightCardProps extends Resizeable, React.HTMLAttributes } function InsightCardInternal( @@ -90,6 +93,7 @@ function InsightCardInternal( placement, loadPriority, doNotLoad, + variablesOverride, ...divProps }: InsightCardProps, ref: React.Ref @@ -141,6 +145,7 @@ function InsightCardInternal( showEditingControls={showEditingControls} showDetailsControls={showDetailsControls} moreButtons={moreButtons} + variablesOverride={variablesOverride} />
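
Not part of the diff — a minimal TypeScript sketch of how the dashboard variable overrides introduced above travel to the query API. The interfaces are trimmed-down stand-ins based only on fields visible in the diff (the `HogQLVariable` shape from the dataNodeLogic tests, the `filters_override` / `variables_override` keys added in `api.ts`); they are not the real `~/queries/schema` definitions.

```typescript
// Hypothetical, trimmed-down shapes based on the fields visible in the diff.
interface HogQLVariable {
    variableId: string
    code_name: string
    value?: unknown
}

interface DashboardFilter {
    date_from?: string | null
    date_to?: string | null
}

interface QueryRequestData {
    query: Record<string, unknown>
    refresh?: string
    filters_override?: DashboardFilter | null
    variables_override?: Record<string, HogQLVariable> | null
}

// Sketch of the forwarding: the overrides are simply added to the request body,
// and an absent override stays undefined so the backend falls back to the values
// saved on the insight (mirroring the dataNodeLogic tests above).
function buildQueryRequest(
    query: Record<string, unknown>,
    filtersOverride?: DashboardFilter | null,
    variablesOverride?: Record<string, HogQLVariable> | null
): QueryRequestData {
    return {
        query,
        refresh: 'async',
        filters_override: filtersOverride,
        variables_override: variablesOverride,
    }
}

const request = buildQueryRequest(
    { kind: 'EventsQuery', select: ['*', 'event', 'timestamp'] },
    undefined,
    { test_1: { variableId: 'some_id', code_name: 'some_name', value: 'hello world' } }
)
console.log(JSON.stringify(request.variables_override))
```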
diff --git a/frontend/src/lib/components/Cards/InsightCard/InsightMeta.tsx b/frontend/src/lib/components/Cards/InsightCard/InsightMeta.tsx index 6de4f0c2ff638..9ab7a431ff18c 100644 --- a/frontend/src/lib/components/Cards/InsightCard/InsightMeta.tsx +++ b/frontend/src/lib/components/Cards/InsightCard/InsightMeta.tsx @@ -44,6 +44,7 @@ interface InsightMetaProps | 'showEditingControls' | 'showDetailsControls' | 'moreButtons' + | 'variablesOverride' > { insight: QueryBasedInsightModel areDetailsShown?: boolean @@ -55,6 +56,7 @@ export function InsightMeta({ ribbonColor, dashboardId, updateColor, + variablesOverride, removeFromDashboard, deleteWithUndo, refresh, @@ -98,7 +100,7 @@ export function InsightMeta({ topHeading={} meta={ <> - +

{name || {summary}} {loading && ( @@ -130,7 +132,7 @@ export function InsightMeta({ moreButtons={ <> <> - + View {refresh && ( diff --git a/frontend/src/lib/components/DateFilter/DateFilter.tsx b/frontend/src/lib/components/DateFilter/DateFilter.tsx index e8597357d3e58..d3a35d762b144 100644 --- a/frontend/src/lib/components/DateFilter/DateFilter.tsx +++ b/frontend/src/lib/components/DateFilter/DateFilter.tsx @@ -38,6 +38,7 @@ export interface DateFilterProps { dropdownPlacement?: Placement /* True when we're not dealing with ranges, but a single date / relative date */ isFixedDateMode?: boolean + placeholder?: string } interface RawDateFilterProps extends DateFilterProps { dateFrom?: string | null | dayjs.Dayjs @@ -62,6 +63,7 @@ export function DateFilter({ max, isFixedDateMode = false, allowedRollingDateOptions, + placeholder, }: RawDateFilterProps): JSX.Element { const key = useRef(uuid()).current const logicProps: DateFilterLogicProps = { @@ -72,6 +74,7 @@ export function DateFilter({ dateOptions, isDateFormatted, isFixedDateMode, + placeholder, } const { open, diff --git a/frontend/src/lib/components/DateFilter/dateFilterLogic.ts b/frontend/src/lib/components/DateFilter/dateFilterLogic.ts index 0a1f3680dbc1b..7d8593963d7b7 100644 --- a/frontend/src/lib/components/DateFilter/dateFilterLogic.ts +++ b/frontend/src/lib/components/DateFilter/dateFilterLogic.ts @@ -112,8 +112,9 @@ export const dateFilterLogic = kea([ s.isFixedDate, s.dateOptions, (_, p) => p.isFixedDateMode, + (_, p) => p.placeholder, ], - (dateFrom, dateTo, isFixedRange, isDateToNow, isFixedDate, dateOptions, isFixedDateMode) => + (dateFrom, dateTo, isFixedRange, isDateToNow, isFixedDate, dateOptions, isFixedDateMode, placeholder) => isFixedRange ? formatDateRange(dayjs(dateFrom), dayjs(dateTo)) : isDateToNow @@ -123,7 +124,9 @@ export const dateFilterLogic = kea([ : dateFilterToText( dateFrom, dateTo, - isFixedDateMode ? SELECT_FIXED_VALUE_PLACEHOLDER : NO_OVERRIDE_RANGE_PLACEHOLDER, + isFixedDateMode + ? placeholder ?? 
SELECT_FIXED_VALUE_PLACEHOLDER + : NO_OVERRIDE_RANGE_PLACEHOLDER, dateOptions, false ), diff --git a/frontend/src/lib/components/DateFilter/types.ts b/frontend/src/lib/components/DateFilter/types.ts index 3ebdb781b7c8c..2e95131e9cb34 100644 --- a/frontend/src/lib/components/DateFilter/types.ts +++ b/frontend/src/lib/components/DateFilter/types.ts @@ -17,6 +17,7 @@ export type DateFilterLogicProps = { dateOptions?: DateMappingOption[] isDateFormatted?: boolean isFixedDateMode?: boolean + placeholder?: string } export const CUSTOM_OPTION_KEY = 'Custom' diff --git a/frontend/src/lib/components/JSSnippet.tsx b/frontend/src/lib/components/JSSnippet.tsx index 05cbf0cfb6139..0d93f5d71f845 100644 --- a/frontend/src/lib/components/JSSnippet.tsx +++ b/frontend/src/lib/components/JSSnippet.tsx @@ -20,7 +20,7 @@ export function snippetFunctions(): string { } const snippetMethods = methods.join(' ') - return `!function(t,e){var o,n,p,r;e.__SV||(window.posthog=e,e._i=[],e.init=function(i,s,a){function g(t,e){var o=e.split(".");2==o.length&&(t=t[o[0]],e=o[1]),t[e]=function(){t.push([e].concat(Array.prototype.slice.call(arguments,0)))}}(p=t.createElement("script")).type="text/javascript",p.async=!0,p.src=s.api_host.replace(".i.posthog.com","-assets.i.posthog.com")+"/static/array.js",(r=t.getElementsByTagName("script")[0]).parentNode.insertBefore(p,r);var u=e;for(void 0!==a?u=e[a]=[]:a="posthog",u.people=u.people||[],u.toString=function(t){var e="posthog";return"posthog"!==a&&(e+="."+a),t||(e+=" (stub)"),e},u.people.toString=function(){return u.toString(1)+".people (stub)"},o="${snippetMethods}".split(" "),n=0;n 0) { @@ -164,7 +164,7 @@ export function LemonInputSelect({ // We split on commas EXCEPT if they're escaped (to allow for commas in values) newValue.split(NON_ESCAPED_COMMA_REGEX).forEach((value) => { - const trimmedValue = value.replace('\\,', ',').trim() // Transform escaped commas to plain commas + const trimmedValue = value.replaceAll('\\,', ',').trim() // Transform escaped commas to plain commas if (trimmedValue && !values.includes(trimmedValue)) { newValues.push(trimmedValue) } diff --git a/frontend/src/lib/taxonomy.tsx b/frontend/src/lib/taxonomy.tsx index b8a333f2d2fa8..76c60960eec8e 100644 --- a/frontend/src/lib/taxonomy.tsx +++ b/frontend/src/lib/taxonomy.tsx @@ -1212,6 +1212,10 @@ export const CORE_FILTER_DEFINITIONS_BY_GROUP = { description: 'posthog-js adds these to the page leave event, they are used in web analytics calculations', label: 'Previous pageview duration', }, + $surveys_activated: { + label: 'Surveys Activated', + description: 'The surveys that were activated for this event.', + }, }, numerical_event_properties: {}, // Same as event properties, see assignment below person_properties: {}, // Currently person properties are the same as event properties, see assignment below @@ -1432,7 +1436,13 @@ export const NON_DOLLAR_POSTHOG_PROPERTY_KEYS = [ 'current_usage.session_replay', 'current_usage.surveys', 'customer_deactivated', - 'custom_limits_usd.data_warehouse', + 'custom_limits.data_warehouse', + 'custom_limits.feature_flags', + 'custom_limits.integrations', + 'custom_limits.platform_and_support', + 'custom_limits.product_analytics', + 'custom_limits.session_replay', + 'custom_limits.surveys', 'free_allocation.data_warehouse', 'free_allocation.feature_flags', 'free_allocation.integrations', @@ -1474,8 +1484,6 @@ export const NON_DOLLAR_POSTHOG_PROPERTY_KEYS = [ 'email_service_available', 'slack_service_available', 'commit_sha', - 'token', - 'distinct_id', ] /** Return 
whether a given filter key is part of PostHog's core (marked by the PostHog logo). */ diff --git a/frontend/src/queries/Query/Query.tsx b/frontend/src/queries/Query/Query.tsx index 9a004a846569c..7f6e4926f1ca7 100644 --- a/frontend/src/queries/Query/Query.tsx +++ b/frontend/src/queries/Query/Query.tsx @@ -13,6 +13,7 @@ import { DashboardFilter, DataTableNode, DataVisualizationNode, + HogQLVariable, InsightVizNode, Node, } from '~/queries/schema' @@ -50,10 +51,20 @@ export interface QueryProps { inSharedMode?: boolean /** Dashboard filters to override the ones in the query */ filtersOverride?: DashboardFilter | null + /** Dashboard variables to override the ones in the query */ + variablesOverride?: Record | null } export function Query(props: QueryProps): JSX.Element | null { - const { query: propsQuery, setQuery: propsSetQuery, readOnly, embedded, filtersOverride, inSharedMode } = props + const { + query: propsQuery, + setQuery: propsSetQuery, + readOnly, + embedded, + filtersOverride, + variablesOverride, + inSharedMode, + } = props const [localQuery, localSetQuery] = useState(propsQuery) useEffect(() => { @@ -102,6 +113,7 @@ export function Query(props: QueryProps): JSX.Element | null uniqueKey={uniqueKey} context={queryContext} readOnly={readOnly} + variablesOverride={props.variablesOverride} /> ) } else if (isSavedInsightNode(query)) { @@ -117,6 +129,7 @@ export function Query(props: QueryProps): JSX.Element | null embedded={embedded} inSharedMode={inSharedMode} filtersOverride={filtersOverride} + variablesOverride={variablesOverride} /> ) } else if (isWebOverviewQuery(query)) { diff --git a/frontend/src/queries/nodes/DataNode/dataNodeLogic.test.ts b/frontend/src/queries/nodes/DataNode/dataNodeLogic.test.ts index 4329a52946342..e63cfba6f9309 100644 --- a/frontend/src/queries/nodes/DataNode/dataNodeLogic.test.ts +++ b/frontend/src/queries/nodes/DataNode/dataNodeLogic.test.ts @@ -2,7 +2,7 @@ import { expectLogic, partial } from 'kea-test-utils' import { dataNodeLogic } from '~/queries/nodes/DataNode/dataNodeLogic' import { performQuery } from '~/queries/query' -import { DashboardFilter, NodeKind } from '~/queries/schema' +import { DashboardFilter, HogQLVariable, NodeKind } from '~/queries/schema' import { initKeaTests } from '~/test/init' jest.mock('~/queries/query', () => { @@ -473,6 +473,40 @@ describe('dataNodeLogic', () => { expect.any(String), expect.any(Function), filtersOverride, + undefined, + false + ) + }) + + it('passes variablesOverride to api', async () => { + const variablesOverride: Record = { + test_1: { + variableId: 'some_id', + code_name: 'some_name', + value: 'hello world', + }, + } + + const query = { + kind: NodeKind.EventsQuery, + select: ['*', 'event', 'timestamp'], + } + + logic = dataNodeLogic({ + key: 'key', + query, + variablesOverride, + }) + logic.mount() + + expect(performQuery).toHaveBeenCalledWith( + query, + expect.anything(), + false, + expect.any(String), + expect.any(Function), + undefined, + variablesOverride, false ) }) @@ -497,6 +531,32 @@ describe('dataNodeLogic', () => { expect.any(String), expect.any(Function), undefined, + undefined, + false + ) + }) + + it("doesn't pass undefined variablesOverride to api", async () => { + const query = { + kind: NodeKind.EventsQuery, + select: ['*', 'event', 'timestamp'], + } + + logic = dataNodeLogic({ + key: 'key', + query, + variablesOverride: undefined, + }) + logic.mount() + + expect(performQuery).toHaveBeenCalledWith( + query, + expect.anything(), + false, + expect.any(String), + expect.any(Function), + 
undefined, + undefined, false ) }) diff --git a/frontend/src/queries/nodes/DataNode/dataNodeLogic.ts b/frontend/src/queries/nodes/DataNode/dataNodeLogic.ts index 6d1bdfae9ff6e..25d0f75848491 100644 --- a/frontend/src/queries/nodes/DataNode/dataNodeLogic.ts +++ b/frontend/src/queries/nodes/DataNode/dataNodeLogic.ts @@ -28,7 +28,7 @@ import { userLogic } from 'scenes/userLogic' import { dataNodeCollectionLogic, DataNodeCollectionProps } from '~/queries/nodes/DataNode/dataNodeCollectionLogic' import { removeExpressionComment } from '~/queries/nodes/DataTable/utils' import { performQuery } from '~/queries/query' -import { DashboardFilter, QueryStatus } from '~/queries/schema' +import { DashboardFilter, HogQLVariable, QueryStatus } from '~/queries/schema' import { ActorsQuery, ActorsQueryResponse, @@ -66,6 +66,8 @@ export interface DataNodeLogicProps { /** Dashboard filters to override the ones in the query */ filtersOverride?: DashboardFilter | null + /** Dashboard variables to override the ones in the query */ + variablesOverride?: Record | null } export const AUTOLOAD_INTERVAL = 30000 @@ -99,7 +101,7 @@ export const dataNodeLogic = kea([ ], ], })), - props({ query: {} } as DataNodeLogicProps), + props({ query: {}, variablesOverride: undefined } as DataNodeLogicProps), propsChanged(({ actions, props }, oldProps) => { if (!props.query) { return // Can't do anything without a query @@ -214,6 +216,7 @@ export const dataNodeLogic = kea([ queryId, actions.setPollResponse, props.filtersOverride, + props.variablesOverride, pollOnly )) ?? null const duration = performance.now() - now @@ -451,6 +454,10 @@ export const dataNodeLogic = kea([ ], })), selectors(({ cache }) => ({ + variableOverridesAreSet: [ + (_, p) => [p.variablesOverride ?? (() => ({}))], + (variablesOverride) => !!variablesOverride, + ], isShowingCachedResults: [ () => [(_, props) => props.cachedResults ?? 
null, (_, props) => props.query], (cachedResults: AnyResponseType | null, query: DataNode): boolean => { diff --git a/frontend/src/queries/nodes/DataTable/dataTableLogic.test.ts b/frontend/src/queries/nodes/DataTable/dataTableLogic.test.ts index e2fc4bde77224..24d058bd7e8d6 100644 --- a/frontend/src/queries/nodes/DataTable/dataTableLogic.test.ts +++ b/frontend/src/queries/nodes/DataTable/dataTableLogic.test.ts @@ -67,6 +67,7 @@ describe('dataTableLogic', () => { expect.any(String), expect.any(Function), undefined, + undefined, false ) expect(performQuery).toHaveBeenCalledTimes(1) diff --git a/frontend/src/queries/nodes/DataVisualization/Components/Variables/Variables.tsx b/frontend/src/queries/nodes/DataVisualization/Components/Variables/Variables.tsx index 0a8e45cf687e1..d17fb419452e4 100644 --- a/frontend/src/queries/nodes/DataVisualization/Components/Variables/Variables.tsx +++ b/frontend/src/queries/nodes/DataVisualization/Components/Variables/Variables.tsx @@ -1,6 +1,6 @@ import './Variables.scss' -import { IconCopy, IconGear } from '@posthog/icons' +import { IconCopy, IconGear, IconTrash } from '@posthog/icons' import { LemonButton, LemonDivider, LemonInput, Popover } from '@posthog/lemon-ui' import { useActions, useValues } from 'kea' import { FEATURE_FLAGS } from 'lib/constants' @@ -8,17 +8,49 @@ import { LemonField } from 'lib/lemon-ui/LemonField' import { featureFlagLogic } from 'lib/logic/featureFlagLogic' import { copyToClipboard } from 'lib/utils/copyToClipboard' import { useEffect, useRef, useState } from 'react' +import { dashboardLogic } from 'scenes/dashboard/dashboardLogic' + +import { dataNodeLogic } from '~/queries/nodes/DataNode/dataNodeLogic' import { dataVisualizationLogic } from '../../dataVisualizationLogic' import { Variable } from '../../types' import { NewVariableModal } from './NewVariableModal' import { variablesLogic } from './variablesLogic' -export const Variables = (): JSX.Element => { +export const VariablesForDashboard = (): JSX.Element => { + const { featureFlags } = useValues(featureFlagLogic) + const { dashboardVariables } = useValues(dashboardLogic) + const { overrideVariableValue } = useActions(dashboardLogic) + + if (!featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES] || !dashboardVariables.length) { + return <> + } + + return ( + <> +
+                {dashboardVariables.map((n) => (
+                    <VariableComponent
+                        key={n.id}
+                        variable={n}
+                        showEditingUI={false}
+                        onChange={overrideVariableValue}
+                        variableOverridesAreSet={false}
+                    />
+                ))}
+            </div>
+ + ) +} + +export const VariablesForInsight = (): JSX.Element => { const { featureFlags } = useValues(featureFlagLogic) - const { variablesForInsight } = useValues(variablesLogic) + const { variablesForInsight, showVariablesBar } = useValues(variablesLogic) + const { updateVariableValue, removeVariable } = useActions(variablesLogic) + const { showEditingUI } = useValues(dataVisualizationLogic) + const { variableOverridesAreSet } = useValues(dataNodeLogic) - if (!featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES] || !variablesForInsight.length) { + if (!featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES] || !variablesForInsight.length || !showVariablesBar) { return <> } @@ -26,7 +58,14 @@ export const Variables = (): JSX.Element => { <>
                {variablesForInsight.map((n) => (
-                    <VariableComponent key={n.id} variable={n} />
+                    <VariableComponent
+                        key={n.id}
+                        variable={n}
+                        showEditingUI={showEditingUI}
+                        onChange={updateVariableValue}
+                        onRemove={removeVariable}
+                        variableOverridesAreSet={variableOverridesAreSet}
+                    />
                ))}
@@ -34,10 +73,21 @@ export const Variables = (): JSX.Element => { ) } -const VariableInput = ({ variable, closePopover }: { variable: Variable; closePopover: () => void }): JSX.Element => { - const { showEditingUI } = useValues(dataVisualizationLogic) - const { updateVariableValue } = useActions(variablesLogic) +interface VariableInputProps { + variable: Variable + showEditingUI: boolean + closePopover: () => void + onChange: (variableId: string, value: any) => void + onRemove?: (variableId: string) => void +} +const VariableInput = ({ + variable, + showEditingUI, + closePopover, + onChange, + onRemove, +}: VariableInputProps): JSX.Element => { const [localInputValue, setLocalInputValue] = useState(variable.value ?? variable.default_value ?? '') const inputRef = useRef(null) @@ -59,14 +109,14 @@ const VariableInput = ({ variable, closePopover }: { variable: Variable; closePo value={localInputValue.toString()} onChange={(value) => setLocalInputValue(value)} onPressEnter={() => { - updateVariableValue(variable.id, localInputValue) + onChange(variable.id, localInputValue) closePopover() }} /> { - updateVariableValue(variable.id, localInputValue) + onChange(variable.id, localInputValue) closePopover() }} > @@ -102,6 +152,14 @@ const VariableInput = ({ variable, closePopover }: { variable: Variable; closePo onClick={() => void copyToClipboard(variableAsHogQL, 'variable HogQL')} tooltip="Copy HogQL" /> + {onRemove && ( + onRemove(variable.id)} + icon={} + size="xsmall" + tooltip="Remove variable from insight" + /> + )} } size="xsmall" tooltip="Open variable settings" />
@@ -110,13 +168,35 @@ const VariableInput = ({ variable, closePopover }: { variable: Variable; closePo ) } -const VariableComponent = ({ variable }: { variable: Variable }): JSX.Element => { +interface VariableComponentProps { + variable: Variable + showEditingUI: boolean + onChange: (variableId: string, value: any) => void + variableOverridesAreSet: boolean + onRemove?: (variableId: string) => void +} + +const VariableComponent = ({ + variable, + showEditingUI, + onChange, + variableOverridesAreSet, + onRemove, +}: VariableComponentProps): JSX.Element => { const [isPopoverOpen, setPopoverOpen] = useState(false) return ( setPopoverOpen(false)} />} + overlay={ + setPopoverOpen(false)} + onRemove={onRemove} + /> + } visible={isPopoverOpen} onClickOutside={() => setPopoverOpen(false)} className="DataVizVariable_Popover" @@ -131,6 +211,7 @@ const VariableComponent = ({ variable }: { variable: Variable }): JSX.Element => type="secondary" className="min-w-32 DataVizVariable_Button" onClick={() => setPopoverOpen(!isPopoverOpen)} + disabledReason={variableOverridesAreSet && 'Discard dashboard variables to change'} > {variable.value ?? variable.default_value}
diff --git a/frontend/src/queries/nodes/DataVisualization/Components/Variables/addVariableLogic.ts b/frontend/src/queries/nodes/DataVisualization/Components/Variables/addVariableLogic.ts index 396fd3dbc6b87..a8802e6b6b6ea 100644 --- a/frontend/src/queries/nodes/DataVisualization/Components/Variables/addVariableLogic.ts +++ b/frontend/src/queries/nodes/DataVisualization/Components/Variables/addVariableLogic.ts @@ -1,9 +1,11 @@ -import { actions, kea, path, reducers } from 'kea' -import { loaders } from 'kea-loaders' -import api from 'lib/api' +import { lemonToast } from '@posthog/lemon-ui' +import { actions, connect, kea, key, listeners, path, props, reducers } from 'kea' +import api, { ApiError } from 'lib/api' import { BooleanVariable, ListVariable, NumberVariable, StringVariable, Variable, VariableType } from '../../types' import type { addVariableLogicType } from './addVariableLogicType' +import { variableDataLogic } from './variableDataLogic' +import { variablesLogic } from './variablesLogic' const DEFAULT_VARIABLE: StringVariable = { id: '', @@ -13,12 +15,22 @@ const DEFAULT_VARIABLE: StringVariable = { code_name: '', } +export interface AddVariableLogicProps { + key: string +} + export const addVariableLogic = kea([ path(['queries', 'nodes', 'DataVisualization', 'Components', 'Variables', 'variableLogic']), + props({ key: '' } as AddVariableLogicProps), + key((props) => props.key), + connect({ + actions: [variableDataLogic, ['getVariables'], variablesLogic, ['addVariable']], + }), actions({ openModal: (variableType: VariableType) => ({ variableType }), closeModal: true, updateVariable: (variable: Variable) => ({ variable }), + save: true, }), reducers({ variableType: [ @@ -86,14 +98,18 @@ export const addVariableLogic = kea([ }, ], }), - loaders(({ values }) => ({ - savedVariable: [ - null as null | Variable, - { - save: async () => { - return await api.insightVariables.create(values.variable) - }, - }, - ], + listeners(({ values, actions }) => ({ + save: async () => { + try { + const variable = await api.insightVariables.create(values.variable) + + actions.getVariables() + actions.addVariable({ variableId: variable.id, code_name: variable.code_name }) + actions.closeModal() + } catch (e: any) { + const error = e as ApiError + lemonToast.error(error.detail ?? 
error.message) + } + }, })), ]) diff --git a/frontend/src/queries/nodes/DataVisualization/Components/Variables/variableDataLogic.ts b/frontend/src/queries/nodes/DataVisualization/Components/Variables/variableDataLogic.ts new file mode 100644 index 0000000000000..8b7fbc8b98962 --- /dev/null +++ b/frontend/src/queries/nodes/DataVisualization/Components/Variables/variableDataLogic.ts @@ -0,0 +1,22 @@ +import { kea, path } from 'kea' +import { loaders } from 'kea-loaders' +import api from 'lib/api' + +import { Variable } from '../../types' +import type { variableDataLogicType } from './variableDataLogicType' + +export const variableDataLogic = kea([ + path(['queries', 'nodes', 'DataVisualization', 'Components', 'Variables', 'variableDataLogic']), + loaders({ + variables: [ + [] as Variable[], + { + getVariables: async () => { + const insights = await api.insightVariables.list() + + return insights.results + }, + }, + ], + }), +]) diff --git a/frontend/src/queries/nodes/DataVisualization/Components/Variables/variablesLogic.ts b/frontend/src/queries/nodes/DataVisualization/Components/Variables/variablesLogic.ts index 4c92665b7f9e6..61f2590242a73 100644 --- a/frontend/src/queries/nodes/DataVisualization/Components/Variables/variablesLogic.ts +++ b/frontend/src/queries/nodes/DataVisualization/Components/Variables/variablesLogic.ts @@ -1,15 +1,14 @@ -import { actions, afterMount, connect, kea, key, path, props, reducers, selectors } from 'kea' -import { loaders } from 'kea-loaders' +import { actions, afterMount, connect, kea, key, listeners, path, props, reducers, selectors } from 'kea' import { subscriptions } from 'kea-subscriptions' -import api from 'lib/api' import { FEATURE_FLAGS } from 'lib/constants' import { featureFlagLogic } from 'lib/logic/featureFlagLogic' -import { getVariablesFromQuery } from 'scenes/insights/utils/queryUtils' +import { getVariablesFromQuery, haveVariablesOrFiltersChanged } from 'scenes/insights/utils/queryUtils' import { DataVisualizationNode, HogQLVariable } from '~/queries/schema' import { dataVisualizationLogic } from '../../dataVisualizationLogic' import { Variable } from '../../types' +import { variableDataLogic } from './variableDataLogic' import type { variablesLogicType } from './variablesLogicType' export interface VariablesLogicProps { @@ -23,21 +22,39 @@ export const variablesLogic = kea([ props({ key: '' } as VariablesLogicProps), key((props) => props.key), connect({ - actions: [dataVisualizationLogic, ['setQuery', 'loadData']], - values: [dataVisualizationLogic, ['query'], featureFlagLogic, ['featureFlags']], + actions: [dataVisualizationLogic, ['setQuery', 'loadData'], variableDataLogic, ['getVariables']], + values: [ + dataVisualizationLogic, + ['query', 'insightLogicProps'], + variableDataLogic, + ['variables', 'variablesLoading'], + featureFlagLogic, + ['featureFlags'], + ], }), actions({ addVariable: (variable: HogQLVariable) => ({ variable }), + addVariables: (variables: HogQLVariable[]) => ({ variables }), + removeVariable: (variableId: string) => ({ variableId }), updateVariableValue: (variableId: string, value: any) => ({ variableId, value }), setEditorQuery: (query: string) => ({ query }), + resetVariables: true, + updateSourceQuery: true, }), reducers({ internalSelectedVariables: [ [] as HogQLVariable[], { addVariable: (state, { variable }) => { + if (state.find((n) => variable.variableId === n.variableId)) { + return state + } + return [...state, { ...variable }] }, + addVariables: (state, { variables }) => { + return [...state, 
...variables.map((n) => ({ ...n }))] + }, updateVariableValue: (state, { variableId, value }) => { const variableIndex = state.findIndex((n) => n.variableId === variableId) if (variableIndex < 0) { @@ -49,6 +66,16 @@ export const variablesLogic = kea([ return variablesInState }, + removeVariable: (state, { variableId }) => { + const stateCopy = [...state] + const index = stateCopy.findIndex((n) => n.variableId === variableId) + if (index >= 0) { + stateCopy.splice(index) + } + + return stateCopy + }, + resetVariables: () => [], }, ], editorQuery: [ @@ -59,23 +86,11 @@ export const variablesLogic = kea([ }, ], }), - loaders({ - variables: [ - [] as Variable[], - { - getVariables: async () => { - const insights = await api.insightVariables.list() - - return insights.results - }, - }, - ], - }), selectors({ variablesForInsight: [ - (s) => [s.variables, s.internalSelectedVariables], - (variables, internalSelectedVariables): Variable[] => { - if (!variables.length || !internalSelectedVariables.length) { + (s) => [s.variables, s.internalSelectedVariables, s.variablesLoading], + (variables, internalSelectedVariables, variablesLoading): Variable[] => { + if (!variables.length || !internalSelectedVariables.length || variablesLoading) { return [] } @@ -91,9 +106,30 @@ export const variablesLogic = kea([ .filter((n): n is Variable => Boolean(n)) }, ], + showVariablesBar: [ + (state) => [state.insightLogicProps], + (insightLogicProps) => { + return !insightLogicProps.dashboardId + }, + ], }), - subscriptions(({ props, actions, values }) => ({ - variablesForInsight: (variables: Variable[]) => { + listeners(({ props, values, actions }) => ({ + addVariable: () => { + actions.updateSourceQuery() + }, + removeVariable: () => { + actions.updateSourceQuery() + }, + updateVariableValue: () => { + actions.updateSourceQuery() + }, + updateSourceQuery: () => { + if (!values.featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES]) { + return + } + + const variables = values.variablesForInsight + const query: DataVisualizationNode = { ...values.query, source: { @@ -112,7 +148,8 @@ export const variablesLogic = kea([ }, } - if (!values.featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES]) { + const queryVarsHaveChanged = haveVariablesOrFiltersChanged(query.source, values.query.source) + if (!queryVarsHaveChanged) { return } @@ -124,6 +161,8 @@ export const variablesLogic = kea([ actions.setQuery(query) } }, + })), + subscriptions(({ actions, values }) => ({ editorQuery: (query: string) => { const queryVariableMatches = getVariablesFromQuery(query) @@ -143,16 +182,25 @@ export const variablesLogic = kea([ } }) }, + query: (query: DataVisualizationNode) => { + if (!values.featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES]) { + return + } + + actions.resetVariables() + + const variables = Object.values(query.source.variables ?? {}) + + if (variables.length) { + actions.addVariables(variables) + } + }, })), afterMount(({ actions, values }) => { if (!values.featureFlags[FEATURE_FLAGS.INSIGHT_VARIABLES]) { return } - Object.values(values.query.source.variables ?? 
{}).forEach((variable) => { - actions.addVariable(variable) - }) - actions.getVariables() }), ]) diff --git a/frontend/src/queries/nodes/DataVisualization/DataVisualization.tsx b/frontend/src/queries/nodes/DataVisualization/DataVisualization.tsx index c15e93076809f..799a135e5a649 100644 --- a/frontend/src/queries/nodes/DataVisualization/DataVisualization.tsx +++ b/frontend/src/queries/nodes/DataVisualization/DataVisualization.tsx @@ -17,7 +17,14 @@ import { HogQLBoldNumber } from 'scenes/insights/views/BoldNumber/BoldNumber' import { urls } from 'scenes/urls' import { insightVizDataCollectionId, insightVizDataNodeKey } from '~/queries/nodes/InsightViz/InsightViz' -import { AnyResponseType, DataVisualizationNode, HogQLQuery, HogQLQueryResponse, NodeKind } from '~/queries/schema' +import { + AnyResponseType, + DataVisualizationNode, + HogQLQuery, + HogQLQueryResponse, + HogQLVariable, + NodeKind, +} from '~/queries/schema' import { QueryContext } from '~/queries/types' import { ChartDisplayType, ExporterFormat, InsightLogicProps } from '~/types' @@ -32,7 +39,8 @@ import { SideBar } from './Components/SideBar' import { Table } from './Components/Table' import { TableDisplay } from './Components/TableDisplay' import { AddVariableButton } from './Components/Variables/AddVariableButton' -import { Variables } from './Components/Variables/Variables' +import { addVariableLogic } from './Components/Variables/addVariableLogic' +import { VariablesForInsight } from './Components/Variables/Variables' import { variablesLogic } from './Components/Variables/variablesLogic' import { dataVisualizationLogic, DataVisualizationLogicProps } from './dataVisualizationLogic' import { displayLogic } from './displayLogic' @@ -46,6 +54,8 @@ interface DataTableVisualizationProps { the data node logic becomes read only implicitly */ cachedResults?: AnyResponseType readOnly?: boolean + /** Dashboard variables to override the ones in the query */ + variablesOverride?: Record | null } let uniqueNode = 0 @@ -57,6 +67,7 @@ export function DataTableVisualization({ context, cachedResults, readOnly, + variablesOverride, }: DataTableVisualizationProps): JSX.Element { const [key] = useState(`DataVisualizationNode.${uniqueKey ?? uniqueNode++}`) const insightProps: InsightLogicProps = context?.insightProps || { @@ -73,6 +84,7 @@ export function DataTableVisualization({ insightLogicProps: insightProps, setQuery, cachedResults, + variablesOverride, } const dataNodeLogicProps: DataNodeLogicProps = { @@ -81,6 +93,7 @@ export function DataTableVisualization({ cachedResults, loadPriority: insightProps.loadPriority, dataNodeCollectionId: insightVizDataCollectionId(insightProps, key), + variablesOverride, } return ( @@ -91,14 +104,16 @@ export function DataTableVisualization({ logic={variablesLogic} props={{ key: dataVisualizationLogicProps.key, readOnly: readOnly ?? false }} > - + + + @@ -238,7 +253,7 @@ function InternalDataTableVisualization(props: DataTableVisualizationProps): JSX )} - +
{showEditingUI && isChartSettingsPanelOpen && ( diff --git a/frontend/src/queries/nodes/DataVisualization/dataVisualizationLogic.ts b/frontend/src/queries/nodes/DataVisualization/dataVisualizationLogic.ts index b0ffb32015d57..a1a23ac6f0dfa 100644 --- a/frontend/src/queries/nodes/DataVisualization/dataVisualizationLogic.ts +++ b/frontend/src/queries/nodes/DataVisualization/dataVisualizationLogic.ts @@ -16,6 +16,7 @@ import { ChartSettingsFormatting, ConditionalFormattingRule, DataVisualizationNode, + HogQLVariable, } from '~/queries/schema' import { QueryContext } from '~/queries/types' import { ChartDisplayType, InsightLogicProps, ItemMode } from '~/types' @@ -68,6 +69,8 @@ export interface DataVisualizationLogicProps { context?: QueryContext cachedResults?: AnyResponseType insightLoading?: boolean + /** Dashboard variables to override the ones in the query */ + variablesOverride?: Record | null } export interface SelectedYAxis { @@ -222,6 +225,7 @@ export const dataVisualizationLogic = kea([ query: props.query.source, dataNodeCollectionId: insightVizDataCollectionId(props.insightLogicProps, props.key), loadPriority: props.insightLogicProps.loadPriority, + variablesOverride: props.variablesOverride, }), ['response', 'responseLoading', 'responseError', 'queryCancelled'], themeLogic, @@ -234,11 +238,12 @@ export const dataVisualizationLogic = kea([ query: props.query.source, dataNodeCollectionId: insightVizDataCollectionId(props.insightLogicProps, props.key), loadPriority: props.insightLogicProps.loadPriority, + variablesOverride: props.variablesOverride, }), ['loadData'], ], })), - props({ query: {} } as DataVisualizationLogicProps), + props({ query: { source: {} } } as DataVisualizationLogicProps), actions(({ values }) => ({ setVisualizationType: (visualizationType: ChartDisplayType) => ({ visualizationType }), updateXSeries: (columnName: string) => ({ @@ -559,6 +564,7 @@ export const dataVisualizationLogic = kea([ return insightMode == ItemMode.Edit }, ], + insightLogicProps: [(_state, props) => [props.insightLogicProps], (insightLogicProps) => insightLogicProps], showResultControls: [ (state, props) => [state.insightMode, props.insightLogicProps], (insightMode, insightLogicProps) => { diff --git a/frontend/src/queries/nodes/InsightViz/InsightViz.tsx b/frontend/src/queries/nodes/InsightViz/InsightViz.tsx index aa47a108cd4c4..ba58d4a16b5af 100644 --- a/frontend/src/queries/nodes/InsightViz/InsightViz.tsx +++ b/frontend/src/queries/nodes/InsightViz/InsightViz.tsx @@ -10,7 +10,7 @@ import { insightVizDataLogic } from 'scenes/insights/insightVizDataLogic' import { keyForInsightLogicProps } from 'scenes/insights/sharedUtils' import { ErrorBoundary } from '~/layout/ErrorBoundary' -import { DashboardFilter, InsightVizNode } from '~/queries/schema' +import { DashboardFilter, HogQLVariable, InsightVizNode } from '~/queries/schema' import { QueryContext } from '~/queries/types' import { isFunnelsQuery } from '~/queries/utils' import { InsightLogicProps, ItemMode } from '~/types' @@ -38,6 +38,7 @@ type InsightVizProps = { embedded?: boolean inSharedMode?: boolean filtersOverride?: DashboardFilter | null + variablesOverride?: Record | null } let uniqueNode = 0 @@ -51,6 +52,7 @@ export function InsightViz({ embedded, inSharedMode, filtersOverride, + variablesOverride, }: InsightVizProps): JSX.Element { const [key] = useState(() => `InsightViz.${uniqueKey || uniqueNode++}`) const insightProps: InsightLogicProps = context?.insightProps || { @@ -59,6 +61,7 @@ export function InsightViz({ setQuery, 
dataNodeCollectionId: key, filtersOverride, + variablesOverride, } if (!insightProps.setQuery && setQuery) { @@ -75,6 +78,7 @@ export function InsightViz({ loadPriority: insightProps.loadPriority, dataNodeCollectionId: insightVizDataCollectionId(insightProps, vizKey), filtersOverride, + variablesOverride, } const { insightMode } = useValues(insightSceneLogic) diff --git a/frontend/src/queries/query.ts b/frontend/src/queries/query.ts index 1ea01c13868f1..1952432e3607f 100644 --- a/frontend/src/queries/query.ts +++ b/frontend/src/queries/query.ts @@ -6,7 +6,16 @@ import posthog from 'posthog-js' import { OnlineExportContext, QueryExportContext } from '~/types' -import { DashboardFilter, DataNode, HogQLQuery, HogQLQueryResponse, NodeKind, PersonsNode, QueryStatus } from './schema' +import { + DashboardFilter, + DataNode, + HogQLQuery, + HogQLQueryResponse, + HogQLVariable, + NodeKind, + PersonsNode, + QueryStatus, +} from './schema' import { isAsyncResponse, isDataTableNode, @@ -79,6 +88,7 @@ async function executeQuery( queryId?: string, setPollResponse?: (response: QueryStatus) => void, filtersOverride?: DashboardFilter | null, + variablesOverride?: Record | null, /** * Whether to limit the function to just polling the provided query ID. * This is important in shared contexts, where we cannot create arbitrary queries via POST – we can only GET. @@ -91,7 +101,15 @@ async function executeQuery( !!featureFlagLogic.findMounted()?.values.featureFlags?.[FEATURE_FLAGS.QUERY_ASYNC] if (!pollOnly) { - const response = await api.query(queryNode, methodOptions, queryId, refresh, isAsyncQuery, filtersOverride) + const response = await api.query( + queryNode, + methodOptions, + queryId, + refresh, + isAsyncQuery, + filtersOverride, + variablesOverride + ) if (!isAsyncResponse(response)) { // Executed query synchronously or from cache @@ -124,6 +142,7 @@ export async function performQuery( queryId?: string, setPollResponse?: (status: QueryStatus) => void, filtersOverride?: DashboardFilter | null, + variablesOverride?: Record | null, pollOnly = false ): Promise> { let response: NonNullable @@ -141,6 +160,7 @@ export async function performQuery( queryId, setPollResponse, filtersOverride, + variablesOverride, pollOnly ) if (isHogQLQuery(queryNode) && response && typeof response === 'object') { diff --git a/frontend/src/queries/schema.json b/frontend/src/queries/schema.json index b2538f6daf6b5..a9dc7be1bf476 100644 --- a/frontend/src/queries/schema.json +++ b/frontend/src/queries/schema.json @@ -401,10 +401,20 @@ }, "AlertCondition": { "additionalProperties": false, + "properties": { + "type": { + "$ref": "#/definitions/AlertConditionType" + } + }, + "required": ["type"], "type": "object" }, + "AlertConditionType": { + "enum": ["absolute_value", "relative_increase", "relative_decrease"], + "type": "string" + }, "AlertState": { - "enum": ["Firing", "Not firing", "Errored"], + "enum": ["Firing", "Not firing", "Errored", "Snoozed"], "type": "string" }, "AnyDataNode": { @@ -467,10 +477,10 @@ "$ref": "#/definitions/ErrorTrackingQuery" }, { - "$ref": "#/definitions/ExperimentFunnelQuery" + "$ref": "#/definitions/ExperimentFunnelsQuery" }, { - "$ref": "#/definitions/ExperimentTrendQuery" + "$ref": "#/definitions/ExperimentTrendsQuery" } ] }, @@ -1240,7 +1250,7 @@ ], "type": "object" }, - "CachedExperimentFunnelQueryResponse": { + "CachedExperimentFunnelsQueryResponse": { "additionalProperties": false, "properties": { "cache_key": { @@ -1254,9 +1264,22 @@ "description": "What triggered the calculation of the query, 
leave empty if user/immediate", "type": "string" }, + "credible_intervals": { + "additionalProperties": { + "items": { + "type": "number" + }, + "maxItems": 2, + "minItems": 2, + "type": "array" + }, + "type": "object" + }, + "expected_loss": { + "type": "number" + }, "insight": { - "const": "FUNNELS", - "type": "string" + "$ref": "#/definitions/FunnelsQueryResponse" }, "is_cached": { "type": "boolean" @@ -1269,32 +1292,49 @@ "format": "date-time", "type": "string" }, + "probability": { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, "query_status": { "$ref": "#/definitions/QueryStatus", "description": "Query status indicates whether next to the provided data, a query is still running." }, - "results": { - "additionalProperties": { - "$ref": "#/definitions/ExperimentVariantFunnelResult" - }, - "type": "object" + "significance_code": { + "$ref": "#/definitions/ExperimentSignificanceCode" + }, + "significant": { + "type": "boolean" }, "timezone": { "type": "string" + }, + "variants": { + "items": { + "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" + }, + "type": "array" } }, "required": [ "cache_key", + "credible_intervals", + "expected_loss", "insight", "is_cached", "last_refresh", "next_allowed_client_refresh", - "results", - "timezone" + "probability", + "significance_code", + "significant", + "timezone", + "variants" ], "type": "object" }, - "CachedExperimentTrendQueryResponse": { + "CachedExperimentTrendsQueryResponse": { "additionalProperties": false, "properties": { "cache_key": { @@ -1357,7 +1397,7 @@ }, "variants": { "items": { - "$ref": "#/definitions/ExperimentVariantTrendBaseStats" + "$ref": "#/definitions/ExperimentVariantTrendsBaseStats" }, "type": "array" } @@ -3641,18 +3681,51 @@ { "additionalProperties": false, "properties": { + "credible_intervals": { + "additionalProperties": { + "items": { + "type": "number" + }, + "maxItems": 2, + "minItems": 2, + "type": "array" + }, + "type": "object" + }, + "expected_loss": { + "type": "number" + }, "insight": { - "const": "FUNNELS", - "type": "string" + "$ref": "#/definitions/FunnelsQueryResponse" }, - "results": { + "probability": { "additionalProperties": { - "$ref": "#/definitions/ExperimentVariantFunnelResult" + "type": "number" }, "type": "object" + }, + "significance_code": { + "$ref": "#/definitions/ExperimentSignificanceCode" + }, + "significant": { + "type": "boolean" + }, + "variants": { + "items": { + "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" + }, + "type": "array" } }, - "required": ["insight", "results"], + "required": [ + "credible_intervals", + "expected_loss", + "insight", + "probability", + "significance_code", + "significant", + "variants" + ], "type": "object" }, { @@ -3689,7 +3762,7 @@ }, "variants": { "items": { - "$ref": "#/definitions/ExperimentVariantTrendBaseStats" + "$ref": "#/definitions/ExperimentVariantTrendsBaseStats" }, "type": "array" } @@ -3820,10 +3893,10 @@ "$ref": "#/definitions/ErrorTrackingQuery" }, { - "$ref": "#/definitions/ExperimentFunnelQuery" + "$ref": "#/definitions/ExperimentFunnelsQuery" }, { - "$ref": "#/definitions/ExperimentTrendQuery" + "$ref": "#/definitions/ExperimentTrendsQuery" } ], "description": "Source of the events" @@ -5055,14 +5128,14 @@ "required": ["columns", "hogql", "results", "types"], "type": "object" }, - "ExperimentFunnelQuery": { + "ExperimentFunnelsQuery": { "additionalProperties": false, "properties": { "experiment_id": { "type": "integer" }, "kind": { - "const": "ExperimentFunnelQuery", + "const": 
"ExperimentFunnelsQuery", "type": "string" }, "modifiers": { @@ -5070,7 +5143,7 @@ "description": "Modifiers used when performing the query" }, "response": { - "$ref": "#/definitions/ExperimentFunnelQueryResponse" + "$ref": "#/definitions/ExperimentFunnelsQueryResponse" }, "source": { "$ref": "#/definitions/FunnelsQuery" @@ -5079,28 +5152,61 @@ "required": ["experiment_id", "kind", "source"], "type": "object" }, - "ExperimentFunnelQueryResponse": { + "ExperimentFunnelsQueryResponse": { "additionalProperties": false, "properties": { + "credible_intervals": { + "additionalProperties": { + "items": { + "type": "number" + }, + "maxItems": 2, + "minItems": 2, + "type": "array" + }, + "type": "object" + }, + "expected_loss": { + "type": "number" + }, "insight": { - "const": "FUNNELS", - "type": "string" + "$ref": "#/definitions/FunnelsQueryResponse" }, - "results": { + "probability": { "additionalProperties": { - "$ref": "#/definitions/ExperimentVariantFunnelResult" + "type": "number" }, "type": "object" + }, + "significance_code": { + "$ref": "#/definitions/ExperimentSignificanceCode" + }, + "significant": { + "type": "boolean" + }, + "variants": { + "items": { + "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" + }, + "type": "array" } }, - "required": ["insight", "results"], + "required": [ + "insight", + "variants", + "probability", + "significant", + "significance_code", + "expected_loss", + "credible_intervals" + ], "type": "object" }, "ExperimentSignificanceCode": { "enum": ["significant", "not_enough_exposure", "low_win_probability", "high_loss", "high_p_value"], "type": "string" }, - "ExperimentTrendQuery": { + "ExperimentTrendsQuery": { "additionalProperties": false, "properties": { "count_query": { @@ -5113,7 +5219,7 @@ "$ref": "#/definitions/TrendsQuery" }, "kind": { - "const": "ExperimentTrendQuery", + "const": "ExperimentTrendsQuery", "type": "string" }, "modifiers": { @@ -5121,13 +5227,13 @@ "description": "Modifiers used when performing the query" }, "response": { - "$ref": "#/definitions/ExperimentTrendQueryResponse" + "$ref": "#/definitions/ExperimentTrendsQueryResponse" } }, "required": ["count_query", "experiment_id", "kind"], "type": "object" }, - "ExperimentTrendQueryResponse": { + "ExperimentTrendsQueryResponse": { "additionalProperties": false, "properties": { "credible_intervals": { @@ -5161,7 +5267,7 @@ }, "variants": { "items": { - "$ref": "#/definitions/ExperimentVariantTrendBaseStats" + "$ref": "#/definitions/ExperimentVariantTrendsBaseStats" }, "type": "array" } @@ -5177,7 +5283,7 @@ ], "type": "object" }, - "ExperimentVariantFunnelResult": { + "ExperimentVariantFunnelsBaseStats": { "additionalProperties": false, "properties": { "failure_count": { @@ -5193,7 +5299,7 @@ "required": ["key", "success_count", "failure_count"], "type": "object" }, - "ExperimentVariantTrendBaseStats": { + "ExperimentVariantTrendsBaseStats": { "additionalProperties": false, "properties": { "absolute_exposure": { @@ -6908,12 +7014,20 @@ "InsightThreshold": { "additionalProperties": false, "properties": { - "absoluteThreshold": { - "$ref": "#/definitions/InsightsThresholdAbsolute" + "bounds": { + "$ref": "#/definitions/InsightsThresholdBounds" + }, + "type": { + "$ref": "#/definitions/InsightThresholdType" } }, + "required": ["type"], "type": "object" }, + "InsightThresholdType": { + "enum": ["absolute", "percentage"], + "type": "string" + }, "InsightVizNode": { "additionalProperties": false, "properties": { @@ -7216,7 +7330,7 @@ "required": ["kind"], "type": "object" }, - 
"InsightsThresholdAbsolute": { + "InsightsThresholdBounds": { "additionalProperties": false, "properties": { "lower": { @@ -7501,8 +7615,8 @@ "WebStatsTableQuery", "WebExternalClicksTableQuery", "WebGoalsQuery", - "ExperimentFunnelQuery", - "ExperimentTrendQuery", + "ExperimentFunnelsQuery", + "ExperimentTrendsQuery", "DatabaseSchemaQuery", "SuggestedQuestionsQuery", "TeamTaxonomyQuery", @@ -7986,6 +8100,12 @@ "$ref": "#/definitions/RefreshType", "default": "blocking", "description": "Whether results should be calculated sync or async, and how much to rely on the cache:\n- `'blocking'` - calculate synchronously (returning only when the query is done), UNLESS there are very fresh results in the cache\n- `'async'` - kick off background calculation (returning immediately with a query status), UNLESS there are very fresh results in the cache\n- `'lazy_async'` - kick off background calculation, UNLESS there are somewhat fresh results in the cache\n- `'force_blocking'` - calculate synchronously, even if fresh results are already cached\n- `'force_async'` - kick off background calculation, even if fresh results are already cached\n- `'force_cache'` - return cached data or a cache miss; always completes immediately as it never calculates Background calculation can be tracked using the `query_status` response field." + }, + "variables_override": { + "additionalProperties": { + "type": "object" + }, + "type": "object" } }, "required": ["query"], @@ -8762,18 +8882,51 @@ { "additionalProperties": false, "properties": { + "credible_intervals": { + "additionalProperties": { + "items": { + "type": "number" + }, + "maxItems": 2, + "minItems": 2, + "type": "array" + }, + "type": "object" + }, + "expected_loss": { + "type": "number" + }, "insight": { - "const": "FUNNELS", - "type": "string" + "$ref": "#/definitions/FunnelsQueryResponse" }, - "results": { + "probability": { "additionalProperties": { - "$ref": "#/definitions/ExperimentVariantFunnelResult" + "type": "number" }, "type": "object" + }, + "significance_code": { + "$ref": "#/definitions/ExperimentSignificanceCode" + }, + "significant": { + "type": "boolean" + }, + "variants": { + "items": { + "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" + }, + "type": "array" } }, - "required": ["insight", "results"], + "required": [ + "insight", + "variants", + "probability", + "significant", + "significance_code", + "expected_loss", + "credible_intervals" + ], "type": "object" }, { @@ -8810,7 +8963,7 @@ }, "variants": { "items": { - "$ref": "#/definitions/ExperimentVariantTrendBaseStats" + "$ref": "#/definitions/ExperimentVariantTrendsBaseStats" }, "type": "array" } @@ -9374,18 +9527,51 @@ { "additionalProperties": false, "properties": { + "credible_intervals": { + "additionalProperties": { + "items": { + "type": "number" + }, + "maxItems": 2, + "minItems": 2, + "type": "array" + }, + "type": "object" + }, + "expected_loss": { + "type": "number" + }, "insight": { - "const": "FUNNELS", - "type": "string" + "$ref": "#/definitions/FunnelsQueryResponse" }, - "results": { + "probability": { "additionalProperties": { - "$ref": "#/definitions/ExperimentVariantFunnelResult" + "type": "number" }, "type": "object" + }, + "significance_code": { + "$ref": "#/definitions/ExperimentSignificanceCode" + }, + "significant": { + "type": "boolean" + }, + "variants": { + "items": { + "$ref": "#/definitions/ExperimentVariantFunnelsBaseStats" + }, + "type": "array" } }, - "required": ["insight", "results"], + "required": [ + "credible_intervals", + "expected_loss", + 
"insight", + "probability", + "significance_code", + "significant", + "variants" + ], "type": "object" }, { @@ -9422,7 +9608,7 @@ }, "variants": { "items": { - "$ref": "#/definitions/ExperimentVariantTrendBaseStats" + "$ref": "#/definitions/ExperimentVariantTrendsBaseStats" }, "type": "array" } @@ -9815,10 +10001,10 @@ "$ref": "#/definitions/ErrorTrackingQuery" }, { - "$ref": "#/definitions/ExperimentFunnelQuery" + "$ref": "#/definitions/ExperimentFunnelsQuery" }, { - "$ref": "#/definitions/ExperimentTrendQuery" + "$ref": "#/definitions/ExperimentTrendsQuery" }, { "$ref": "#/definitions/DataVisualizationNode" diff --git a/frontend/src/queries/schema.ts b/frontend/src/queries/schema.ts index 1970a06d26b7b..1887f57ee0f96 100644 --- a/frontend/src/queries/schema.ts +++ b/frontend/src/queries/schema.ts @@ -100,8 +100,8 @@ export enum NodeKind { WebGoalsQuery = 'WebGoalsQuery', // Experiment queries - ExperimentFunnelQuery = 'ExperimentFunnelQuery', - ExperimentTrendQuery = 'ExperimentTrendQuery', + ExperimentFunnelsQuery = 'ExperimentFunnelsQuery', + ExperimentTrendsQuery = 'ExperimentTrendsQuery', // Database metadata DatabaseSchemaQuery = 'DatabaseSchemaQuery', @@ -133,8 +133,8 @@ export type AnyDataNode = | WebGoalsQuery | SessionAttributionExplorerQuery | ErrorTrackingQuery - | ExperimentFunnelQuery - | ExperimentTrendQuery + | ExperimentFunnelsQuery + | ExperimentTrendsQuery /** * @discriminator kind @@ -161,8 +161,8 @@ export type QuerySchema = | WebGoalsQuery | SessionAttributionExplorerQuery | ErrorTrackingQuery - | ExperimentFunnelQuery - | ExperimentTrendQuery + | ExperimentFunnelsQuery + | ExperimentTrendsQuery // Interface nodes | DataVisualizationNode @@ -610,8 +610,8 @@ export interface DataTableNode | WebGoalsQuery | SessionAttributionExplorerQuery | ErrorTrackingQuery - | ExperimentFunnelQuery - | ExperimentTrendQuery + | ExperimentFunnelsQuery + | ExperimentTrendsQuery )['response'] > >, @@ -631,8 +631,8 @@ export interface DataTableNode | WebGoalsQuery | SessionAttributionExplorerQuery | ErrorTrackingQuery - | ExperimentFunnelQuery - | ExperimentTrendQuery + | ExperimentFunnelsQuery + | ExperimentTrendsQuery /** Columns shown in the table, unless the `source` provides them. 
*/ columns?: HogQLExpression[] /** Columns that aren't shown in the table, even if in columns or returned data */ @@ -1221,6 +1221,7 @@ export interface QueryRequest { */ query: QuerySchema filters_override?: DashboardFilter + variables_override?: Record> } /** @@ -1595,14 +1596,14 @@ export type InsightQueryNode = | StickinessQuery | LifecycleQuery -export interface ExperimentVariantTrendBaseStats { +export interface ExperimentVariantTrendsBaseStats { key: string count: number exposure: number absolute_exposure: number } -export interface ExperimentVariantFunnelResult { +export interface ExperimentVariantFunnelsBaseStats { key: string success_count: number failure_count: number @@ -1616,9 +1617,9 @@ export enum ExperimentSignificanceCode { HighPValue = 'high_p_value', } -export interface ExperimentTrendQueryResponse { +export interface ExperimentTrendsQueryResponse { insight: TrendsQueryResponse - variants: ExperimentVariantTrendBaseStats[] + variants: ExperimentVariantTrendsBaseStats[] probability: Record significant: boolean significance_code: ExperimentSignificanceCode @@ -1626,26 +1627,31 @@ export interface ExperimentTrendQueryResponse { credible_intervals: Record } -export type CachedExperimentTrendQueryResponse = CachedQueryResponse +export type CachedExperimentTrendsQueryResponse = CachedQueryResponse -export interface ExperimentFunnelQueryResponse { - insight: InsightType.FUNNELS - results: Record +export interface ExperimentFunnelsQueryResponse { + insight: FunnelsQueryResponse + variants: ExperimentVariantFunnelsBaseStats[] + probability: Record + significant: boolean + significance_code: ExperimentSignificanceCode + expected_loss: number + credible_intervals: Record } -export type CachedExperimentFunnelQueryResponse = CachedQueryResponse +export type CachedExperimentFunnelsQueryResponse = CachedQueryResponse -export interface ExperimentFunnelQuery extends DataNode { - kind: NodeKind.ExperimentFunnelQuery +export interface ExperimentFunnelsQuery extends DataNode { + kind: NodeKind.ExperimentFunnelsQuery source: FunnelsQuery experiment_id: integer } -export interface ExperimentTrendQuery extends DataNode { - kind: NodeKind.ExperimentTrendQuery +export interface ExperimentTrendsQuery extends DataNode { + kind: NodeKind.ExperimentTrendsQuery count_query: TrendsQuery // Defaults to $feature_flag_called if not specified - // https://github.com/PostHog/posthog/blob/master/posthog/hogql_queries/experiments/experiment_trend_query_runner.py + // https://github.com/PostHog/posthog/blob/master/posthog/hogql_queries/experiments/experiment_trends_query_runner.py exposure_query?: TrendsQuery experiment_id: integer } @@ -1974,25 +1980,38 @@ export interface DashboardFilter { properties?: AnyPropertyFilter[] | null } -export interface InsightsThresholdAbsolute { +export interface InsightsThresholdBounds { lower?: number upper?: number } +export enum InsightThresholdType { + ABSOLUTE = 'absolute', + PERCENTAGE = 'percentage', +} + export interface InsightThreshold { - absoluteThreshold?: InsightsThresholdAbsolute - // More types of thresholds or conditions can be added here + type: InsightThresholdType + bounds?: InsightsThresholdBounds +} + +export enum AlertConditionType { + ABSOLUTE_VALUE = 'absolute_value', // default alert, checks absolute value of current interval + RELATIVE_INCREASE = 'relative_increase', // checks increase in value during current interval compared to previous interval + RELATIVE_DECREASE = 'relative_decrease', // checks decrease in value during current interval compared 
to previous interval } export interface AlertCondition { // Conditions in addition to the separate threshold // TODO: Think about things like relative thresholds, rate of change, etc. + type: AlertConditionType } export enum AlertState { FIRING = 'Firing', NOT_FIRING = 'Not firing', ERRORED = 'Errored', + SNOOZED = 'Snoozed', } export enum AlertCalculationInterval { diff --git a/frontend/src/scenes/activity/explore/EventDetails.tsx b/frontend/src/scenes/activity/explore/EventDetails.tsx index 8226a7289e266..d96b6d67702d0 100644 --- a/frontend/src/scenes/activity/explore/EventDetails.tsx +++ b/frontend/src/scenes/activity/explore/EventDetails.tsx @@ -26,6 +26,7 @@ export function EventDetails({ event, tableProps }: EventDetailsProps): JSX.Elem const displayedEventProperties: Properties = {} const visibleSystemProperties: Properties = {} + const featureFlagProperties: Properties = {} let systemPropsCount = 0 for (const key of Object.keys(event.properties)) { if (CORE_FILTER_DEFINITIONS_BY_GROUP.events[key] && CORE_FILTER_DEFINITIONS_BY_GROUP.events[key].system) { @@ -35,7 +36,11 @@ export function EventDetails({ event, tableProps }: EventDetailsProps): JSX.Elem } } if (!CORE_FILTER_DEFINITIONS_BY_GROUP.events[key] || !CORE_FILTER_DEFINITIONS_BY_GROUP.events[key].system) { - displayedEventProperties[key] = event.properties[key] + if (key.startsWith('$feature') || key === '$active_feature_flags') { + featureFlagProperties[key] = event.properties[key] + } else { + displayedEventProperties[key] = event.properties[key] + } } } @@ -99,5 +104,25 @@ export function EventDetails({ event, tableProps }: EventDetailsProps): JSX.Elem }) } + if (Object.keys(featureFlagProperties).length > 0) { + tabs.push({ + key: 'feature_flags', + label: 'Feature flags', + content: ( +
+ +
+ ), + }) + } + return } diff --git a/frontend/src/scenes/dashboard/Dashboard.tsx b/frontend/src/scenes/dashboard/Dashboard.tsx index d6576eb4fe20b..5f9e59ee897de 100644 --- a/frontend/src/scenes/dashboard/Dashboard.tsx +++ b/frontend/src/scenes/dashboard/Dashboard.tsx @@ -12,6 +12,7 @@ import { InsightErrorState } from 'scenes/insights/EmptyStates' import { SceneExport } from 'scenes/sceneTypes' import { urls } from 'scenes/urls' +import { VariablesForDashboard } from '~/queries/nodes/DataVisualization/Components/Variables/Variables' import { DashboardMode, DashboardPlacement, DashboardType, QueryBasedInsightModel } from '~/types' import { DashboardHeader } from './DashboardHeader' @@ -124,6 +125,7 @@ function DashboardScene(): JSX.Element {
)}
+ )} diff --git a/frontend/src/scenes/dashboard/DashboardItems.tsx b/frontend/src/scenes/dashboard/DashboardItems.tsx index bef19ceff2796..1582bcc49480b 100644 --- a/frontend/src/scenes/dashboard/DashboardItems.tsx +++ b/frontend/src/scenes/dashboard/DashboardItems.tsx @@ -27,6 +27,7 @@ export function DashboardItems(): JSX.Element { refreshStatus, canEditDashboard, itemsLoading, + temporaryVariables, } = useValues(dashboardLogic) const { updateLayouts, @@ -152,6 +153,7 @@ export function DashboardItems(): JSX.Element { showDetailsControls={placement != DashboardPlacement.Export} placement={placement} loadPriority={smLayout ? smLayout.y * 1000 + smLayout.x : undefined} + variablesOverride={temporaryVariables} {...commonTileProps} /> ) diff --git a/frontend/src/scenes/dashboard/dashboardLogic.test.ts b/frontend/src/scenes/dashboard/dashboardLogic.test.ts index cb60271cae3bf..81dac66ad0317 100644 --- a/frontend/src/scenes/dashboard/dashboardLogic.test.ts +++ b/frontend/src/scenes/dashboard/dashboardLogic.test.ts @@ -306,7 +306,7 @@ describe('dashboardLogic', () => { jest.spyOn(api, 'update') await expectLogic(logic, () => { - logic.actions.updateFiltersAndLayouts() + logic.actions.updateFiltersAndLayoutsAndVariables() }).toFinishAllListeners() expect(api.update).toHaveBeenCalledWith(`api/environments/${MOCK_TEAM_ID}/dashboards/5`, { @@ -329,6 +329,7 @@ describe('dashboardLogic', () => { date_to: null, properties: [], }, + variables: {}, }) }) }) diff --git a/frontend/src/scenes/dashboard/dashboardLogic.tsx b/frontend/src/scenes/dashboard/dashboardLogic.tsx index 17b478a1338dc..4addf1f04f4c0 100644 --- a/frontend/src/scenes/dashboard/dashboardLogic.tsx +++ b/frontend/src/scenes/dashboard/dashboardLogic.tsx @@ -23,6 +23,7 @@ import { Link } from 'lib/lemon-ui/Link' import { featureFlagLogic } from 'lib/logic/featureFlagLogic' import { clearDOMTextSelection, isAbortedRequest, shouldCancelQuery, toParams, uuid } from 'lib/utils' import { DashboardEventSource, eventUsageLogic } from 'lib/utils/eventUsageLogic' +import uniqBy from 'lodash.uniqby' import { Layout, Layouts } from 'react-grid-layout' import { calculateLayouts } from 'scenes/dashboard/tileLayouts' import { Scene } from 'scenes/sceneTypes' @@ -31,9 +32,11 @@ import { userLogic } from 'scenes/userLogic' import { dashboardsModel } from '~/models/dashboardsModel' import { insightsModel } from '~/models/insightsModel' +import { variableDataLogic } from '~/queries/nodes/DataVisualization/Components/Variables/variableDataLogic' +import { Variable } from '~/queries/nodes/DataVisualization/types' import { getQueryBasedDashboard, getQueryBasedInsightModel } from '~/queries/nodes/InsightViz/utils' import { pollForResults } from '~/queries/query' -import { DashboardFilter, RefreshType } from '~/queries/schema' +import { DashboardFilter, DataVisualizationNode, HogQLVariable, NodeKind, RefreshType } from '~/queries/schema' import { AnyPropertyFilter, Breadcrumb, @@ -139,7 +142,8 @@ async function getSingleInsight( queryId: string, refresh: RefreshType, methodOptions?: ApiMethodOptions, - filtersOverride?: DashboardFilter + filtersOverride?: DashboardFilter, + variablesOverride?: Record ): Promise { const apiUrl = `api/environments/${currentTeamId}/insights/${insight.id}/?${toParams({ refresh, @@ -147,6 +151,7 @@ async function getSingleInsight( client_query_id: queryId, session_id: currentSessionId(), ...(filtersOverride ? { filters_override: filtersOverride } : {}), + ...(variablesOverride ? 
{ variables_override: variablesOverride } : {}), })}` const insightResponse: Response = await api.getResponse(apiUrl, methodOptions) const legacyInsight: InsightModel | null = await getJSONOrNull(insightResponse) @@ -156,7 +161,7 @@ async function getSingleInsight( export const dashboardLogic = kea([ path(['scenes', 'dashboard', 'dashboardLogic']), connect(() => ({ - values: [teamLogic, ['currentTeamId'], featureFlagLogic, ['featureFlags']], + values: [teamLogic, ['currentTeamId'], featureFlagLogic, ['featureFlags'], variableDataLogic, ['variables']], logic: [dashboardsModel, insightsModel, eventUsageLogic], })), @@ -169,7 +174,7 @@ export const dashboardLogic = kea([ return props.id }), - actions({ + actions(({ values }) => ({ loadDashboard: (payload: { refresh?: RefreshType action: @@ -201,7 +206,10 @@ export const dashboardLogic = kea([ date_to, }), setProperties: (properties: AnyPropertyFilter[] | null) => ({ properties }), - setFiltersAndLayouts: (filters: DashboardFilter) => ({ filters }), + setFiltersAndLayoutsAndVariables: (filters: DashboardFilter, variables: Record) => ({ + filters, + variables, + }), setAutoRefresh: (enabled: boolean, interval: number) => ({ enabled, interval }), setRefreshStatus: (shortId: InsightShortId, loading = false, queued = false) => ({ shortId, loading, queued }), setRefreshStatuses: (shortIds: InsightShortId[], loading = false, queued = false) => ({ @@ -233,8 +241,14 @@ export const dashboardLogic = kea([ setInitialLoadResponseBytes: (responseBytes: number) => ({ responseBytes }), abortQuery: (payload: { dashboardQueryId: string; queryId: string; queryStartTime: number }) => payload, abortAnyRunningQuery: true, - updateFiltersAndLayouts: true, - }), + updateFiltersAndLayoutsAndVariables: true, + overrideVariableValue: (variableId: string, value: any) => ({ + variableId, + value, + allVariables: values.variables, + }), + resetVariables: () => ({ variables: values.insightVariables }), + })), loaders(({ actions, props, values }) => ({ dashboard: [ @@ -248,7 +262,8 @@ export const dashboardLogic = kea([ try { const apiUrl = values.apiUrl( refresh || 'async', - action === 'preview' ? values.temporaryFilters : undefined + action === 'preview' ? values.temporaryFilters : undefined, + action === 'preview' ? values.temporaryVariables : undefined ) const dashboardResponse: Response = await api.getResponse(apiUrl) const dashboard: DashboardType | null = await getJSONOrNull(dashboardResponse) @@ -282,7 +297,7 @@ export const dashboardLogic = kea([ throw error } }, - updateFiltersAndLayouts: async (_, breakpoint) => { + updateFiltersAndLayoutsAndVariables: async (_, breakpoint) => { actions.abortAnyRunningQuery() try { @@ -297,6 +312,7 @@ export const dashboardLogic = kea([ `api/environments/${values.currentTeamId}/dashboards/${props.id}`, { filters: values.filters, + variables: values.insightVariables, tiles: layoutsToUpdate, } ) @@ -432,6 +448,48 @@ export const dashboardLogic = kea([ }, }, ], + temporaryVariables: [ + {} as Record, + { + overrideVariableValue: (state, { variableId, value, allVariables }) => { + const foundExistingVar = allVariables.find((n) => n.id === variableId) + if (!foundExistingVar) { + return state + } + + return { + ...state, + [variableId]: { code_name: foundExistingVar.code_name, variableId: foundExistingVar.id, value }, + } + }, + resetVariables: (_, { variables }) => ({ ...variables }), + loadDashboardSuccess: (state, { dashboard, payload }) => + dashboard + ? 
{ + ...state, + // don't update filters if we're previewing + ...(payload?.action === 'preview' ? {} : dashboard.variables ?? {}), + } + : state, + }, + ], + insightVariables: [ + {} as Record, + { + setFiltersAndLayoutsAndVariables: (state, { variables }) => ({ + ...state, + ...variables, + }), + loadDashboardSuccess: (state, { dashboard, payload }) => + dashboard + ? { + ...state, + // don't update filters if we're previewing + ...(payload?.action === 'preview' ? {} : dashboard.variables ?? {}), + } + : state, + }, + ], temporaryFilters: [ { date_from: null, @@ -466,7 +524,7 @@ export const dashboardLogic = kea([ properties: null, } as DashboardFilter, { - setFiltersAndLayouts: (state, { filters }) => ({ + setFiltersAndLayoutsAndVariables: (state, { filters }) => ({ ...state, ...filters, }), @@ -689,6 +747,44 @@ export const dashboardLogic = kea([ ], })), selectors(() => ({ + dashboardVariables: [ + (s) => [s.dashboard, s.variables, s.temporaryVariables], + ( + dashboard: DashboardType, + allVariables: Variable[], + temporaryVariables: Record + ): Variable[] => { + const dataVizNodes = dashboard.tiles + .map((n) => n.insight?.query) + .filter((n) => n?.kind === NodeKind.DataVisualizationNode) + .filter((n): n is DataVisualizationNode => Boolean(n)) + const hogQLVariables = dataVizNodes + .map((n) => n.source.variables) + .filter((n): n is Record => Boolean(n)) + .flatMap((n) => Object.values(n)) + + const uniqueVars = uniqBy(hogQLVariables, (n) => n.variableId) + return uniqueVars + .map((v) => { + const foundVar = allVariables.find((n) => n.id === v.variableId) + + if (!foundVar) { + return null + } + + const overridenValue = temporaryVariables[v.variableId]?.value + + // Overwrite the variable `value` from the insight + const resultVar: Variable = { + ...foundVar, + value: overridenValue ?? v.value ?? foundVar.value, + } + + return resultVar + }) + .filter((n): n is Variable => Boolean(n)) + }, + ], asDashboardTemplate: [ (s) => [s.dashboard], (dashboard: DashboardType): DashboardTemplateEditorType | undefined => { @@ -731,10 +827,15 @@ export const dashboardLogic = kea([ apiUrl: [ () => [(_, props) => props.id], (id) => { - return (refresh?: RefreshType, filtersOverride?: DashboardFilter) => + return ( + refresh?: RefreshType, + filtersOverride?: DashboardFilter, + variablesOverride?: Record + ) => `api/environments/${teamLogic.values.currentTeamId}/dashboards/${id}/?${toParams({ refresh, filters_override: filtersOverride, + variables_override: variablesOverride, })}` }, ], @@ -947,7 +1048,7 @@ export const dashboardLogic = kea([ }, })), listeners(({ actions, values, cache, props, sharedListeners }) => ({ - updateFiltersAndLayoutsSuccess: () => { + updateFiltersAndLayoutsAndVariablesSuccess: () => { actions.loadDashboard({ action: 'update' }) }, setRefreshError: sharedListeners.reportRefreshTiming, @@ -1046,7 +1147,10 @@ export const dashboardLogic = kea([ insight, dashboardId, uuid(), - 'force_async' + 'force_async', + undefined, + undefined, + values.temporaryVariables ) dashboardsModel.actions.updateDashboardInsight(refreshedInsight!) // Start polling for results @@ -1138,7 +1242,8 @@ export const dashboardLogic = kea([ queryId, 'force_cache', methodOptions, - action === 'preview' ? values.temporaryFilters : undefined + action === 'preview' ? values.temporaryFilters : undefined, + action === 'preview' ? 
values.temporaryVariables : undefined ) if (action === 'preview' && polledInsight!.dashboard_tiles) { @@ -1187,8 +1292,8 @@ export const dashboardLogic = kea([ eventUsageLogic.actions.reportDashboardRefreshed(dashboardId, values.newestRefreshed) }, - setFiltersAndLayouts: ({ filters: { date_from, date_to } }) => { - actions.updateFiltersAndLayouts() + setFiltersAndLayoutsAndVariables: ({ filters: { date_from, date_to } }) => { + actions.updateFiltersAndLayoutsAndVariables() eventUsageLogic.actions.reportDashboardDateRangeChanged(date_from, date_to) eventUsageLogic.actions.reportDashboardPropertiesChanged() }, @@ -1203,12 +1308,13 @@ export const dashboardLogic = kea([ // reset filters to that before previewing actions.setDates(values.filters.date_from ?? null, values.filters.date_to ?? null) actions.setProperties(values.filters.properties ?? null) + actions.resetVariables() // also reset layout to that we stored in dashboardLayouts // this is done in the reducer for dashboard } else if (source === DashboardEventSource.DashboardHeaderSaveDashboard) { // save edit mode changes - actions.setFiltersAndLayouts(values.temporaryFilters) + actions.setFiltersAndLayoutsAndVariables(values.temporaryFilters, values.temporaryVariables) } } @@ -1305,6 +1411,10 @@ export const dashboardLogic = kea([ setDates: () => { actions.loadDashboard({ action: 'preview' }) }, + overrideVariableValue: () => { + actions.setDashboardMode(DashboardMode.Edit, null) + actions.loadDashboard({ action: 'preview' }) + }, })), urlToAction(({ values, actions }) => ({ diff --git a/frontend/src/scenes/data-warehouse/external/dataWarehouseExternalSceneLogic.ts b/frontend/src/scenes/data-warehouse/external/dataWarehouseExternalSceneLogic.ts index 17da8d174d375..77f809d35bfe6 100644 --- a/frontend/src/scenes/data-warehouse/external/dataWarehouseExternalSceneLogic.ts +++ b/frontend/src/scenes/data-warehouse/external/dataWarehouseExternalSceneLogic.ts @@ -104,6 +104,7 @@ export const dataWarehouseExternalSceneLogic = kea {experiment && !isExperimentRunning && (
- setEditExperiment(true)}> - Edit - ([ }, { key: [Scene.Experiment, experimentId], - name: experiment?.name || 'New', - path: urls.experiment(experimentId || 'new'), + name: experiment?.name || '', + onRename: async (name: string) => { + // :KLUDGE: work around a type error when using asyncActions accessed via a callback passed to selectors() + const logic = experimentLogic({ experimentId }) + await logic.asyncActions.updateExperiment({ name }) + }, }, ], ], diff --git a/frontend/src/scenes/feature-flags/FeatureFlag.tsx b/frontend/src/scenes/feature-flags/FeatureFlag.tsx index 4e219519fd837..21bd124d956c9 100644 --- a/frontend/src/scenes/feature-flags/FeatureFlag.tsx +++ b/frontend/src/scenes/feature-flags/FeatureFlag.tsx @@ -638,7 +638,7 @@ function UsageTab({ featureFlag }: { id: string; featureFlag: FeatureFlagType }) ) { enrichUsageDashboard() } - }, [dashboard]) + }, [dashboard, hasEnrichedAnalytics, enrichUsageDashboard]) const propertyFilter: AnyPropertyFilter[] = [ { diff --git a/frontend/src/scenes/feature-flags/featureFlagLogic.ts b/frontend/src/scenes/feature-flags/featureFlagLogic.ts index db2811ef1f44b..65026ccfd3453 100644 --- a/frontend/src/scenes/feature-flags/featureFlagLogic.ts +++ b/frontend/src/scenes/feature-flags/featureFlagLogic.ts @@ -38,6 +38,7 @@ import { FilterType, InsightModel, InsightType, + JsonType, MultivariateFlagOptions, MultivariateFlagVariant, NewEarlyAccessFeatureType, @@ -133,9 +134,11 @@ export const variantKeyToIndexFeatureFlagPayloads = (flag: FeatureFlagType): Fea return flag } - const newPayloads = {} + const newPayloads: Record = {} flag.filters.multivariate?.variants.forEach((variant, index) => { - newPayloads[index] = flag.filters.payloads?.[variant.key] + if (flag.filters.payloads?.[variant.key] !== undefined) { + newPayloads[index] = flag.filters.payloads[variant.key] + } }) return { ...flag, @@ -148,11 +151,10 @@ export const variantKeyToIndexFeatureFlagPayloads = (flag: FeatureFlagType): Fea const indexToVariantKeyFeatureFlagPayloads = (flag: Partial): Partial => { if (flag.filters?.multivariate) { - const newPayloads = {} - flag.filters?.multivariate?.variants.forEach(({ key }, index) => { - const payload = flag.filters?.payloads?.[index] - if (payload) { - newPayloads[key] = payload + const newPayloads: Record = {} + flag.filters.multivariate.variants.forEach(({ key }, index) => { + if (flag.filters?.payloads?.[index] !== undefined) { + newPayloads[key] = flag.filters.payloads[index] } }) return { @@ -319,6 +321,22 @@ export const featureFlagLogic = kea([ } const variants = [...(state.filters.multivariate?.variants || [])] variants.splice(index, 1) + + const currentPayloads = { ...state.filters.payloads } + const newPayloads: Record = {} + + // TRICKY: In addition to modifying the variant array, we also need to shift the payload indices + // because the variant array is being modified and we need to make sure that the payloads object + // stays in sync with the variant array. 
+ Object.keys(currentPayloads).forEach((key) => { + const payloadIndex = parseInt(key) + if (payloadIndex > index) { + newPayloads[payloadIndex - 1] = currentPayloads[payloadIndex] + } else if (payloadIndex < index) { + newPayloads[payloadIndex] = currentPayloads[payloadIndex] + } + }) + return { ...state, filters: { @@ -327,6 +345,7 @@ export const featureFlagLogic = kea([ ...state.filters.multivariate, variants, }, + payloads: newPayloads, }, } }, @@ -642,7 +661,7 @@ export const featureFlagLogic = kea([ createScheduledChange: async () => { const { scheduledChangeOperation, scheduleDateMarker, currentTeamId, schedulePayload } = values - const fields = { + const fields: Record = { [ScheduledChangeOperationType.UpdateStatus]: 'active', [ScheduledChangeOperationType.AddReleaseCondition]: 'filters', } diff --git a/frontend/src/scenes/insights/Insight.tsx b/frontend/src/scenes/insights/Insight.tsx index 6e4786a66fe11..f6c4c77bbcf0a 100644 --- a/frontend/src/scenes/insights/Insight.tsx +++ b/frontend/src/scenes/insights/Insight.tsx @@ -21,14 +21,18 @@ export interface InsightSceneProps { export function Insight({ insightId }: InsightSceneProps): JSX.Element { // insightSceneLogic - const { insightMode, insight, filtersOverride } = useValues(insightSceneLogic) + const { insightMode, insight, filtersOverride, variablesOverride } = useValues(insightSceneLogic) // insightLogic const logic = insightLogic({ dashboardItemId: insightId || 'new', // don't use cached insight if we have filtersOverride - cachedInsight: isObject(filtersOverride) && insight?.short_id === insightId ? insight : null, + cachedInsight: + (isObject(filtersOverride) || isObject(variablesOverride)) && insight?.short_id === insightId + ? insight + : null, filtersOverride, + variablesOverride, }) const { insightProps } = useValues(logic) @@ -52,13 +56,16 @@ export function Insight({ insightId }: InsightSceneProps): JSX.Element {
- {isObject(filtersOverride) && ( + {(isObject(filtersOverride) || isObject(variablesOverride)) && (
- You are viewing this insight with filters from a dashboard + + You are viewing this insight with{' '} + {isObject(variablesOverride) ? 'variables' : 'filters'} from a dashboard + - Discard dashboard filters + Discard dashboard {isObject(variablesOverride) ? 'variables' : 'filters'}
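For context (illustrative only, not taken from the changed files): the override this banner reacts to is a plain record keyed by variable ID. A minimal sketch of the assumed shape, based on how `temporaryVariables` and `HogQLVariable` are used elsewhere in this diff; the ID and value are placeholders, and any fields beyond `variableId` and `value` are not claimed here:

// Sketch only: a dashboard-level variables override, keyed by the variable's ID,
// carrying the value that should replace the insight's own default.
const variablesOverride: Record<string, { variableId: string; value?: unknown }> = {
    'abc-123': { variableId: 'abc-123', value: 25 }, // hypothetical variable ID and value
}

Insight.tsx only needs to know whether such an object is present (the isObject check above) to decide between the "variables" and "filters" wording.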
@@ -83,6 +90,7 @@ export function Insight({ insightId }: InsightSceneProps): JSX.Element { insightProps, }} filtersOverride={filtersOverride} + variablesOverride={variablesOverride} />
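A further aside (illustrative only, not part of the diff): a dashboard link carries this override as a JSON-encoded `variables_override` query parameter. A minimal sketch following the `urls.insightView` helper updated later in this diff; the insight short ID, dashboard ID, and variable values below are placeholders:

// Sketch only — mirrors the param-building logic added to urls.insightView.
const variablesOverride = { 'abc-123': { variableId: 'abc-123', value: 25 } } // hypothetical
const params = [
    { param: 'dashboard', value: 7 }, // hypothetical dashboard ID
    { param: 'variables_override', value: variablesOverride },
]
    .filter((n) => Boolean(n.value))
    .map((n) => `${n.param}=${encodeURIComponent(JSON.stringify(n.value))}`)
    .join('&')
const url = `/insights/SHORT_ID${params.length ? `?${params}` : ''}`

// insightSceneLogic reads searchParams['variables_override'] from such a URL and, as the
// surrounding changes show, threads it through insightLogic and insightDataLogic into
// dataNodeLogic as `variablesOverride`, alongside any `filtersOverride`.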
diff --git a/frontend/src/scenes/insights/insightDataLogic.tsx b/frontend/src/scenes/insights/insightDataLogic.tsx index e06b13c197592..168a9160bb966 100644 --- a/frontend/src/scenes/insights/insightDataLogic.tsx +++ b/frontend/src/scenes/insights/insightDataLogic.tsx @@ -32,6 +32,8 @@ export const insightDataLogic = kea([ dataNodeLogic({ key: insightVizDataNodeKey(props), loadPriority: props.loadPriority, + filtersOverride: props.filtersOverride, + variablesOverride: props.variablesOverride, } as DataNodeLogicProps), [ 'query as insightQuery', diff --git a/frontend/src/scenes/insights/insightLogic.tsx b/frontend/src/scenes/insights/insightLogic.tsx index cc04937a42ad5..a299d639fee1d 100644 --- a/frontend/src/scenes/insights/insightLogic.tsx +++ b/frontend/src/scenes/insights/insightLogic.tsx @@ -21,7 +21,7 @@ import { dashboardsModel } from '~/models/dashboardsModel' import { groupsModel } from '~/models/groupsModel' import { insightsModel } from '~/models/insightsModel' import { tagsModel } from '~/models/tagsModel' -import { DashboardFilter, Node } from '~/queries/schema' +import { DashboardFilter, HogQLVariable, Node } from '~/queries/schema' import { InsightLogicProps, InsightShortId, ItemMode, QueryBasedInsightModel, SetInsightOptions } from '~/types' import { teamLogic } from '../teamLogic' @@ -77,9 +77,14 @@ export const insightLogic: LogicWrapper = kea ({ redirectToViewMode }), saveInsightSuccess: true, saveInsightFailure: true, - loadInsight: (shortId: InsightShortId, filtersOverride?: DashboardFilter | null) => ({ + loadInsight: ( + shortId: InsightShortId, + filtersOverride?: DashboardFilter | null, + variablesOverride?: Record | null + ) => ({ shortId, filtersOverride, + variablesOverride, }), updateInsight: (insightUpdate: Partial, callback?: () => void) => ({ insightUpdate, @@ -96,9 +101,15 @@ export const insightLogic: LogicWrapper = kea { + loadInsight: async ({ shortId, filtersOverride, variablesOverride }, breakpoint) => { await breakpoint(100) - const insight = await insightsApi.getByShortId(shortId, undefined, 'async', filtersOverride) + const insight = await insightsApi.getByShortId( + shortId, + undefined, + 'async', + filtersOverride, + variablesOverride + ) if (!insight) { throw new Error(`Insight with shortId ${shortId} not found`) @@ -417,7 +428,11 @@ export const insightLogic: LogicWrapper = kea([ insightMode: ItemMode, itemId: string | undefined, alertId: AlertType['id'] | undefined, + filtersOverride: DashboardFilter | undefined, + variablesOverride: Record | undefined, dashboardId: DashboardType['id'] | undefined, - dashboardName: DashboardType['name'] | undefined, - filtersOverride: DashboardFilter | undefined + dashboardName: DashboardType['name'] | undefined ) => ({ insightId, insightMode, @@ -61,6 +62,7 @@ export const insightSceneLogic = kea([ dashboardId, dashboardName, filtersOverride, + variablesOverride, }), setInsightLogicRef: (logic: BuiltLogic | null, unmount: null | (() => void)) => ({ logic, @@ -122,6 +124,13 @@ export const insightSceneLogic = kea([ setSceneState: (_, { filtersOverride }) => (filtersOverride !== undefined ? filtersOverride : null), }, ], + variablesOverride: [ + null as null | Record, + { + setSceneState: (_, { variablesOverride }) => + variablesOverride !== undefined ? 
variablesOverride : null, + }, + ], insightLogicRef: [ null as null | { logic: BuiltLogic @@ -222,7 +231,11 @@ export const insightSceneLogic = kea([ const oldRef = values.insightLogicRef // free old logic after mounting new one const oldRef2 = values.insightDataLogicRef // free old logic after mounting new one if (insightId) { - const insightProps = { dashboardItemId: insightId, filtersOverride: values.filtersOverride } + const insightProps = { + dashboardItemId: insightId, + filtersOverride: values.filtersOverride, + variablesOverride: values.variablesOverride, + } const logic = insightLogic.build(insightProps) const unmount = logic.mount() @@ -242,7 +255,11 @@ export const insightSceneLogic = kea([ oldRef2.unmount() } } else if (insightId) { - values.insightLogicRef?.logic.actions.loadInsight(insightId as InsightShortId, values.filtersOverride) + values.insightLogicRef?.logic.actions.loadInsight( + insightId as InsightShortId, + values.filtersOverride, + values.variablesOverride + ) } }, })), @@ -294,18 +311,20 @@ export const insightSceneLogic = kea([ insightMode !== values.insightMode || itemId !== values.itemId || alert_id !== values.alertId || + !objectsEqual(searchParams['variables_override'], values.variablesOverride) || + !objectsEqual(filtersOverride, values.filtersOverride) || dashboard !== values.dashboardId || - dashboardName !== values.dashboardName || - !objectsEqual(filtersOverride, values.filtersOverride) + dashboardName !== values.dashboardName ) { actions.setSceneState( insightId, insightMode, itemId, alert_id, + filtersOverride, + searchParams['variables_override'], dashboard, - dashboardName, - filtersOverride + dashboardName ) } diff --git a/frontend/src/scenes/insights/utils/api.ts b/frontend/src/scenes/insights/utils/api.ts index cca500ab600d4..2d85cc71f702c 100644 --- a/frontend/src/scenes/insights/utils/api.ts +++ b/frontend/src/scenes/insights/utils/api.ts @@ -1,7 +1,7 @@ import api from 'lib/api' import { getQueryBasedInsightModel } from '~/queries/nodes/InsightViz/utils' -import { DashboardFilter, RefreshType } from '~/queries/schema' +import { DashboardFilter, HogQLVariable, RefreshType } from '~/queries/schema' import { InsightShortId, QueryBasedInsightModel } from '~/types' async function _perform( @@ -20,9 +20,16 @@ export const insightsApi = { shortId: InsightShortId, basic?: boolean, refresh?: RefreshType, - filtersOverride?: DashboardFilter | null + filtersOverride?: DashboardFilter | null, + variablesOverride?: Record | null ): Promise { - const legacyInsights = await api.insights.loadInsight(shortId, basic, refresh, filtersOverride) + const legacyInsights = await api.insights.loadInsight( + shortId, + basic, + refresh, + filtersOverride, + variablesOverride + ) if (legacyInsights.results.length === 0) { return null } diff --git a/frontend/src/scenes/insights/utils/queryUtils.ts b/frontend/src/scenes/insights/utils/queryUtils.ts index abfc46ba28eda..005093406c10a 100644 --- a/frontend/src/scenes/insights/utils/queryUtils.ts +++ b/frontend/src/scenes/insights/utils/queryUtils.ts @@ -17,8 +17,21 @@ import { ChartDisplayType } from '~/types' type CompareQueryOpts = { ignoreVisualizationOnlyChanges: boolean } export const getVariablesFromQuery = (query: string): string[] => { - const queryVariableMatches = /\{variables\.([a-z0-9_]+)\}/gm.exec(query) - return (queryVariableMatches ?? 
[]).filter(Boolean) + const re = /\{variables\.([a-z0-9_]+)\}/gm + const results: string[] = [] + + for (;;) { + const reResult = re.exec(query) + if (!reResult) { + break + } + + if (reResult[1]) { + results.push(reResult[1]) + } + } + + return results } export const compareQuery = (a: Node, b: Node, opts?: CompareQueryOpts): boolean => { diff --git a/frontend/src/scenes/notebooks/Nodes/NotebookNodeQuery.tsx b/frontend/src/scenes/notebooks/Nodes/NotebookNodeQuery.tsx index 38b48e7512036..be59069b7d665 100644 --- a/frontend/src/scenes/notebooks/Nodes/NotebookNodeQuery.tsx +++ b/frontend/src/scenes/notebooks/Nodes/NotebookNodeQuery.tsx @@ -2,7 +2,7 @@ import { Query } from '~/queries/Query/Query' import { DataTableNode, InsightQueryNode, InsightVizNode, NodeKind, QuerySchema } from '~/queries/schema' import { createPostHogWidgetNode } from 'scenes/notebooks/Nodes/NodeWrapper' import { InsightLogicProps, InsightShortId, NotebookNodeType } from '~/types' -import { useActions, useMountedLogic, useValues } from 'kea' +import { BindLogic, useActions, useMountedLogic, useValues } from 'kea' import { useEffect, useMemo } from 'react' import { notebookNodeLogic } from './notebookNodeLogic' import { NotebookNodeProps, NotebookNodeAttributeProperties } from '../Notebook/utils' @@ -35,9 +35,11 @@ const Component = ({ const { expanded } = useValues(nodeLogic) const { setTitlePlaceholder } = useActions(nodeLogic) const summarizeInsight = useSummarizeInsight() - const { insightName } = useValues( - insightLogic({ dashboardItemId: query.kind === NodeKind.SavedInsightNode ? query.shortId : 'new' }) - ) + + const insightLogicProps = { + dashboardItemId: query.kind === NodeKind.SavedInsightNode ? query.shortId : ('new' as const), + } + const { insightName } = useValues(insightLogic(insightLogicProps)) useEffect(() => { let title = 'Query' @@ -96,19 +98,21 @@ const Component = ({ return (
- { - updateAttributes({ - query: { - ...attributes.query, - source: (t as DataTableNode | InsightVizNode).source, - } as QuerySchema, - }) - }} - /> + + { + updateAttributes({ + query: { + ...attributes.query, + source: (t as DataTableNode | InsightVizNode).source, + } as QuerySchema, + }) + }} + /> +
) } diff --git a/frontend/src/scenes/pipeline/hogfunctions/HogFunctionConfiguration.tsx b/frontend/src/scenes/pipeline/hogfunctions/HogFunctionConfiguration.tsx index e0569c8157229..e16f5cadedc7a 100644 --- a/frontend/src/scenes/pipeline/hogfunctions/HogFunctionConfiguration.tsx +++ b/frontend/src/scenes/pipeline/hogfunctions/HogFunctionConfiguration.tsx @@ -13,6 +13,7 @@ import { Link, SpinnerOverlay, } from '@posthog/lemon-ui' +import clsx from 'clsx' import { BindLogic, useActions, useValues } from 'kea' import { Form } from 'kea-forms' import { NotFound } from 'lib/components/NotFound' @@ -340,89 +341,97 @@ export function HogFunctionConfiguration({ templateId, id }: { templateId?: stri
- {showSource ? ( - <> - } - size="small" - type="secondary" - className="my-4" - onClick={() => { - setConfigurationValue('inputs_schema', [ - ...(configuration.inputs_schema ?? []), - { - type: 'string', - key: `input_${ - (configuration.inputs_schema?.length ?? 0) + 1 - }`, - label: '', - required: false, - }, - ]) - }} - > - Add input variable - - - {({ value, onChange }) => ( - <> -
- Function source code - setShowSource(false)} - > - Hide source code - -
- - This is the underlying Hog code that will run whenever the - filters match.{' '} - See the docs{' '} - for more info - - onChange(v ?? '')} - globals={globalsWithInputs} - options={{ - minimap: { - enabled: false, - }, - wordWrap: 'on', - scrollBeyondLastLine: false, - automaticLayout: true, - fixedOverflowWidgets: true, - suggest: { - showInlineDetails: true, - }, - quickSuggestionsDelay: 300, - }} - /> - - )} -
- + } + size="small" + type="secondary" + className="my-4" + onClick={() => { + setConfigurationValue('inputs_schema', [ + ...(configuration.inputs_schema ?? []), + { + type: 'string', + key: `input_${(configuration.inputs_schema?.length ?? 0) + 1}`, + label: '', + required: false, + }, + ]) + }} + > + Add input variable + + ) : null} +
+
+ +
+
+
+

Edit source

+ {!showSource ?

Click here to edit the function's source code

: null} +
+ + {!showSource ? ( + setShowSource(true)} + disabledReason={ + !hasAddon + ? 'Editing the source code requires the Data Pipelines addon' + : undefined + } + > + Edit source code + ) : ( -
- setShowSource(true)} - disabledReason={ - !hasAddon - ? 'Editing the source code requires the Data Pipelines addon' - : undefined - } - > - Show function source code - -
+ setShowSource(false)} + > + Hide source code + )}
+ + {showSource ? ( + + {({ value, onChange }) => ( + <> + + This is the underlying Hog code that will run whenever the filters + match. See the docs{' '} + for more info + + onChange(v ?? '')} + globals={globalsWithInputs} + options={{ + minimap: { + enabled: false, + }, + wordWrap: 'on', + scrollBeyondLastLine: false, + automaticLayout: true, + fixedOverflowWidgets: true, + suggest: { + showInlineDetails: true, + }, + quickSuggestionsDelay: 300, + }} + /> + + )} + + ) : null}
{id ? : } diff --git a/frontend/src/scenes/saved-insights/SavedInsights.tsx b/frontend/src/scenes/saved-insights/SavedInsights.tsx index 4810a04db4bdf..554f0be7f5c45 100644 --- a/frontend/src/scenes/saved-insights/SavedInsights.tsx +++ b/frontend/src/scenes/saved-insights/SavedInsights.tsx @@ -338,14 +338,14 @@ export const QUERY_TYPES_METADATA: Record = { icon: IconVideoCamera, inMenu: false, }, - [NodeKind.ExperimentTrendQuery]: { - name: 'Experiment Result', + [NodeKind.ExperimentTrendsQuery]: { + name: 'Experiment Trends Result', description: 'View experiment trend result', icon: IconFlask, inMenu: false, }, - [NodeKind.ExperimentFunnelQuery]: { - name: 'Experiment Funnel', + [NodeKind.ExperimentFunnelsQuery]: { + name: 'Experiment Funnels Result', description: 'View experiment funnel result', icon: IconFlask, inMenu: false, diff --git a/frontend/src/scenes/settings/environment/SessionRecordingSettings.tsx b/frontend/src/scenes/settings/environment/SessionRecordingSettings.tsx index dce1fbe08efad..e666ec6d20bef 100644 --- a/frontend/src/scenes/settings/environment/SessionRecordingSettings.tsx +++ b/frontend/src/scenes/settings/environment/SessionRecordingSettings.tsx @@ -1,8 +1,9 @@ -import { IconPlus } from '@posthog/icons' +import { IconPencil, IconPlus, IconTrash } from '@posthog/icons' import { LemonBanner, LemonButton, LemonDialog, + LemonInput, LemonSegmentedButton, LemonSegmentedButtonOption, LemonSelect, @@ -11,23 +12,32 @@ import { Link, Spinner, } from '@posthog/lemon-ui' +import clsx from 'clsx' import { useActions, useValues } from 'kea' +import { Form } from 'kea-forms' import { AuthorizedUrlList } from 'lib/components/AuthorizedUrlList/AuthorizedUrlList' import { AuthorizedUrlListType } from 'lib/components/AuthorizedUrlList/authorizedUrlListLogic' import { EventSelect } from 'lib/components/EventSelect/EventSelect' +import { FlaggedFeature } from 'lib/components/FlaggedFeature' import { FlagSelector } from 'lib/components/FlagSelector' import { PayGateMini } from 'lib/components/PayGateMini/PayGateMini' import { PropertySelect } from 'lib/components/PropertySelect/PropertySelect' import { TaxonomicFilterGroupType } from 'lib/components/TaxonomicFilter/types' -import { SESSION_REPLAY_MINIMUM_DURATION_OPTIONS } from 'lib/constants' +import { FEATURE_FLAGS, SESSION_REPLAY_MINIMUM_DURATION_OPTIONS } from 'lib/constants' import { IconCancel, IconSelectEvents } from 'lib/lemon-ui/icons' +import { LemonField } from 'lib/lemon-ui/LemonField' import { LemonLabel } from 'lib/lemon-ui/LemonLabel/LemonLabel' import { objectsEqual } from 'lib/utils' -import { sessionReplayLinkedFlagLogic } from 'scenes/settings/environment/sessionReplayLinkedFlagLogic' +import { sessionReplayIngestionControlLogic } from 'scenes/settings/environment/sessionReplayIngestionControlLogic' import { teamLogic } from 'scenes/teamLogic' import { userLogic } from 'scenes/userLogic' -import { AvailableFeature, MultivariateFlagOptions, SessionRecordingAIConfig } from '~/types' +import { + AvailableFeature, + MultivariateFlagOptions, + SessionRecordingAIConfig, + SessionReplayUrlTriggerConfig, +} from '~/types' function LogCaptureSettings(): JSX.Element { const { updateCurrentTeam } = useActions(teamLogic) @@ -259,9 +269,8 @@ function LinkedFlagSelector(): JSX.Element | null { const featureFlagRecordingFeatureEnabled = hasAvailableFeature(AvailableFeature.REPLAY_FEATURE_FLAG_BASED_RECORDING) - const logic = sessionReplayLinkedFlagLogic({ id: currentTeam?.session_recording_linked_flag?.id || null }) - const { 
linkedFlag, featureFlagLoading, flagHasVariants } = useValues(logic) - const { selectFeatureFlag } = useActions(logic) + const { linkedFlag, featureFlagLoading, flagHasVariants } = useValues(sessionReplayIngestionControlLogic) + const { selectFeatureFlag } = useActions(sessionReplayIngestionControlLogic) if (!featureFlagRecordingFeatureEnabled) { return null @@ -330,6 +339,123 @@ function LinkedFlagSelector(): JSX.Element | null { ) } +function UrlTriggerForm(): JSX.Element { + const { cancelProposingUrlTrigger } = useActions(sessionReplayIngestionControlLogic) + const { isProposedUrlTriggerSubmitting } = useValues(sessionReplayIngestionControlLogic) + + return ( +
+
+ + + + + + +
+
+ + Cancel + + + Save + +
+ + ) +} + +function UrlTriggerRow({ trigger, index }: { trigger: SessionReplayUrlTriggerConfig; index: number }): JSX.Element { + const { editUrlTriggerIndex } = useValues(sessionReplayIngestionControlLogic) + const { setEditUrlTriggerIndex, removeUrlTrigger } = useActions(sessionReplayIngestionControlLogic) + + if (editUrlTriggerIndex === index) { + return ( +
+ +
+ ) + } + + return ( +
+ + {trigger.matching === 'regex' ? 'Matches regex: ' : ''} {trigger.url} + +
+ } + onClick={() => setEditUrlTriggerIndex(index)} + tooltip="Edit" + center + /> + + } + tooltip="Remove URL trigger" + center + onClick={() => { + LemonDialog.open({ + title: <>Remove URL trigger, + description: `Are you sure you want to remove this URL trigger?`, + primaryButton: { + status: 'danger', + children: 'Remove', + onClick: () => removeUrlTrigger(index), + }, + secondaryButton: { + children: 'Cancel', + }, + }) + }} + /> +
+
+ ) +} + +function UrlTriggerOptions(): JSX.Element | null { + const { isAddUrlTriggerConfigFormVisible, urlTriggerConfig } = useValues(sessionReplayIngestionControlLogic) + const { newUrlTrigger } = useActions(sessionReplayIngestionControlLogic) + + return ( +
+
+ Enable recordings when URL matches + { + newUrlTrigger() + }} + type="secondary" + icon={} + data-attr="session-replay-add-url-trigger" + > + Add + +
+

+ Adding a URL trigger means recording will only be started when the user visits a page that matches the + URL. +

+ + {isAddUrlTriggerConfigFormVisible && } + {urlTriggerConfig?.map((trigger, index) => ( + + ))} +
+ ) +} + export function ReplayCostControl(): JSX.Element | null { const { updateCurrentTeam } = useActions(teamLogic) const { currentTeam } = useValues(teamLogic) @@ -484,6 +610,9 @@ export function ReplayCostControl(): JSX.Element | null { )} + + + ) diff --git a/frontend/src/scenes/settings/environment/sessionReplayIngestionControlLogic.ts b/frontend/src/scenes/settings/environment/sessionReplayIngestionControlLogic.ts new file mode 100644 index 0000000000000..b0d2057631f9d --- /dev/null +++ b/frontend/src/scenes/settings/environment/sessionReplayIngestionControlLogic.ts @@ -0,0 +1,158 @@ +import { actions, afterMount, connect, kea, listeners, path, props, reducers, selectors, sharedListeners } from 'kea' +import { forms } from 'kea-forms' +import { loaders } from 'kea-loaders' +import { subscriptions } from 'kea-subscriptions' +import api from 'lib/api' +import { isObject } from 'lib/utils' +import { variantKeyToIndexFeatureFlagPayloads } from 'scenes/feature-flags/featureFlagLogic' +import { teamLogic } from 'scenes/teamLogic' + +import { FeatureFlagBasicType, SessionReplayUrlTriggerConfig, TeamPublicType, TeamType } from '~/types' + +import type { sessionReplayIngestionControlLogicType } from './sessionReplayIngestionControlLogicType' + +const NEW_URL_TRIGGER = { url: '', matching: 'regex' } + +export const sessionReplayIngestionControlLogic = kea([ + path(['scenes', 'settings', 'project', 'sessionReplayIngestionControlLogic']), + actions({ + selectFeatureFlag: (flag: FeatureFlagBasicType) => ({ flag }), + setUrlTriggerConfig: (urlTriggerConfig: SessionReplayUrlTriggerConfig[]) => ({ urlTriggerConfig }), + + addUrlTrigger: (urlTriggerConfig: SessionReplayUrlTriggerConfig) => ({ urlTriggerConfig }), + removeUrlTrigger: (index: number) => ({ index }), + updateUrlTrigger: (index: number, urlTriggerConfig: SessionReplayUrlTriggerConfig) => ({ + index, + urlTriggerConfig, + }), + setEditUrlTriggerIndex: (originalIndex: number | null) => ({ originalIndex }), + newUrlTrigger: true, + cancelProposingUrlTrigger: true, + }), + connect({ values: [teamLogic, ['currentTeam']], actions: [teamLogic, ['updateCurrentTeam']] }), + reducers({ + selectedFlag: [ + null as FeatureFlagBasicType | null, + { + selectFeatureFlag: (_, { flag }) => flag, + }, + ], + urlTriggerConfig: [ + null as SessionReplayUrlTriggerConfig[] | null, + { + setUrlTriggerConfig: (_, { urlTriggerConfig }) => urlTriggerConfig, + addUrlTrigger: (state, { urlTriggerConfig }) => [...(state ?? []), urlTriggerConfig], + updateUrlTrigger: (state, { index, urlTriggerConfig: newUrlTriggerConfig }) => + (state ?? []).map((triggerConfig, i) => (i === index ? newUrlTriggerConfig : triggerConfig)), + removeUrlTrigger: (state, { index }) => { + return (state ?? []).filter((_, i) => i !== index) + }, + }, + ], + editUrlTriggerIndex: [ + null as number | null, + { + setEditUrlTriggerIndex: (_, { originalIndex }) => originalIndex, + removeUrlTrigger: (editUrlTriggerIndex, { index }) => + editUrlTriggerIndex && index < editUrlTriggerIndex + ? editUrlTriggerIndex - 1 + : index === editUrlTriggerIndex + ? 
null + : editUrlTriggerIndex, + newUrlTrigger: () => -1, + updateUrlTrigger: () => null, + addUrlTrigger: () => null, + cancelProposingUrlTrigger: () => null, + }, + ], + }), + props({}), + loaders(({ values }) => ({ + featureFlag: { + loadFeatureFlag: async () => { + if (values.linkedFeatureFlagId) { + const retrievedFlag = await api.featureFlags.get(values.linkedFeatureFlagId) + return variantKeyToIndexFeatureFlagPayloads(retrievedFlag) + } + return null + }, + }, + })), + selectors({ + linkedFeatureFlagId: [ + (s) => [s.currentTeam], + (currentTeam) => currentTeam?.session_recording_linked_flag?.id || null, + ], + linkedFlag: [ + (s) => [s.featureFlag, s.selectedFlag, s.currentTeam], + // an existing linked flag is loaded from the API, + // a newly chosen flag is selected can be passed in + // the current team is used to ensure that we don't show stale values + // as people change the selection + (featureFlag, selectedFlag, currentTeam) => + currentTeam?.session_recording_linked_flag?.id ? selectedFlag || featureFlag : null, + ], + flagHasVariants: [(s) => [s.linkedFlag], (linkedFlag) => isObject(linkedFlag?.filters.multivariate)], + remoteUrlTriggerConfig: [ + (s) => [s.currentTeam], + (currentTeam) => currentTeam?.session_recording_url_trigger_config, + ], + isAddUrlTriggerConfigFormVisible: [ + (s) => [s.editUrlTriggerIndex], + (editUrlTriggerIndex) => editUrlTriggerIndex === -1, + ], + urlTriggerToEdit: [ + (s) => [s.urlTriggerConfig, s.editUrlTriggerIndex], + (urlTriggerConfig, editUrlTriggerIndex) => { + if ( + editUrlTriggerIndex === null || + editUrlTriggerIndex === -1 || + !urlTriggerConfig?.[editUrlTriggerIndex] + ) { + return NEW_URL_TRIGGER + } + return urlTriggerConfig[editUrlTriggerIndex] + }, + ], + }), + afterMount(({ actions }) => { + actions.loadFeatureFlag() + }), + subscriptions(({ actions }) => ({ + currentTeam: (currentTeam: TeamPublicType | TeamType | null) => { + actions.setUrlTriggerConfig(currentTeam?.session_recording_url_trigger_config ?? []) + }, + })), + forms(({ values, actions }) => ({ + proposedUrlTrigger: { + defaults: { url: '', matching: 'regex' } as SessionReplayUrlTriggerConfig, + submit: async ({ url, matching }) => { + if (values.editUrlTriggerIndex !== null && values.editUrlTriggerIndex >= 0) { + actions.updateUrlTrigger(values.editUrlTriggerIndex, { url, matching }) + } else { + actions.addUrlTrigger({ url, matching }) + } + }, + }, + })), + sharedListeners(({ values }) => ({ + saveUrlTriggers: async () => { + await teamLogic.asyncActions.updateCurrentTeam({ + session_recording_url_trigger_config: values.urlTriggerConfig ?? 
[], + }) + }, + })), + listeners(({ sharedListeners, actions, values }) => ({ + setEditUrlTriggerIndex: () => { + actions.setProposedUrlTriggerValue('url', values.urlTriggerToEdit.url) + actions.setProposedUrlTriggerValue('matching', values.urlTriggerToEdit.matching) + }, + addUrlTrigger: sharedListeners.saveUrlTriggers, + removeUrlTrigger: sharedListeners.saveUrlTriggers, + updateUrlTrigger: sharedListeners.saveUrlTriggers, + submitProposedUrlTriggerSuccess: () => { + actions.setEditUrlTriggerIndex(null) + actions.resetProposedUrlTrigger() + }, + })), +]) diff --git a/frontend/src/scenes/settings/environment/sessionReplayLinkedFlagLogic.ts b/frontend/src/scenes/settings/environment/sessionReplayLinkedFlagLogic.ts deleted file mode 100644 index b7b36f30cf9f9..0000000000000 --- a/frontend/src/scenes/settings/environment/sessionReplayLinkedFlagLogic.ts +++ /dev/null @@ -1,57 +0,0 @@ -import { actions, afterMount, connect, kea, path, props, reducers, selectors } from 'kea' -import { loaders } from 'kea-loaders' -import api from 'lib/api' -import { isObject } from 'lib/utils' -import { variantKeyToIndexFeatureFlagPayloads } from 'scenes/feature-flags/featureFlagLogic' -import { teamLogic } from 'scenes/teamLogic' - -import { FeatureFlagBasicType } from '~/types' - -import type { sessionReplayLinkedFlagLogicType } from './sessionReplayLinkedFlagLogicType' - -export interface ReplayLinkedFlagLogicProps { - id: number | null -} - -export const sessionReplayLinkedFlagLogic = kea([ - path(['scenes', 'settings', 'project', 'sessionReplayLinkedFlagLogic']), - actions({ - selectFeatureFlag: (flag: FeatureFlagBasicType) => ({ flag }), - }), - connect({ values: [teamLogic, ['currentTeam']] }), - reducers({ - selectedFlag: [ - null as FeatureFlagBasicType | null, - { - selectFeatureFlag: (_, { flag }) => flag, - }, - ], - }), - props({} as ReplayLinkedFlagLogicProps), - loaders(({ props }) => ({ - featureFlag: { - loadFeatureFlag: async () => { - if (props.id) { - const retrievedFlag = await api.featureFlags.get(props.id) - return variantKeyToIndexFeatureFlagPayloads(retrievedFlag) - } - return null - }, - }, - })), - selectors({ - linkedFlag: [ - (s) => [s.featureFlag, s.selectedFlag, s.currentTeam], - // an existing linked flag is loaded from the API, - // a newly chosen flag is selected can be passed in - // the current team is used to ensure that we don't show stale values - // as people change the selection - (featureFlag, selectedFlag, currentTeam) => - currentTeam?.session_recording_linked_flag?.id ? 
selectedFlag || featureFlag : null, - ], - flagHasVariants: [(s) => [s.linkedFlag], (linkedFlag) => isObject(linkedFlag?.filters.multivariate)], - }), - afterMount(({ actions }) => { - actions.loadFeatureFlag() - }), -]) diff --git a/frontend/src/scenes/surveys/SurveyEdit.tsx b/frontend/src/scenes/surveys/SurveyEdit.tsx index acd242061cc74..32d1636e492d9 100644 --- a/frontend/src/scenes/surveys/SurveyEdit.tsx +++ b/frontend/src/scenes/surveys/SurveyEdit.tsx @@ -889,6 +889,9 @@ export default function SurveyEdit(): JSX.Element { if (newValue === 'once') { setSurveyValue('iteration_count', 0) setSurveyValue('iteration_frequency_days', 0) + } else if (newValue === 'recurring') { + setSurveyValue('iteration_count', 1) + setSurveyValue('iteration_frequency_days', 90) } }} options={[ @@ -899,7 +902,7 @@ export default function SurveyEdit(): JSX.Element { }, { value: 'recurring', - label: 'Repeat on a Schedule', + label: 'Repeat on a schedule', 'data-attr': 'survey-iteration-frequency-days', disabledReason: surveysRecurringScheduleDisabledReason, }, diff --git a/frontend/src/scenes/teamActivityDescriber.tsx b/frontend/src/scenes/teamActivityDescriber.tsx index 024748a186128..7fd8e6cdfe289 100644 --- a/frontend/src/scenes/teamActivityDescriber.tsx +++ b/frontend/src/scenes/teamActivityDescriber.tsx @@ -37,6 +37,17 @@ const teamActionsMapping: Record< ], } }, + session_recording_url_trigger_config(change: ActivityChange | undefined): ChangeMapping | null { + const before = change?.before + const after = change?.after + if (before === null && after === null) { + return null + } + + return { + description: [<>Changed session replay URL triggers], + } + }, capture_console_log_opt_in(change: ActivityChange | undefined): ChangeMapping | null { return { description: [<>{change?.after ? 'enabled' : 'disabled'} console log capture in session replay] } }, diff --git a/frontend/src/scenes/urls.ts b/frontend/src/scenes/urls.ts index 146c561e225e1..05f0372b7f8c8 100644 --- a/frontend/src/scenes/urls.ts +++ b/frontend/src/scenes/urls.ts @@ -3,7 +3,7 @@ import { AlertType } from 'lib/components/Alerts/types' import { getCurrentTeamId } from 'lib/utils/getAppContext' import { ExportOptions } from '~/exporter/types' -import { HogQLFilters, Node } from '~/queries/schema' +import { HogQLFilters, HogQLVariable, Node } from '~/queries/schema' import { ActionType, ActivityTab, @@ -89,8 +89,20 @@ export const urls = { } ).url, insightEdit: (id: InsightShortId): string => `/insights/${id}/edit`, - insightView: (id: InsightShortId, dashboardId?: number): string => - `/insights/${id}${dashboardId !== undefined ? `?dashboard=${dashboardId}` : ''}`, + insightView: ( + id: InsightShortId, + dashboardId?: number, + variablesOverride?: Record + ): string => { + const params = [ + { param: 'dashboard', value: dashboardId }, + { param: 'variables_override', value: variablesOverride }, + ] + .filter((n) => Boolean(n.value)) + .map((n) => `${n.param}=${encodeURIComponent(JSON.stringify(n.value))}`) + .join('&') + return `/insights/${id}${params.length ? 
`?${params}` : ''}` + }, insightSubcriptions: (id: InsightShortId): string => `/insights/${id}/subscriptions`, insightSubcription: (id: InsightShortId, subscriptionId: string): string => `/insights/${id}/subscriptions/${subscriptionId}`, diff --git a/frontend/src/types.ts b/frontend/src/types.ts index 402ab3b2daa2e..14d0ee73a3e28 100644 --- a/frontend/src/types.ts +++ b/frontend/src/types.ts @@ -37,6 +37,7 @@ import type { DatabaseSchemaField, HogQLQuery, HogQLQueryModifiers, + HogQLVariable, InsightVizNode, Node, QueryStatus, @@ -512,6 +513,7 @@ export interface TeamType extends TeamBasicType { autocapture_exceptions_opt_in: boolean autocapture_web_vitals_opt_in?: boolean autocapture_web_vitals_allowed_metrics?: SupportedWebVitalsMetrics[] + session_recording_url_trigger_config?: SessionReplayUrlTriggerConfig[] surveys_opt_in?: boolean heatmaps_opt_in?: boolean autocapture_exceptions_errors_to_ignore: string[] @@ -1823,6 +1825,7 @@ export type DashboardTemplateScope = 'team' | 'global' | 'feature_flag' export interface DashboardType extends DashboardBasicType { tiles: DashboardTile[] filters: DashboardFilter + variables?: Record } export enum TemplateAvailabilityContext { @@ -2661,6 +2664,8 @@ export interface InsightLogicProps { /** Dashboard filters to override the ones in the query */ filtersOverride?: DashboardFilter | null + /** Dashboard variables to override the ones in the query */ + variablesOverride?: Record | null } export interface SetInsightOptions { @@ -4604,3 +4609,8 @@ export type AppMetricsV2RequestParams = { interval?: 'hour' | 'day' | 'week' breakdown_by?: 'name' | 'kind' } + +export type SessionReplayUrlTriggerConfig = { + url: string + matching: 'regex' +} diff --git a/latest_migrations.manifest b/latest_migrations.manifest index 10975c68f9aa3..7e70361e8625e 100644 --- a/latest_migrations.manifest +++ b/latest_migrations.manifest @@ -5,7 +5,7 @@ contenttypes: 0002_remove_content_type_name ee: 0016_rolemembership_organization_member otp_static: 0002_throttling otp_totp: 0002_auto_20190420_0723 -posthog: 0490_dashboard_variables +posthog: 0492_team_session_recording_url_trigger_config sessions: 0001_initial social_django: 0010_uid_db_index two_factor: 0007_auto_20201201_1019 diff --git a/mypy-baseline.txt b/mypy-baseline.txt index 2d1c674469dcb..c1d0be4b08c78 100644 --- a/mypy-baseline.txt +++ b/mypy-baseline.txt @@ -3,6 +3,54 @@ posthog/temporal/common/utils.py:0: note: This is likely because "from_activity" posthog/temporal/common/utils.py:0: error: Argument 2 to "__get__" of "classmethod" has incompatible type "type[HeartbeatType]"; expected "type[Never]" [arg-type] posthog/tasks/exports/ordered_csv_renderer.py:0: error: No return value expected [return-value] posthog/warehouse/models/ssh_tunnel.py:0: error: Incompatible types in assignment (expression has type "NoEncryption", variable has type "BestAvailableEncryption") [assignment] +posthog/temporal/data_imports/pipelines/sql_database/helpers.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Dict entry 2 has incompatible type "Literal['auto']": "None"; expected "Literal['json_response', 'header_link', 'auto', 'single_page', 'cursor', 'offset', 'page_number']": "type[BasePaginator]" [dict-item] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "None", variable has type "AuthConfigBase") [assignment] 
+posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Argument 1 to "get_auth_class" has incompatible type "Literal['bearer', 'api_key', 'http_basic'] | None"; expected "Literal['bearer', 'api_key', 'http_basic']" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Need type annotation for "dependency_graph" [var-annotated] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "None", target has type "ResolvedParam") [assignment] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible return value type (got "tuple[TopologicalSorter[Any], dict[str, EndpointResource], dict[str, ResolvedParam]]", expected "tuple[Any, dict[str, EndpointResource], dict[str, ResolvedParam | None]]") [return-value] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unsupported right operand type for in ("str | Endpoint | None") [operator] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Value of type variable "StrOrLiteralStr" of "parse" of "Formatter" cannot be "str | None" [type-var] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unsupported right operand type for in ("dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None") [operator] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unsupported right operand type for in ("dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None") [operator] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Value of type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" is not indexable [index] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Item "None" of "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" has no attribute "pop" [union-attr] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Value of type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" is not indexable [index] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Item "None" of "str | None" has no attribute "format" [union-attr] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Argument 1 to "single_entity_path" has incompatible type "str | None"; expected "str" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Item "None" of "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" has no attribute "items" [union-attr] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "str | None", variable has type "str") [assignment] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "str | None", variable has type "str") [assignment] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Statement is unreachable [unreachable] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 0 has incompatible type "dict[str, Any] | None"; expected "SupportsKeysAndGetItem[str, Any]" [dict-item] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 1 has incompatible type "dict[str, Any] | None"; 
expected "SupportsKeysAndGetItem[str, Any]" [dict-item] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 0 has incompatible type "dict[str, Any] | None"; expected "SupportsKeysAndGetItem[str, ResolveParamConfig | IncrementalParamConfig | Any]" [dict-item] +posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 1 has incompatible type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None"; expected "SupportsKeysAndGetItem[str, ResolveParamConfig | IncrementalParamConfig | Any]" [dict-item] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Not all union combinations were tried because there are too many unions [misc] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 2 to "source" has incompatible type "str | None"; expected "str" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 3 to "source" has incompatible type "str | None"; expected "str" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 4 to "source" has incompatible type "int | None"; expected "int" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 6 to "source" has incompatible type "Schema | None"; expected "Schema" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 7 to "source" has incompatible type "Literal['evolve', 'discard_value', 'freeze', 'discard_row'] | TSchemaContractDict | None"; expected "Literal['evolve', 'discard_value', 'freeze', 'discard_row'] | TSchemaContractDict" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 8 to "source" has incompatible type "type[BaseConfiguration] | None"; expected "type[BaseConfiguration]" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 1 to "build_resource_dependency_graph" has incompatible type "EndpointResourceBase | None"; expected "EndpointResourceBase" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Incompatible types in assignment (expression has type "list[str] | None", variable has type "list[str]") [assignment] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 1 to "setup_incremental_object" has incompatible type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None"; expected "dict[str, Any]" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument "base_url" to "RESTClient" has incompatible type "str | None"; expected "str" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 1 to "exclude_keys" has incompatible type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None"; expected "Mapping[str, Any]" [arg-type] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Incompatible default for argument "resolved_param" (default has type "ResolvedParam | None", argument has type "ResolvedParam") [assignment] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument "module" to "SourceInfo" has incompatible type Module | None; expected Module [arg-type] 
+posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] posthog/utils.py:0: error: No overload variant of "asdict" matches argument type "type[DataclassInstance]" [call-overload] posthog/utils.py:0: note: Possible overload variants: posthog/utils.py:0: note: def asdict(obj: DataclassInstance) -> dict[str, Any] @@ -292,8 +340,8 @@ posthog/hogql/query.py:0: error: Incompatible types in assignment (expression ha posthog/hogql/query.py:0: error: Argument 1 to "get_default_limit_for_context" has incompatible type "LimitContext | None"; expected "LimitContext" [arg-type] posthog/hogql/query.py:0: error: "SelectQuery" has no attribute "select_queries" [attr-defined] posthog/hogql/query.py:0: error: Subclass of "SelectQuery" and "SelectUnionQuery" cannot exist: would have incompatible method signatures [unreachable] -posthog/api/action.py:0: error: Argument 1 to has incompatible type "*tuple[str, ...]"; expected "type[BaseRenderer]" [arg-type] posthog/queries/person_query.py:0: error: Incompatible type for lookup 'pk': (got "str | int | list[str]", expected "str | int") [misc] +posthog/api/action.py:0: error: Argument 1 to has incompatible type "*tuple[str, ...]"; expected "type[BaseRenderer]" [arg-type] posthog/queries/event_query/event_query.py:0: error: Incompatible type for lookup 'pk': (got "str | int | list[str]", expected "str | int") [misc] posthog/hogql_queries/sessions_timeline_query_runner.py:0: error: Statement is unreachable [unreachable] posthog/hogql_queries/hogql_query_runner.py:0: error: Statement is unreachable [unreachable] @@ -383,7 +431,23 @@ posthog/test/test_feature_flag_analytics.py:0: error: Item "None" of "Dashboard posthog/test/test_feature_flag_analytics.py:0: error: Item "None" of "Dashboard | None" has no attribute "tiles" [union-attr] posthog/test/test_feature_flag_analytics.py:0: error: Item "None" of "Dashboard | None" has no attribute "tiles" [union-attr] posthog/test/test_feature_flag_analytics.py:0: error: Item "None" of "Dashboard | None" has no attribute "delete" [union-attr] -posthog/temporal/data_imports/pipelines/sql_database/helpers.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] 
+posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] posthog/tasks/test/test_update_survey_iteration.py:0: error: Item "None" of "FeatureFlag | None" has no attribute "filters" [union-attr] posthog/tasks/test/test_stop_surveys_reached_target.py:0: error: No overload variant of "__sub__" of "datetime" matches argument type "None" [operator] posthog/tasks/test/test_stop_surveys_reached_target.py:0: note: Possible overload variants: @@ -410,12 +474,19 @@ posthog/tasks/exports/test/test_csv_exporter_renders.py:0: error: Item "memoryvi posthog/tasks/exports/test/test_csv_exporter_renders.py:0: error: Item "None" of "bytes | memoryview | None" has no attribute "decode" [union-attr] posthog/tasks/exports/test/test_csv_exporter_renders.py:0: error: Item "memoryview" of "bytes | memoryview | None" has no attribute "decode" [union-attr] posthog/tasks/exports/test/test_csv_exporter_renders.py:0: error: Item "None" of "bytes | memoryview | None" has no attribute "decode" [union-attr] +posthog/management/commands/sync_persons_to_clickhouse.py:0: error: Argument 4 to "create_person_override" has incompatible type "int | None"; expected "int" [arg-type] +posthog/management/commands/sync_persons_to_clickhouse.py:0: error: Argument "group_type_index" to "raw_create_group_ch" has incompatible type "int"; expected "Literal[0, 1, 2, 3, 4]" [arg-type] +posthog/management/commands/migrate_team.py:0: error: Incompatible types in assignment (expression has type "None", variable has type "BatchExport") [assignment] +posthog/management/commands/migrate_team.py:0: error: "BatchExportDestination" has no attribute "exclude_events" [attr-defined] +posthog/management/commands/migrate_team.py:0: error: "BatchExportDestination" has no attribute "include_events" [attr-defined] +posthog/management/commands/fix_future_person_created_at.py:0: error: Argument "version" to "create_person" has incompatible type "int | None"; expected "int" [arg-type] 
posthog/hogql_queries/test/test_query_runner.py:0: error: Variable "TestQueryRunner" is not valid as a type [valid-type] posthog/hogql_queries/test/test_query_runner.py:0: note: See https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases posthog/hogql_queries/test/test_query_runner.py:0: error: Invalid base class "TestQueryRunner" [misc] posthog/hogql_queries/test/test_hogql_query_runner.py:0: error: Incompatible types in assignment (expression has type "Expr", variable has type "SelectQuery") [assignment] posthog/hogql_queries/test/test_hogql_query_runner.py:0: error: Incompatible types in assignment (expression has type "Expr", variable has type "SelectQuery") [assignment] posthog/hogql_queries/test/test_hogql_query_runner.py:0: error: Incompatible types in assignment (expression has type "Expr", variable has type "SelectQuery") [assignment] +posthog/hogql_queries/test/test_actors_query_runner.py:0: error: Incompatible types in assignment (expression has type "Expr", variable has type "SelectQuery") [assignment] posthog/hogql_queries/legacy_compatibility/test/test_filter_to_query.py:0: error: Need type annotation for "properties_0" (hint: "properties_0: list[] = ...") [var-annotated] posthog/hogql_queries/legacy_compatibility/test/test_filter_to_query.py:0: error: Need type annotation for "properties_3" (hint: "properties_3: dict[, ] = ...") [var-annotated] posthog/hogql_queries/legacy_compatibility/test/test_filter_to_query.py:0: error: Need type annotation for "filter" (hint: "filter: dict[, ] = ...") [var-annotated] @@ -428,6 +499,21 @@ posthog/hogql/test/test_timings.py:0: error: No overload variant of "__setitem__ posthog/hogql/test/test_timings.py:0: note: Possible overload variants: posthog/hogql/test/test_timings.py:0: note: def __setitem__(self, SupportsIndex, int, /) -> None posthog/hogql/test/test_timings.py:0: note: def __setitem__(self, slice, Iterable[int], /) -> None +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "next_join" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | Any | None" has no attribute "constraint" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinConstraint | Any | None" has no attribute "constraint_type" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinConstraint | Any | None" has no attribute "expr" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "next_join" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | Any | None" has no attribute "constraint" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinConstraint | Any | None" has no attribute "constraint_type" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "SelectUnionQueryType" of "SelectQueryType | SelectUnionQueryType | None" has no attribute "columns" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "SelectQueryType | SelectUnionQueryType | None" has no attribute "columns" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: "FieldOrTable" has no attribute "fields" [attr-defined] +posthog/hogql/test/test_resolver.py:0: error: "FieldOrTable" has no attribute "fields" [attr-defined] +posthog/hogql/test/test_resolver.py:0: error: "FieldOrTable" has no attribute "fields" [attr-defined] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | 
None" has no attribute "table" [union-attr] +posthog/hogql/test/test_resolver.py:0: error: Argument 1 to "clone_expr" has incompatible type "SelectQuery | SelectUnionQuery | Field | Any | None"; expected "Expr" [arg-type] +posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "alias" [union-attr] posthog/hogql/test/test_property.py:0: error: Argument 1 to "_property_to_expr" of "TestProperty" has incompatible type "HogQLPropertyFilter"; expected "PropertyGroup | Property | dict[Any, Any] | list[Any]" [arg-type] posthog/hogql/test/test_printer.py:0: error: Argument 2 to "Database" has incompatible type "int"; expected "WeekStartDay | None" [arg-type] posthog/hogql/test/test_printer.py:0: error: Argument 2 to "Database" has incompatible type "int"; expected "WeekStartDay | None" [arg-type] @@ -456,6 +542,9 @@ posthog/hogql/test/_test_parser.py:0: error: Item "None" of "JoinExpr | None" ha posthog/hogql/test/_test_parser.py:0: error: Item "None" of "JoinExpr | None" has no attribute "table" [union-attr] posthog/hogql/test/_test_parser.py:0: error: Item "None" of "JoinExpr | None" has no attribute "alias" [union-attr] posthog/hogql/test/_test_parser.py:0: error: Item "None" of "JoinExpr | None" has no attribute "table" [union-attr] +posthog/hogql/functions/test/test_cohort.py:0: error: "TestCohort" has no attribute "snapshot" [attr-defined] +posthog/hogql/functions/test/test_cohort.py:0: error: "TestCohort" has no attribute "snapshot" [attr-defined] +posthog/hogql/functions/test/test_cohort.py:0: error: "TestCohort" has no attribute "snapshot" [attr-defined] posthog/hogql/database/schema/event_sessions.py:0: error: Statement is unreachable [unreachable] posthog/heatmaps/test/test_heatmaps_api.py:0: error: "HttpResponse" has no attribute "json" [attr-defined] posthog/heatmaps/test/test_heatmaps_api.py:0: error: "HttpResponse" has no attribute "json" [attr-defined] @@ -503,10 +592,32 @@ posthog/api/organization_feature_flag.py:0: error: Invalid index type "str | Non posthog/api/organization_feature_flag.py:0: error: Invalid index type "str | None" for "dict[str, int]"; expected type "str" [index] posthog/api/organization_feature_flag.py:0: error: Invalid index type "str | None" for "dict[str, int]"; expected type "str" [index] posthog/api/notebook.py:0: error: Incompatible types in assignment (expression has type "int", variable has type "str | None") [assignment] +posthog/warehouse/external_data_source/source.py:0: error: Incompatible types in assignment (expression has type "int", target has type "str") [assignment] +posthog/warehouse/external_data_source/source.py:0: error: Incompatible types in assignment (expression has type "int", target has type "str") [assignment] +posthog/warehouse/external_data_source/source.py:0: error: Incompatible types in assignment (expression has type "dict[str, Collection[str]]", variable has type "StripeSourcePayload") [assignment] +posthog/warehouse/external_data_source/source.py:0: error: Argument 1 to "_create_source" has incompatible type "StripeSourcePayload"; expected "dict[Any, Any]" [arg-type] posthog/warehouse/data_load/validate_schema.py:0: error: Incompatible types in assignment (expression has type "object", variable has type "DataWarehouseCredential | Combinable | None") [assignment] posthog/warehouse/data_load/validate_schema.py:0: error: Incompatible types in assignment (expression has type "object", variable has type "str | int | Combinable") [assignment] posthog/warehouse/data_load/validate_schema.py:0: 
error: Incompatible types in assignment (expression has type "dict[str, dict[str, str | bool]] | dict[str, str]", variable has type "dict[str, dict[str, str]]") [assignment] posthog/warehouse/data_load/source_templates.py:0: error: Incompatible types in assignment (expression has type "str", variable has type "Type") [assignment] +posthog/warehouse/api/external_data_schema.py:0: error: Incompatible return value type (got "str | None", expected "SyncType | None") [return-value] +posthog/warehouse/api/external_data_schema.py:0: error: Argument 1 to "get_sql_schemas_for_source_type" has incompatible type "str"; expected "Type" [arg-type] +posthog/warehouse/api/external_data_schema.py:0: error: No overload variant of "get" of "dict" matches argument type "str" [call-overload] +posthog/warehouse/api/external_data_schema.py:0: note: Possible overload variants: +posthog/warehouse/api/external_data_schema.py:0: note: def get(self, Type, /) -> dict[str, list[IncrementalField]] | None +posthog/warehouse/api/external_data_schema.py:0: note: def get(self, Type, dict[str, list[IncrementalField]], /) -> dict[str, list[IncrementalField]] +posthog/warehouse/api/external_data_schema.py:0: note: def [_T] get(self, Type, _T, /) -> dict[str, list[IncrementalField]] | _T +posthog/warehouse/api/table.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/warehouse/api/table.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/warehouse/api/table.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: Argument 1 has incompatible type "str"; expected "Type" [arg-type] +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: Incompatible types in assignment (expression has type "list[Any]", variable has type "dict[str, list[tuple[str, str]]]") [assignment] +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: No overload variant of "get" of "dict" matches argument types "str", "tuple[()]" [call-overload] +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: Possible overload variants: +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: def get(self, Type, /) -> Sequence[str] | None +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: def get(self, Type, Sequence[str], /) -> Sequence[str] +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: def [_T] get(self, Type, _T, /) -> Sequence[str] | _T +posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: Argument 1 has incompatible type "dict[str, list[tuple[str, str]]]"; expected "list[Any]" [arg-type] posthog/tasks/exports/test/test_csv_exporter.py:0: error: Function is missing a return type annotation [no-untyped-def] posthog/tasks/exports/test/test_csv_exporter.py:0: error: Function is missing a type annotation [no-untyped-def] posthog/tasks/exports/test/test_csv_exporter.py:0: error: Function is missing a type annotation for one or more arguments [no-untyped-def] @@ -549,28 +660,6 @@ posthog/queries/trends/test/test_person.py:0: error: Invalid index type "int" fo posthog/queries/trends/test/test_person.py:0: error: "str" has no attribute "get" [attr-defined] posthog/queries/trends/test/test_person.py:0: error: Invalid index type "int" for "_MonkeyPatchedResponse"; expected type "str" [index] posthog/models/test/test_organization_model.py:0: error: Module 
"django.utils.timezone" does not explicitly export attribute "timedelta" [attr-defined] -posthog/management/commands/sync_persons_to_clickhouse.py:0: error: Argument 4 to "create_person_override" has incompatible type "int | None"; expected "int" [arg-type] -posthog/management/commands/sync_persons_to_clickhouse.py:0: error: Argument "group_type_index" to "raw_create_group_ch" has incompatible type "int"; expected "Literal[0, 1, 2, 3, 4]" [arg-type] -posthog/management/commands/migrate_team.py:0: error: Incompatible types in assignment (expression has type "None", variable has type "BatchExport") [assignment] -posthog/management/commands/migrate_team.py:0: error: "BatchExportDestination" has no attribute "exclude_events" [attr-defined] -posthog/management/commands/migrate_team.py:0: error: "BatchExportDestination" has no attribute "include_events" [attr-defined] -posthog/management/commands/fix_future_person_created_at.py:0: error: Argument "version" to "create_person" has incompatible type "int | None"; expected "int" [arg-type] -posthog/hogql_queries/test/test_actors_query_runner.py:0: error: Incompatible types in assignment (expression has type "Expr", variable has type "SelectQuery") [assignment] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "next_join" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | Any | None" has no attribute "constraint" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinConstraint | Any | None" has no attribute "constraint_type" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinConstraint | Any | None" has no attribute "expr" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "next_join" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | Any | None" has no attribute "constraint" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinConstraint | Any | None" has no attribute "constraint_type" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "SelectUnionQueryType" of "SelectQueryType | SelectUnionQueryType | None" has no attribute "columns" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "SelectQueryType | SelectUnionQueryType | None" has no attribute "columns" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: "FieldOrTable" has no attribute "fields" [attr-defined] -posthog/hogql/test/test_resolver.py:0: error: "FieldOrTable" has no attribute "fields" [attr-defined] -posthog/hogql/test/test_resolver.py:0: error: "FieldOrTable" has no attribute "fields" [attr-defined] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "table" [union-attr] -posthog/hogql/test/test_resolver.py:0: error: Argument 1 to "clone_expr" has incompatible type "SelectQuery | SelectUnionQuery | Field | Any | None"; expected "Expr" [arg-type] -posthog/hogql/test/test_resolver.py:0: error: Item "None" of "JoinExpr | None" has no attribute "alias" [union-attr] posthog/hogql/test/test_query.py:0: error: Value of type "list[QueryTiming] | None" is not indexable [index] posthog/hogql/test/test_query.py:0: error: Value of type "list[QueryTiming] | None" is not indexable [index] posthog/hogql/test/test_query.py:0: error: Module has no attribute "utc" [attr-defined] @@ -601,14 +690,12 @@ posthog/hogql/test/test_parser_python.py:0: error: 
Unsupported dynamic base clas posthog/hogql/test/test_parser_cpp.py:0: error: Unsupported dynamic base class "parser_test_factory" [misc] posthog/hogql/test/test_parse_string_python.py:0: error: Unsupported dynamic base class "parse_string_test_factory" [misc] posthog/hogql/test/test_parse_string_cpp.py:0: error: Unsupported dynamic base class "parse_string_test_factory" [misc] -posthog/hogql/functions/test/test_cohort.py:0: error: "TestCohort" has no attribute "snapshot" [attr-defined] -posthog/hogql/functions/test/test_cohort.py:0: error: "TestCohort" has no attribute "snapshot" [attr-defined] -posthog/hogql/functions/test/test_cohort.py:0: error: "TestCohort" has no attribute "snapshot" [attr-defined] posthog/hogql/database/test/test_view.py:0: error: Argument "dialect" to "print_ast" has incompatible type "str"; expected "Literal['hogql', 'clickhouse']" [arg-type] posthog/hogql/database/test/test_s3_table.py:0: error: Argument "dialect" to "print_ast" has incompatible type "str"; expected "Literal['hogql', 'clickhouse']" [arg-type] posthog/async_migrations/test/test_runner.py:0: error: Item "None" of "datetime | None" has no attribute "day" [union-attr] posthog/api/test/test_insight.py:0: error: Argument "data" to "get" of "APIClient" has incompatible type "dict[str, object]"; expected "Mapping[str, str | bytes | int | Iterable[str | bytes | int]] | Iterable[tuple[str, str | bytes | int | Iterable[str | bytes | int]]] | None" [arg-type] posthog/api/test/test_insight.py:0: error: Argument "data" to "get" of "APIClient" has incompatible type "dict[str, object]"; expected "Mapping[str, str | bytes | int | Iterable[str | bytes | int]] | Iterable[tuple[str, str | bytes | int | Iterable[str | bytes | int]]] | None" [arg-type] +posthog/api/test/test_insight.py:0: error: Argument "data" to "get" of "APIClient" has incompatible type "dict[str, object]"; expected "Mapping[str, str | bytes | int | Iterable[str | bytes | int]] | Iterable[tuple[str, str | bytes | int | Iterable[str | bytes | int]]] | None" [arg-type] posthog/api/test/test_feature_flag.py:0: error: Item "None" of "Dashboard | None" has no attribute "tiles" [union-attr] posthog/api/test/test_feature_flag.py:0: error: Item "None" of "Dashboard | None" has no attribute "name" [union-attr] posthog/api/test/test_feature_flag.py:0: error: Item "None" of "Dashboard | None" has no attribute "description" [union-attr] @@ -673,18 +760,11 @@ posthog/admin/admins/team_admin.py:0: error: Item "None" of "Project | None" has posthog/admin/admins/team_admin.py:0: error: Item "None" of "Project | None" has no attribute "name" [union-attr] posthog/admin/admins/plugin_admin.py:0: error: Item "None" of "Organization | None" has no attribute "pk" [union-attr] posthog/admin/admins/plugin_admin.py:0: error: Item "None" of "Organization | None" has no attribute "name" [union-attr] -ee/clickhouse/views/experiments.py:0: error: Argument 4 to "ClickhouseTrendExperimentResult" has incompatible type "datetime | None"; expected "datetime" [arg-type] -ee/clickhouse/views/experiments.py:0: error: Argument 4 to "ClickhouseFunnelExperimentResult" has incompatible type "datetime | None"; expected "datetime" [arg-type] -ee/clickhouse/views/experiments.py:0: error: Argument 4 to "ClickhouseSecondaryExperimentResult" has incompatible type "datetime | None"; expected "datetime" [arg-type] -ee/clickhouse/views/experiments.py:0: error: Item "None" of "User | None" has no attribute "email" [union-attr] 
-posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: Argument 1 has incompatible type "str"; expected "Type" [arg-type] -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: Incompatible types in assignment (expression has type "list[Any]", variable has type "dict[str, list[tuple[str, str]]]") [assignment] -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: No overload variant of "get" of "dict" matches argument types "str", "tuple[()]" [call-overload] -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: Possible overload variants: -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: def get(self, Type, /) -> Sequence[str] | None -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: def get(self, Type, Sequence[str], /) -> Sequence[str] -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: note: def [_T] get(self, Type, _T, /) -> Sequence[str] | _T -posthog/temporal/data_imports/workflow_activities/create_job_model.py:0: error: Argument 1 has incompatible type "dict[str, list[tuple[str, str]]]"; expected "list[Any]" [arg-type] +posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/temporal/tests/batch_exports/test_batch_exports.py:0: error: TypedDict key must be a string literal; expected one of ("_timestamp", "created_at", "distinct_id", "elements", "elements_chain", ...) 
[literal-required] posthog/session_recordings/session_recording_api.py:0: error: Argument "team_id" to "get_realtime_snapshots" has incompatible type "int"; expected "str" [arg-type] posthog/session_recordings/session_recording_api.py:0: error: Value of type variable "SupportsRichComparisonT" of "sorted" cannot be "str | None" [type-var] posthog/session_recordings/session_recording_api.py:0: error: Argument 1 to "get" of "dict" has incompatible type "str | None"; expected "str" [arg-type] @@ -695,14 +775,6 @@ posthog/queries/app_metrics/historical_exports.py:0: error: Argument 1 to "loads posthog/api/test/test_decide.py:0: error: Item "None" of "User | None" has no attribute "toolbar_mode" [union-attr] posthog/api/test/test_decide.py:0: error: Item "None" of "User | None" has no attribute "save" [union-attr] posthog/api/test/test_authentication.py:0: error: Module has no attribute "utc" [attr-defined] -posthog/admin/admins/plugin_config_admin.py:0: error: Item "None" of "Team | None" has no attribute "name" [union-attr] -posthog/migrations/0237_remove_timezone_from_teams.py:0: error: Argument 2 to "RunPython" has incompatible type "Callable[[Migration, Any], None]"; expected "_CodeCallable | None" [arg-type] -posthog/migrations/0228_fix_tile_layouts.py:0: error: Argument 2 to "RunPython" has incompatible type "Callable[[Migration, Any], None]"; expected "_CodeCallable | None" [arg-type] -posthog/warehouse/external_data_source/source.py:0: error: Incompatible types in assignment (expression has type "int", target has type "str") [assignment] -posthog/warehouse/external_data_source/source.py:0: error: Incompatible types in assignment (expression has type "int", target has type "str") [assignment] -posthog/warehouse/external_data_source/source.py:0: error: Incompatible types in assignment (expression has type "dict[str, Collection[str]]", variable has type "StripeSourcePayload") [assignment] -posthog/warehouse/external_data_source/source.py:0: error: Argument 1 to "_create_source" has incompatible type "StripeSourcePayload"; expected "dict[Any, Any]" [arg-type] -posthog/api/sharing.py:0: error: Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable) [union-attr] posthog/api/plugin.py:0: error: Item "None" of "Team | None" has no attribute "organization" [union-attr] posthog/api/plugin.py:0: error: Item "None" of "Team | None" has no attribute "id" [union-attr] posthog/api/plugin.py:0: error: Item "None" of "Team | None" has no attribute "organization" [union-attr] @@ -716,107 +788,55 @@ posthog/api/plugin.py:0: error: Incompatible type for "file_size" of "PluginAtta posthog/api/plugin.py:0: error: Item "None" of "IO[Any] | None" has no attribute "read" [union-attr] posthog/api/plugin.py:0: error: Item "None" of "Team | None" has no attribute "organization" [union-attr] posthog/api/plugin.py:0: error: Item "None" of "Team | None" has no attribute "id" [union-attr] -posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/tests/batch_exports/test_run_updates.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/tests/batch_exports/test_batch_exports.py:0: error: TypedDict key must be a string literal; expected one of ("_timestamp", "created_at", 
"distinct_id", "elements", "elements_chain", ...) [literal-required] -posthog/api/plugin_log_entry.py:0: error: Name "timezone.datetime" is not defined [name-defined] -posthog/api/plugin_log_entry.py:0: error: Module "django.utils.timezone" does not explicitly export attribute "datetime" [attr-defined] -posthog/api/plugin_log_entry.py:0: error: Name "timezone.datetime" is not defined [name-defined] -posthog/api/plugin_log_entry.py:0: error: Module "django.utils.timezone" does not explicitly export attribute "datetime" [attr-defined] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Dict entry 2 has incompatible type "Literal['auto']": "None"; expected "Literal['json_response', 'header_link', 'auto', 'single_page', 'cursor', 'offset', 'page_number']": "type[BasePaginator]" [dict-item] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "None", variable has type "AuthConfigBase") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Argument 1 to "get_auth_class" has incompatible type "Literal['bearer', 'api_key', 'http_basic'] | None"; expected "Literal['bearer', 'api_key', 'http_basic']" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Need type annotation for "dependency_graph" [var-annotated] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "None", target has type "ResolvedParam") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible return value type (got "tuple[TopologicalSorter[Any], dict[str, EndpointResource], dict[str, ResolvedParam]]", expected "tuple[Any, dict[str, EndpointResource], dict[str, ResolvedParam | None]]") [return-value] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unsupported right operand type for in ("str | Endpoint | None") [operator] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Value of type variable "StrOrLiteralStr" of "parse" of "Formatter" cannot be "str | None" [type-var] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unsupported right operand type for in ("dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None") [operator] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unsupported right operand type for in ("dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None") [operator] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Value of type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" is not indexable [index] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Item "None" of "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" has no attribute "pop" [union-attr] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Value of type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" is not indexable [index] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Item "None" of "str | None" has no attribute "format" [union-attr] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Argument 1 to "single_entity_path" has incompatible type "str | None"; expected "str" [arg-type] 
-posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Item "None" of "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None" has no attribute "items" [union-attr] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "str | None", variable has type "str") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Incompatible types in assignment (expression has type "str | None", variable has type "str") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Statement is unreachable [unreachable] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 0 has incompatible type "dict[str, Any] | None"; expected "SupportsKeysAndGetItem[str, Any]" [dict-item] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 1 has incompatible type "dict[str, Any] | None"; expected "SupportsKeysAndGetItem[str, Any]" [dict-item] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 0 has incompatible type "dict[str, Any] | None"; expected "SupportsKeysAndGetItem[str, ResolveParamConfig | IncrementalParamConfig | Any]" [dict-item] -posthog/temporal/data_imports/pipelines/rest_source/config_setup.py:0: error: Unpacked dict entry 1 has incompatible type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None"; expected "SupportsKeysAndGetItem[str, ResolveParamConfig | IncrementalParamConfig | Any]" [dict-item] +posthog/admin/admins/plugin_config_admin.py:0: error: Item "None" of "Team | None" has no attribute "name" [union-attr] +ee/clickhouse/views/experiments.py:0: error: Argument 4 to "ClickhouseTrendExperimentResult" has incompatible type "datetime | None"; expected "datetime" [arg-type] +ee/clickhouse/views/experiments.py:0: error: Argument 4 to "ClickhouseFunnelExperimentResult" has incompatible type "datetime | None"; expected "datetime" [arg-type] +ee/clickhouse/views/experiments.py:0: error: Argument 4 to "ClickhouseSecondaryExperimentResult" has incompatible type "datetime | None"; expected "datetime" [arg-type] +ee/clickhouse/views/experiments.py:0: error: Item "None" of "User | None" has no attribute "email" [union-attr] posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py:0: error: Need type annotation for "_execute_calls" (hint: "_execute_calls: list[] = ...") [var-annotated] posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py:0: error: Need type annotation for "_execute_async_calls" (hint: "_execute_async_calls: list[] = ...") [var-annotated] posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py:0: error: Need type annotation for "_cursors" (hint: "_cursors: list[] = ...") [var-annotated] posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py:0: error: List item 0 has incompatible type "tuple[str, str, int, int, int, int, str, int]"; expected "tuple[str, str, int, int, str, str, str, str]" [list-item] posthog/temporal/tests/batch_exports/test_s3_batch_export_workflow.py:0: error: "tuple[Any, ...]" has no attribute "last_uploaded_part_timestamp" [attr-defined] posthog/temporal/tests/batch_exports/test_s3_batch_export_workflow.py:0: error: "tuple[Any, ...]" has no attribute "upload_state" [attr-defined] +posthog/temporal/data_imports/workflow_activities/import_data.py:0: error: 
Argument "job_type" to "PipelineInputs" has incompatible type "str"; expected "Type" [arg-type] +posthog/temporal/data_imports/workflow_activities/import_data.py:0: error: Argument "source_type" to "sql_source_for_type" has incompatible type "str"; expected "Type" [arg-type] +posthog/temporal/data_imports/workflow_activities/import_data.py:0: error: Argument "source_type" to "sql_source_for_type" has incompatible type "str"; expected "Type" [arg-type] +posthog/migrations/0237_remove_timezone_from_teams.py:0: error: Argument 2 to "RunPython" has incompatible type "Callable[[Migration, Any], None]"; expected "_CodeCallable | None" [arg-type] +posthog/migrations/0228_fix_tile_layouts.py:0: error: Argument 2 to "RunPython" has incompatible type "Callable[[Migration, Any], None]"; expected "_CodeCallable | None" [arg-type] +posthog/api/query.py:0: error: Statement is unreachable [unreachable] +posthog/api/plugin_log_entry.py:0: error: Name "timezone.datetime" is not defined [name-defined] +posthog/api/plugin_log_entry.py:0: error: Module "django.utils.timezone" does not explicitly export attribute "datetime" [attr-defined] +posthog/api/plugin_log_entry.py:0: error: Name "timezone.datetime" is not defined [name-defined] +posthog/api/plugin_log_entry.py:0: error: Module "django.utils.timezone" does not explicitly export attribute "datetime" [attr-defined] posthog/temporal/tests/batch_exports/test_redshift_batch_export_workflow.py:0: error: Incompatible types in assignment (expression has type "str | int", variable has type "int") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Not all union combinations were tried because there are too many unions [misc] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 2 to "source" has incompatible type "str | None"; expected "str" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 3 to "source" has incompatible type "str | None"; expected "str" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 4 to "source" has incompatible type "int | None"; expected "int" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 6 to "source" has incompatible type "Schema | None"; expected "Schema" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 7 to "source" has incompatible type "Literal['evolve', 'discard_value', 'freeze', 'discard_row'] | TSchemaContractDict | None"; expected "Literal['evolve', 'discard_value', 'freeze', 'discard_row'] | TSchemaContractDict" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 8 to "source" has incompatible type "type[BaseConfiguration] | None"; expected "type[BaseConfiguration]" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 1 to "build_resource_dependency_graph" has incompatible type "EndpointResourceBase | None"; expected "EndpointResourceBase" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Incompatible types in assignment (expression has type "list[str] | None", variable has type "list[str]") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 1 to "setup_incremental_object" has incompatible type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None"; expected "dict[str, Any]" [arg-type] 
-posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument "base_url" to "RESTClient" has incompatible type "str | None"; expected "str" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument 1 to "exclude_keys" has incompatible type "dict[str, ResolveParamConfig | IncrementalParamConfig | Any] | None"; expected "Mapping[str, Any]" [arg-type] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Incompatible default for argument "resolved_param" (default has type "ResolvedParam | None", argument has type "ResolvedParam") [assignment] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/rest_source/__init__.py:0: error: Argument "module" to "SourceInfo" has incompatible type Module | None; expected Module [arg-type] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/zendesk/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/vitally/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: 
error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/pipelines/stripe/__init__.py:0: error: Unused "type: ignore" comment [unused-ignore] +posthog/api/sharing.py:0: error: Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable) [union-attr] +posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] +posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] +posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] +posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] +posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] posthog/api/test/batch_exports/conftest.py:0: error: Signature of "run" incompatible with supertype "Worker" [override] posthog/api/test/batch_exports/conftest.py:0: note: Superclass: posthog/api/test/batch_exports/conftest.py:0: note: def run(self) -> Coroutine[Any, Any, None] posthog/api/test/batch_exports/conftest.py:0: note: Subclass: posthog/api/test/batch_exports/conftest.py:0: note: def run(self, loop: Any) -> Any posthog/api/test/batch_exports/conftest.py:0: error: Argument "activities" to "ThreadedWorker" has incompatible type "list[function]"; expected "Sequence[Callable[..., Any]]" [arg-type] -posthog/warehouse/api/external_data_schema.py:0: error: Incompatible return value type (got "str | None", expected "SyncType | None") [return-value] -posthog/warehouse/api/external_data_schema.py:0: error: Argument 1 to "get_sql_schemas_for_source_type" has incompatible type "str"; expected "Type" [arg-type] -posthog/warehouse/api/external_data_schema.py:0: error: No overload variant of "get" of "dict" matches argument type "str" [call-overload] -posthog/warehouse/api/external_data_schema.py:0: note: Possible overload variants: -posthog/warehouse/api/external_data_schema.py:0: note: def get(self, Type, /) -> dict[str, list[IncrementalField]] | None -posthog/warehouse/api/external_data_schema.py:0: note: def get(self, Type, dict[str, list[IncrementalField]], /) -> dict[str, list[IncrementalField]] -posthog/warehouse/api/external_data_schema.py:0: note: def [_T] get(self, Type, _T, /) -> dict[str, list[IncrementalField]] | _T -posthog/warehouse/api/table.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/warehouse/api/table.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/warehouse/api/table.py:0: error: Unused "type: ignore" comment [unused-ignore] -posthog/temporal/data_imports/workflow_activities/import_data.py:0: error: Argument "job_type" to "PipelineInputs" has incompatible type "str"; expected "Type" [arg-type] -posthog/temporal/data_imports/workflow_activities/import_data.py:0: error: Argument "source_type" to "sql_source_for_type" has incompatible type "str"; expected "Type" [arg-type] 
-posthog/temporal/data_imports/workflow_activities/import_data.py:0: error: Argument "source_type" to "sql_source_for_type" has incompatible type "str"; expected "Type" [arg-type] +posthog/temporal/tests/data_imports/test_end_to_end.py:0: error: Unused "type: ignore" comment [unused-ignore] posthog/api/test/test_team.py:0: error: "HttpResponse" has no attribute "json" [attr-defined] posthog/api/test/test_team.py:0: error: "HttpResponse" has no attribute "json" [attr-defined] +posthog/api/test/test_capture.py:0: error: Statement is unreachable [unreachable] +posthog/api/test/test_capture.py:0: error: Incompatible return value type (got "_MonkeyPatchedWSGIResponse", expected "HttpResponse") [return-value] +posthog/api/test/test_capture.py:0: error: Module has no attribute "utc" [attr-defined] +posthog/api/test/test_capture.py:0: error: Unpacked dict entry 0 has incompatible type "Collection[str]"; expected "SupportsKeysAndGetItem[str, dict[Never, Never]]" [dict-item] +posthog/api/test/test_capture.py:0: error: Unpacked dict entry 0 has incompatible type "Collection[str]"; expected "SupportsKeysAndGetItem[str, dict[Never, Never]]" [dict-item] +posthog/api/test/test_capture.py:0: error: Unpacked dict entry 0 has incompatible type "Collection[str]"; expected "SupportsKeysAndGetItem[str, dict[Never, Never]]" [dict-item] +posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] +posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] +posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] +posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] +posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] +posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] posthog/test/test_middleware.py:0: error: Incompatible types in assignment (expression has type "_MonkeyPatchedWSGIResponse", variable has type "_MonkeyPatchedResponse") [assignment] posthog/management/commands/test/test_create_batch_export_from_app.py:0: error: Incompatible return value type (got "dict[str, Collection[str]]", expected "dict[str, str]") [return-value] posthog/management/commands/test/test_create_batch_export_from_app.py:0: error: Incompatible types in assignment (expression has type "dict[str, Collection[str]]", variable has type "dict[str, str]") [assignment] @@ -859,21 +879,3 @@ posthog/api/test/batch_exports/test_update.py:0: error: Value of type "BatchExpo posthog/api/test/batch_exports/test_update.py:0: error: Value of type "BatchExport" is not indexable [index] posthog/api/test/batch_exports/test_update.py:0: error: Value of type "BatchExport" is not indexable [index] posthog/api/test/batch_exports/test_pause.py:0: error: "batch_export_delete_schedule" does not return a value (it only ever returns None) [func-returns-value] -posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] -posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] 
-posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] -posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] -posthog/temporal/tests/external_data/test_external_data_job.py:0: error: Invalid index type "str" for "dict[Type, Sequence[str]]"; expected type "Type" [index] -posthog/api/test/test_capture.py:0: error: Statement is unreachable [unreachable] -posthog/api/test/test_capture.py:0: error: Incompatible return value type (got "_MonkeyPatchedWSGIResponse", expected "HttpResponse") [return-value] -posthog/api/test/test_capture.py:0: error: Module has no attribute "utc" [attr-defined] -posthog/api/test/test_capture.py:0: error: Unpacked dict entry 0 has incompatible type "Collection[str]"; expected "SupportsKeysAndGetItem[str, dict[Never, Never]]" [dict-item] -posthog/api/test/test_capture.py:0: error: Unpacked dict entry 0 has incompatible type "Collection[str]"; expected "SupportsKeysAndGetItem[str, dict[Never, Never]]" [dict-item] -posthog/api/test/test_capture.py:0: error: Unpacked dict entry 0 has incompatible type "Collection[str]"; expected "SupportsKeysAndGetItem[str, dict[Never, Never]]" [dict-item] -posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] -posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] -posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] -posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] -posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] -posthog/api/test/test_capture.py:0: error: Dict entry 0 has incompatible type "str": "float"; expected "str": "int" [dict-item] -posthog/temporal/tests/data_imports/test_end_to_end.py:0: error: Unused "type: ignore" comment [unused-ignore] diff --git a/package.json b/package.json index 70aabb78bdbb5..90d17bccfa39e 100644 --- a/package.json +++ b/package.json @@ -144,6 +144,7 @@ "kea-waitfor": "^0.2.1", "kea-window-values": "^3.0.0", "lodash.merge": "^4.6.2", + "lodash.uniqby": "^4.7.0", "maplibre-gl": "^3.5.1", "md5": "^2.3.0", "monaco-editor": "^0.49.0", @@ -152,7 +153,7 @@ "pmtiles": "^2.11.0", "postcss": "^8.4.31", "postcss-preset-env": "^9.3.0", - "posthog-js": "1.170.1", + "posthog-js": "1.172.0", "posthog-js-lite": "3.0.0", "prettier": "^2.8.8", "prop-types": "^15.7.2", @@ -233,6 +234,7 @@ "@types/jest": "^29.5.12", "@types/jest-image-snapshot": "^6.1.0", "@types/lodash.merge": "^4.6.9", + "@types/lodash.uniqby": "^4.7.9", "@types/md5": "^2.3.0", "@types/node": "^18.11.9", "@types/papaparse": "^5.3.8", diff --git a/plugin-server/src/worker/ingestion/timestamps.ts b/plugin-server/src/worker/ingestion/timestamps.ts index 3cfb3097aa256..bf1e82f4dffdf 100644 --- a/plugin-server/src/worker/ingestion/timestamps.ts +++ b/plugin-server/src/worker/ingestion/timestamps.ts @@ -45,13 +45,21 @@ export function parseEventTimestamp(data: PluginEvent, callback?: IngestionWarni parsedTs = now } - if (!parsedTs.isValid) { - callback?.('ignored_invalid_timestamp', { + const parsedTsOutOfBounds = parsedTs.year < 0 || parsedTs.year > 9999 + if 
(!parsedTs.isValid || parsedTsOutOfBounds) { + const details: Record = { eventUuid: data['uuid'] ?? '', field: 'timestamp', value: data['timestamp'] ?? '', - reason: parsedTs.invalidExplanation || 'unknown error', - }) + reason: parsedTs.invalidExplanation || (parsedTsOutOfBounds ? 'out of bounds' : 'unknown error'), + } + + if (parsedTsOutOfBounds) { + details['offset'] = data['offset'] + details['parsed_year'] = parsedTs.year + } + + callback?.('ignored_invalid_timestamp', details) return DateTime.utc() } diff --git a/plugin-server/tests/worker/ingestion/timestamps.test.ts b/plugin-server/tests/worker/ingestion/timestamps.test.ts index fae7847df01da..a70844a349ae9 100644 --- a/plugin-server/tests/worker/ingestion/timestamps.test.ts +++ b/plugin-server/tests/worker/ingestion/timestamps.test.ts @@ -145,6 +145,34 @@ describe('parseEventTimestamp()', () => { expect(timestamp.toUTC().toISO()).toEqual('2021-10-29T01:43:54.000Z') }) + it('timestamps adjusted way out of bounds are ignored', () => { + const event = { + offset: 600000000000000, + timestamp: '2021-10-28T01:00:00.000Z', + sent_at: '2021-10-28T01:05:00.000Z', + now: '2021-10-28T01:10:00.000Z', + uuid: new UUIDT(), + } as any as PluginEvent + + const callbackMock = jest.fn() + const timestamp = parseEventTimestamp(event, callbackMock) + expect(callbackMock.mock.calls).toEqual([ + [ + 'ignored_invalid_timestamp', + { + field: 'timestamp', + eventUuid: event.uuid, + offset: 600000000000000, + parsed_year: -16992, + reason: 'out of bounds', + value: '2021-10-28T01:00:00.000Z', + }, + ], + ]) + + expect(timestamp.toUTC().toISO()).toEqual('2020-08-12T01:02:00.000Z') + }) + it('reports timestamp parsing error and fallbacks to DateTime.utc', () => { const event = { team_id: 123, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e94f7aeb01f14..6f4cf091e794e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -253,6 +253,9 @@ dependencies: lodash.merge: specifier: ^4.6.2 version: 4.6.2 + lodash.uniqby: + specifier: ^4.7.0 + version: 4.7.0 maplibre-gl: specifier: ^3.5.1 version: 3.5.1 @@ -278,8 +281,8 @@ dependencies: specifier: ^9.3.0 version: 9.3.0(postcss@8.4.31) posthog-js: - specifier: 1.170.1 - version: 1.170.1 + specifier: 1.172.0 + version: 1.172.0 posthog-js-lite: specifier: 3.0.0 version: 3.0.0 @@ -518,6 +521,9 @@ devDependencies: '@types/lodash.merge': specifier: ^4.6.9 version: 4.6.9 + '@types/lodash.uniqby': + specifier: ^4.7.9 + version: 4.7.9 '@types/node': specifier: ^18.11.9 version: 18.11.9 @@ -8397,6 +8403,12 @@ packages: '@types/lodash': 4.14.188 dev: true + /@types/lodash.uniqby@4.7.9: + resolution: {integrity: sha512-rjrXji/seS6BZJRgXrU2h6FqxRVufsbq/HE0Tx0SdgbtlWr2YmD/M64BlYEYYlaMcpZwy32IYVkMfUMYlPuv0w==} + dependencies: + '@types/lodash': 4.14.188 + dev: true + /@types/lodash@4.14.188: resolution: {integrity: sha512-zmEmF5OIM3rb7SbLCFYoQhO4dGt2FRM9AMkxvA3LaADOF1n8in/zGJlWji9fmafLoNyz+FoL6FE0SLtGIArD7w==} dev: true @@ -10627,6 +10639,11 @@ packages: requiresBuild: true dev: false + /core-js@3.38.1: + resolution: {integrity: sha512-OP35aUorbU3Zvlx7pjsFdu1rGNnD4pgw/CWoYzRY3t2EzoVT7shKHY1dlAy3f41cGIO7ZDPQimhGFTlEYkG/Hw==} + requiresBuild: true + dev: false + /core-util-is@1.0.2: resolution: {integrity: sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==} @@ -15687,6 +15704,10 @@ packages: resolution: {integrity: sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==} dev: false + /lodash.uniqby@4.7.0: + resolution: {integrity: 
sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww==}
+    dev: false
+
   /lodash@4.17.21:
     resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
 
@@ -17752,9 +17773,10 @@
     resolution: {integrity: sha512-dyajjnfzZD1tht4N7p7iwf7nBnR1MjVaVu+MKr+7gBgA39bn28wizCIJZztZPtHy4PY0YwtSGgwfBCuG/hnHgA==}
     dev: false
 
-  /posthog-js@1.170.1:
-    resolution: {integrity: sha512-LReG76Sb2c0PlJZT5DSyM8GBU40d1fW/gqukwhjWYD3P8+98GJI8+e2qjVic4MI1PCZUkVPYxbP/bXkZQWhskg==}
+  /posthog-js@1.172.0:
+    resolution: {integrity: sha512-3BPOwoWUTOXYaJnqHEXCrkToIAvdcjdevz3VoUtccF0WgyBgHqkv6q0EGVPgBF6yWqkelIiQHJ2/3Pcl8Qf3jA==}
     dependencies:
+      core-js: 3.38.1
       fflate: 0.4.8
       preact: 10.24.3
       web-vitals: 4.2.3
diff --git a/posthog/api/alert.py b/posthog/api/alert.py
index 19611889c6662..707db62140c4a 100644
--- a/posthog/api/alert.py
+++ b/posthog/api/alert.py
@@ -16,6 +16,9 @@
 from posthog.schema import AlertState
 from posthog.api.insight import InsightBasicSerializer
+from posthog.utils import relative_date_parse
+from zoneinfo import ZoneInfo
+
 
 
 class ThresholdSerializer(serializers.ModelSerializer):
     class Meta:
@@ -73,6 +76,11 @@ def validate(self, data):
         return data
 
 
+class RelativeDateTimeField(serializers.DateTimeField):
+    def to_internal_value(self, data):
+        return data
+
+
 class AlertSerializer(serializers.ModelSerializer):
     created_by = UserBasicSerializer(read_only=True)
     checks = AlertCheckSerializer(many=True, read_only=True)
@@ -84,6 +92,7 @@ class AlertSerializer(serializers.ModelSerializer):
         write_only=True,
         allow_empty=False,
     )
+    snoozed_until = RelativeDateTimeField(allow_null=True, required=False)
 
     class Meta:
         model = AlertConfiguration
@@ -104,6 +113,7 @@ class Meta:
             "checks",
             "config",
             "calculation_interval",
+            "snoozed_until",
         ]
         read_only_fields = [
             "id",
@@ -149,6 +159,28 @@ def create(self, validated_data: dict) -> AlertConfiguration:
         return instance
 
     def update(self, instance, validated_data):
+        if "snoozed_until" in validated_data:
+            snoozed_until_param = validated_data.pop("snoozed_until")
+
+            if snoozed_until_param is None:
+                instance.state = AlertState.NOT_FIRING
+                instance.snoozed_until = None
+            else:
+                # always store snoozed_until as UTC time
+                # as we look at current UTC time to check when to run alerts
+                snoozed_until = relative_date_parse(snoozed_until_param, ZoneInfo("UTC"), increase=True)
+                instance.state = AlertState.SNOOZED
+                instance.snoozed_until = snoozed_until
+
+            AlertCheck.objects.create(
+                alert_configuration=instance,
+                calculated_value=None,
+                condition=instance.condition,
+                targets_notified={},
+                state=instance.state,
+                error=None,
+            )
+
         conditions_or_threshold_changed = False
 
         threshold_data = validated_data.pop("threshold", None)
@@ -183,6 +215,12 @@ def update(self, instance, validated_data):
 
         return super().update(instance, validated_data)
 
+    def validate_snoozed_until(self, value):
+        if value is not None and not isinstance(value, str):
+            raise ValidationError("snoozed_until has to be passed in string format")
+
+        return value
+
     def validate_insight(self, value):
         if value and not are_alerts_supported_for_insight(value):
             raise ValidationError("Alerts are not supported for this insight.")
diff --git a/posthog/api/dashboards/dashboard.py b/posthog/api/dashboards/dashboard.py
index 7541b6f00803b..ca626c0d1a8c2 100644
--- a/posthog/api/dashboards/dashboard.py
+++ b/posthog/api/dashboards/dashboard.py
@@ -30,7 +30,7 @@
 from posthog.models.tagged_item import TaggedItem
 from posthog.models.user import User
 from posthog.user_permissions import UserPermissionsSerializerMixin
-from posthog.utils import filters_override_requested_by_client
+from posthog.utils import filters_override_requested_by_client, variables_override_requested_by_client
 
 logger = structlog.get_logger(__name__)
 
@@ -126,6 +126,7 @@ def get_effective_privilege_level(self, dashboard: Dashboard) -> Dashboard.Privi
 class DashboardSerializer(DashboardBasicSerializer):
     tiles = serializers.SerializerMethodField()
     filters = serializers.SerializerMethodField()
+    variables = serializers.SerializerMethodField()
     created_by = UserBasicSerializer(read_only=True)
     use_template = serializers.CharField(write_only=True, allow_blank=True, required=False)
     use_dashboard = serializers.IntegerField(write_only=True, allow_null=True, required=False)
@@ -150,6 +151,7 @@ class Meta:
             "use_dashboard",
             "delete_insights",
             "filters",
+            "variables",
             "tags",
             "tiles",
             "restriction_level",
@@ -164,6 +166,12 @@ def validate_filters(self, value) -> dict:
 
         return value
 
+    def validate_variables(self, value) -> dict:
+        if not isinstance(value, dict):
+            raise serializers.ValidationError("Variables must be a dictionary")
+
+        return value
+
     @monitor(feature=Feature.DASHBOARD, endpoint="dashboard", method="POST")
     def create(self, validated_data: dict, *args: Any, **kwargs: Any) -> Dashboard:
         request = self.context["request"]
@@ -301,6 +309,12 @@ def update(self, instance: Dashboard, validated_data: dict, *args: Any, **kwargs
                 raise serializers.ValidationError("Filters must be a dictionary")
             instance.filters = request_filters
 
+        request_variables = initial_data.get("variables")
+        if request_variables:
+            if not isinstance(request_variables, dict):
+                raise serializers.ValidationError("Variables must be a dictionary")
+            instance.variables = request_variables
+
         instance = super().update(instance, validated_data)
 
         user = cast(User, self.context["request"].user)
@@ -410,6 +424,16 @@ def get_filters(self, dashboard: Dashboard) -> dict:
 
         return dashboard.filters
 
+    def get_variables(self, dashboard: Dashboard) -> dict:
+        request = self.context.get("request")
+        if request:
+            variables_override = variables_override_requested_by_client(request)
+
+            if variables_override is not None:
+                return variables_override
+
+        return dashboard.variables
+
     def validate(self, data):
         if data.get("use_dashboard", None) and data.get("use_template", None):
             raise serializers.ValidationError("`use_dashboard` and `use_template` cannot be used together")
diff --git a/posthog/api/decide.py b/posthog/api/decide.py
index 0569e2d5772fe..bcc597474a3ca 100644
--- a/posthog/api/decide.py
+++ b/posthog/api/decide.py
@@ -10,15 +10,20 @@
 from sentry_sdk import capture_exception
 from statshog.defaults.django import statsd
 
-from posthog.geoip import get_geoip_properties
 from posthog.api.survey import SURVEY_TARGETING_FLAG_PREFIX
-from posthog.api.utils import get_project_id, get_token, hostname_in_allowed_url_list, parse_domain
+from posthog.api.utils import (
+    get_project_id,
+    get_token,
+    hostname_in_allowed_url_list,
+    parse_domain,
+)
 from posthog.database_healthcheck import DATABASE_FOR_FLAG_MATCHING
 from posthog.exceptions import (
-    UnspecifiedCompressionFallbackParsingError,
     RequestParsingError,
+    UnspecifiedCompressionFallbackParsingError,
     generate_exception_response,
 )
+from posthog.geoip import get_geoip_properties
 from posthog.logging.timing import timed
 from posthog.metrics import LABEL_TEAM_ID
 from posthog.models import Team, User
@@ -265,7 +270,11 @@ def get_decide(request: HttpRequest):
         response["sessionRecording"] = _session_recording_config_response(request, team, token)
 
         if settings.DECIDE_SESSION_REPLAY_QUOTA_CHECK:
-            from ee.billing.quota_limiting import QuotaLimitingCaches, QuotaResource, list_limited_team_attributes
+            from ee.billing.quota_limiting import (
+                QuotaLimitingCaches,
+                QuotaResource,
+                list_limited_team_attributes,
+            )
 
             limited_tokens_recordings = list_limited_team_attributes(
                 QuotaResource.RECORDINGS, QuotaLimitingCaches.QUOTA_LIMITER_CACHE_KEY
@@ -277,6 +286,8 @@ def get_decide(request: HttpRequest):
 
         response["surveys"] = True if team.surveys_opt_in else False
         response["heatmaps"] = True if team.heatmaps_opt_in else False
+        default_identified_only = team.pk >= settings.DEFAULT_IDENTIFIED_ONLY_TEAM_ID_MIN
+        response["defaultIdentifiedOnly"] = bool(default_identified_only)
 
         site_apps = []
         # errors mean the database is unavailable, bail in this case
@@ -351,6 +362,7 @@ def _session_recording_config_response(request: HttpRequest, team: Team, token:
             "minimumDurationMilliseconds": minimum_duration,
             "linkedFlag": linked_flag,
             "networkPayloadCapture": team.session_recording_network_payload_capture_config or None,
+            "urlTriggers": team.session_recording_url_trigger_config,
         }
 
         if isinstance(team.session_replay_config, dict):
diff --git a/posthog/api/insight.py b/posthog/api/insight.py
index a039b7cb1929b..d1aa643a400a0 100644
--- a/posthog/api/insight.py
+++ b/posthog/api/insight.py
@@ -60,6 +60,7 @@
 from posthog.hogql_queries.apply_dashboard_filters import (
     WRAPPER_NODE_KINDS,
     apply_dashboard_filters_to_dict,
+    apply_dashboard_variables_to_dict,
 )
 from posthog.hogql_queries.legacy_compatibility.feature_flag import (
     hogql_insights_replace_filters,
@@ -109,10 +110,11 @@
 from posthog.settings import CAPTURE_TIME_TO_SEE_DATA, SITE_URL
 from posthog.user_permissions import UserPermissionsSerializerMixin
 from posthog.utils import (
-    filters_override_requested_by_client,
     refresh_requested_by_client,
     relative_date_parse,
     str_to_bool,
+    filters_override_requested_by_client,
+    variables_override_requested_by_client,
 )
 
 logger = structlog.get_logger(__name__)
@@ -594,12 +596,17 @@ def to_representation(self, instance: Insight):
         dashboard: Optional[Dashboard] = self.context.get("dashboard")
         request: Optional[Request] = self.context.get("request")
         dashboard_filters_override = filters_override_requested_by_client(request) if request else None
+        dashboard_variables_override = variables_override_requested_by_client(request) if request else None
 
         if hogql_insights_replace_filters(instance.team) and (
             instance.query is not None or instance.query_from_filters is not None
         ):
             query = instance.query or instance.query_from_filters
-            if dashboard is not None or dashboard_filters_override is not None:
+            if (
+                dashboard is not None
+                or dashboard_filters_override is not None
+                or dashboard_variables_override is not None
+            ):
                 query = apply_dashboard_filters_to_dict(
                     query,
                     (
@@ -611,6 +618,12 @@ def to_representation(self, instance: Insight):
                     ),
                     instance.team,
                 )
+
+                query = apply_dashboard_variables_to_dict(
+                    query,
+                    dashboard_variables_override or {},
+                    instance.team,
+                )
             representation["filters"] = {}
             representation["query"] = query
         else:
@@ -618,7 +631,9 @@ def to_representation(self, instance: Insight):
                 dashboard=dashboard, dashboard_filters_override=dashboard_filters_override
             )
             representation["query"] = instance.get_effective_query(
-                dashboard=dashboard, dashboard_filters_override=dashboard_filters_override
+                dashboard=dashboard,
+                dashboard_filters_override=dashboard_filters_override,
+                dashboard_variables_override=dashboard_variables_override,
             )
 
         if "insight" not in representation["filters"] and not representation["query"]:
@@ -639,6 +654,7 @@ def insight_result(self, insight: Insight) -> InsightResult:
         refresh_requested = refresh_requested_by_client(self.context["request"])
         execution_mode = execution_mode_from_refresh(refresh_requested)
         filters_override = filters_override_requested_by_client(self.context["request"])
+        variables_override = variables_override_requested_by_client(self.context["request"])
 
         if self.context.get("is_shared", False):
             execution_mode = shared_insights_execution_mode(execution_mode)
@@ -650,6 +666,7 @@
                 execution_mode=execution_mode,
                 user=None if self.context["request"].user.is_anonymous else self.context["request"].user,
                 filters_override=filters_override,
+                variables_override=variables_override,
             )
         except ExposedHogQLError as e:
             raise ValidationError(str(e))
diff --git a/posthog/api/insight_variable.py b/posthog/api/insight_variable.py
index 85303b4e58c84..8f53a2ea80ed6 100644
--- a/posthog/api/insight_variable.py
+++ b/posthog/api/insight_variable.py
@@ -1,5 +1,6 @@
 from django_filters.rest_framework import DjangoFilterBackend
 from rest_framework import serializers, viewsets
+from rest_framework.exceptions import ValidationError
 
 from posthog.api.routing import TeamAndOrgViewSetMixin
 from posthog.models.insight_variable import InsightVariable
@@ -22,6 +23,13 @@ def create(self, validated_data):
             "".join(n for n in validated_data["name"] if n.isalnum() or n == " ").replace(" ", "_").lower()
         )
 
+        count = InsightVariable.objects.filter(
+            team_id=self.context["team_id"], code_name=validated_data["code_name"]
+        ).count()
+
+        if count > 0:
+            raise ValidationError("Variable with name already exists")
+
         return InsightVariable.objects.create(**validated_data)
diff --git a/posthog/api/query.py b/posthog/api/query.py
index 7896e102ca204..1d3bf3f67edda 100644
--- a/posthog/api/query.py
+++ b/posthog/api/query.py
@@ -28,7 +28,10 @@
 from posthog.event_usage import report_user_action
 from posthog.hogql.ai import PromptUnclear, write_sql_from_prompt
 from posthog.hogql.errors import ExposedHogQLError
-from posthog.hogql_queries.apply_dashboard_filters import apply_dashboard_filters_to_dict
+from posthog.hogql_queries.apply_dashboard_filters import (
+    apply_dashboard_filters_to_dict,
+    apply_dashboard_variables_to_dict,
+)
 from posthog.hogql_queries.query_runner import ExecutionMode, execution_mode_from_refresh
 from posthog.models.user import User
 from posthog.rate_limit import (
@@ -79,6 +82,14 @@ def create(self, request, *args, **kwargs) -> Response:
                 data.query.model_dump(), data.filters_override.model_dump(), self.team
             )  # type: ignore
 
+        if data.variables_override is not None:
+            if isinstance(data.query, BaseModel):
+                query_as_dict = data.query.model_dump()
+            else:
+                query_as_dict = data.query
+
+            data.query = apply_dashboard_variables_to_dict(query_as_dict, data.variables_override, self.team)  # type: ignore
+
         client_query_id = data.client_query_id or uuid.uuid4().hex
         execution_mode = execution_mode_from_refresh(data.refresh)
         response_status: int = status.HTTP_200_OK
diff --git a/posthog/api/services/query.py b/posthog/api/services/query.py
index be9f6d56bea34..c3ca91cb88ab5 100644
--- a/posthog/api/services/query.py
+++ b/posthog/api/services/query.py
@@ -18,6 +18,7 @@
 from posthog.models import Team, User
 from posthog.schema import (
     DatabaseSchemaQueryResponse,
+    HogQLVariable,
     HogQuery,
     DashboardFilter,
     HogQLAutocomplete,
@@ -35,6 +36,7 @@ def process_query_dict(
     query_json: dict,
     *,
     dashboard_filters_json: Optional[dict] = None,
+    variables_override_json: Optional[dict] = None,
     limit_context: Optional[LimitContext] = None,
     execution_mode: ExecutionMode = ExecutionMode.RECENT_CACHE_CALCULATE_BLOCKING_IF_STALE,
     user: Optional[User] = None,
@@ -44,11 +46,17 @@ def process_query_dict(
 ) -> dict | BaseModel:
     model = QuerySchemaRoot.model_validate(query_json)
     tag_queries(query=query_json)
+
     dashboard_filters = DashboardFilter.model_validate(dashboard_filters_json) if dashboard_filters_json else None
+    variables_override = (
+        [HogQLVariable.model_validate(n) for n in variables_override_json.values()] if variables_override_json else None
+    )
+
     return process_query_model(
         team,
         model.root,
         dashboard_filters=dashboard_filters,
+        variables_override=variables_override,
         limit_context=limit_context,
         execution_mode=execution_mode,
         user=user,
@@ -63,6 +71,7 @@ def process_query_model(
     query: BaseModel,  # mypy has problems with unions and isinstance
     *,
     dashboard_filters: Optional[DashboardFilter] = None,
+    variables_override: Optional[list[HogQLVariable]] = None,
     limit_context: Optional[LimitContext] = None,
     execution_mode: ExecutionMode = ExecutionMode.RECENT_CACHE_CALCULATE_BLOCKING_IF_STALE,
     user: Optional[User] = None,
@@ -80,6 +89,7 @@ def process_query_model(
             team,
             query.source,
             dashboard_filters=dashboard_filters,
+            variables_override=variables_override,
             limit_context=limit_context,
             execution_mode=execution_mode,
             user=user,
@@ -119,6 +129,8 @@ def process_query_model(
     else:  # Query runner available - it will handle execution as well as caching
         if dashboard_filters:
             query_runner.apply_dashboard_filters(dashboard_filters)
+        if variables_override:
+            query_runner.apply_variable_overrides(variables_override)
         result = query_runner.run(
             execution_mode=execution_mode,
             user=user,
diff --git a/posthog/api/survey.py b/posthog/api/survey.py
index b2faf7a419186..ea894a7dd30c0 100644
--- a/posthog/api/survey.py
+++ b/posthog/api/survey.py
@@ -386,7 +386,7 @@ def update(self, instance: Survey, validated_data):
             instance.targeting_flag.active = False
             instance.targeting_flag.save()
 
-        iteration_count = validated_data.get("iteration_count")
+        iteration_count = validated_data.get("iteration_count", None)
         if (
             instance.current_iteration is not None
             and iteration_count is not None
@@ -396,8 +396,9 @@ def update(self, instance: Survey, validated_data):
                 f"Cannot change survey recurrence to {iteration_count}, should be at least {instance.current_iteration}"
             )
 
-        instance.iteration_count = iteration_count
-        instance.iteration_frequency_days = validated_data.get("iteration_frequency_days")
+        if iteration_count is not None:
+            instance.iteration_count = iteration_count
+            instance.iteration_frequency_days = validated_data.get("iteration_frequency_days")
 
         instance = super().update(instance, validated_data)
diff --git a/posthog/api/team.py b/posthog/api/team.py
index adbf2786f9a7b..566ec7fad57ed 100644
--- a/posthog/api/team.py
+++ b/posthog/api/team.py
@@ -103,6 +103,7 @@ class Meta:
            "session_recording_minimum_duration_milliseconds",
            "session_recording_linked_flag",
            "session_recording_network_payload_capture_config",
+            "session_recording_url_trigger_config",
            "session_replay_config",
            "survey_config",
            "recording_domains",
@@ -156,6 +157,7 @@ class Meta:
            "session_recording_minimum_duration_milliseconds",
            "session_recording_linked_flag",
"session_recording_network_payload_capture_config", + "session_recording_url_trigger_config", "session_replay_config", "survey_config", "effective_membership_level", diff --git a/posthog/api/test/__snapshots__/test_action.ambr b/posthog/api/test/__snapshots__/test_action.ambr index b2dc5ba508946..a55b9fe7296a2 100644 --- a/posthog/api/test/__snapshots__/test_action.ambr +++ b/posthog/api/test/__snapshots__/test_action.ambr @@ -57,6 +57,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -147,6 +148,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -469,6 +471,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_annotation.ambr b/posthog/api/test/__snapshots__/test_annotation.ambr index 4598de97187a5..6f4850c626049 100644 --- a/posthog/api/test/__snapshots__/test_annotation.ambr +++ b/posthog/api/test/__snapshots__/test_annotation.ambr @@ -57,6 +57,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -115,6 +116,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -370,6 +372,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_decide.ambr b/posthog/api/test/__snapshots__/test_decide.ambr index 21d18122377ee..7e266f47d2009 100644 --- a/posthog/api/test/__snapshots__/test_decide.ambr +++ b/posthog/api/test/__snapshots__/test_decide.ambr @@ -65,153 +65,73 @@ # --- # name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.10 ''' - SELECT "posthog_hogfunction"."id", - "posthog_hogfunction"."team_id", - "posthog_hogfunction"."name", - "posthog_hogfunction"."description", - "posthog_hogfunction"."created_at", - "posthog_hogfunction"."created_by_id", - 
"posthog_hogfunction"."deleted", - "posthog_hogfunction"."updated_at", - "posthog_hogfunction"."enabled", - "posthog_hogfunction"."icon_url", - "posthog_hogfunction"."hog", - "posthog_hogfunction"."bytecode", - "posthog_hogfunction"."inputs_schema", - "posthog_hogfunction"."inputs", - "posthog_hogfunction"."encrypted_inputs", - "posthog_hogfunction"."filters", - "posthog_hogfunction"."masking", - "posthog_hogfunction"."template_id", - "posthog_team"."id", - "posthog_team"."uuid", - "posthog_team"."organization_id", - "posthog_team"."project_id", - "posthog_team"."api_token", - "posthog_team"."app_urls", - "posthog_team"."name", - "posthog_team"."slack_incoming_webhook", - "posthog_team"."created_at", - "posthog_team"."updated_at", - "posthog_team"."anonymize_ips", - "posthog_team"."completed_snippet_onboarding", - "posthog_team"."has_completed_onboarding_for", - "posthog_team"."ingested_event", - "posthog_team"."autocapture_opt_out", - "posthog_team"."autocapture_web_vitals_opt_in", - "posthog_team"."autocapture_web_vitals_allowed_metrics", - "posthog_team"."autocapture_exceptions_opt_in", - "posthog_team"."autocapture_exceptions_errors_to_ignore", - "posthog_team"."session_recording_opt_in", - "posthog_team"."session_recording_sample_rate", - "posthog_team"."session_recording_minimum_duration_milliseconds", - "posthog_team"."session_recording_linked_flag", - "posthog_team"."session_recording_network_payload_capture_config", - "posthog_team"."session_replay_config", - "posthog_team"."survey_config", - "posthog_team"."capture_console_log_opt_in", - "posthog_team"."capture_performance_opt_in", - "posthog_team"."surveys_opt_in", - "posthog_team"."heatmaps_opt_in", - "posthog_team"."session_recording_version", - "posthog_team"."signup_token", - "posthog_team"."is_demo", - "posthog_team"."access_control", - "posthog_team"."week_start_day", - "posthog_team"."inject_web_apps", - "posthog_team"."test_account_filters", - "posthog_team"."test_account_filters_default_checked", - "posthog_team"."path_cleaning_filters", - "posthog_team"."timezone", - "posthog_team"."data_attributes", - "posthog_team"."person_display_name_properties", - "posthog_team"."live_events_columns", - "posthog_team"."recording_domains", - "posthog_team"."primary_dashboard_id", - "posthog_team"."extra_settings", - "posthog_team"."modifiers", - "posthog_team"."correlation_config", - "posthog_team"."session_recording_retention_period_days", - "posthog_team"."plugins_opt_in", - "posthog_team"."opt_out_capture", - "posthog_team"."event_names", - "posthog_team"."event_names_with_usage", - "posthog_team"."event_properties", - "posthog_team"."event_properties_with_usage", - "posthog_team"."event_properties_numerical", - "posthog_team"."external_data_workspace_id", - "posthog_team"."external_data_workspace_last_synced_at" - FROM "posthog_hogfunction" - INNER JOIN "posthog_team" ON ("posthog_hogfunction"."team_id" = "posthog_team"."id") - WHERE ("posthog_hogfunction"."team_id" = 2 - AND "posthog_hogfunction"."filters" @> '{"filter_test_accounts": true}'::jsonb) + SELECT "posthog_user"."id", + "posthog_user"."password", + "posthog_user"."last_login", + "posthog_user"."first_name", + "posthog_user"."last_name", + "posthog_user"."is_staff", + "posthog_user"."date_joined", + "posthog_user"."uuid", + "posthog_user"."current_organization_id", + "posthog_user"."current_team_id", + "posthog_user"."email", + "posthog_user"."pending_email", + "posthog_user"."temporary_token", + "posthog_user"."distinct_id", + "posthog_user"."is_email_verified", + 
"posthog_user"."has_seen_product_intro_for", + "posthog_user"."strapi_id", + "posthog_user"."is_active", + "posthog_user"."theme_mode", + "posthog_user"."partial_notification_settings", + "posthog_user"."anonymize_data", + "posthog_user"."toolbar_mode", + "posthog_user"."hedgehog_config", + "posthog_user"."events_column_config", + "posthog_user"."email_opt_in" + FROM "posthog_user" + WHERE "posthog_user"."id" = 2 + LIMIT 21 ''' # --- # name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.11 ''' - SELECT "posthog_team"."id", - "posthog_team"."uuid", - "posthog_team"."organization_id", - "posthog_team"."project_id", - "posthog_team"."api_token", - "posthog_team"."app_urls", - "posthog_team"."name", - "posthog_team"."slack_incoming_webhook", - "posthog_team"."created_at", - "posthog_team"."updated_at", - "posthog_team"."anonymize_ips", - "posthog_team"."completed_snippet_onboarding", - "posthog_team"."has_completed_onboarding_for", - "posthog_team"."ingested_event", - "posthog_team"."autocapture_opt_out", - "posthog_team"."autocapture_web_vitals_opt_in", - "posthog_team"."autocapture_web_vitals_allowed_metrics", - "posthog_team"."autocapture_exceptions_opt_in", - "posthog_team"."autocapture_exceptions_errors_to_ignore", - "posthog_team"."session_recording_opt_in", - "posthog_team"."session_recording_sample_rate", - "posthog_team"."session_recording_minimum_duration_milliseconds", - "posthog_team"."session_recording_linked_flag", - "posthog_team"."session_recording_network_payload_capture_config", - "posthog_team"."session_replay_config", - "posthog_team"."survey_config", - "posthog_team"."capture_console_log_opt_in", - "posthog_team"."capture_performance_opt_in", - "posthog_team"."surveys_opt_in", - "posthog_team"."heatmaps_opt_in", - "posthog_team"."session_recording_version", - "posthog_team"."signup_token", - "posthog_team"."is_demo", - "posthog_team"."access_control", - "posthog_team"."week_start_day", - "posthog_team"."inject_web_apps", - "posthog_team"."test_account_filters", - "posthog_team"."test_account_filters_default_checked", - "posthog_team"."path_cleaning_filters", - "posthog_team"."timezone", - "posthog_team"."data_attributes", - "posthog_team"."person_display_name_properties", - "posthog_team"."live_events_columns", - "posthog_team"."recording_domains", - "posthog_team"."primary_dashboard_id", - "posthog_team"."extra_settings", - "posthog_team"."modifiers", - "posthog_team"."correlation_config", - "posthog_team"."session_recording_retention_period_days", - "posthog_team"."external_data_workspace_id", - "posthog_team"."external_data_workspace_last_synced_at" - FROM "posthog_team" - WHERE ("posthog_team"."project_id" = 2 - AND "posthog_team"."id" = 2) - LIMIT 21 + SELECT "posthog_featureflag"."id", + "posthog_featureflag"."key", + "posthog_featureflag"."name", + "posthog_featureflag"."filters", + "posthog_featureflag"."rollout_percentage", + "posthog_featureflag"."team_id", + "posthog_featureflag"."created_by_id", + "posthog_featureflag"."created_at", + "posthog_featureflag"."deleted", + "posthog_featureflag"."active", + "posthog_featureflag"."rollback_conditions", + "posthog_featureflag"."performed_rollback", + "posthog_featureflag"."ensure_experience_continuity", + "posthog_featureflag"."usage_dashboard_id", + "posthog_featureflag"."has_enriched_analytics" + FROM "posthog_featureflag" + WHERE ("posthog_featureflag"."active" + AND NOT "posthog_featureflag"."deleted" + AND "posthog_featureflag"."team_id" = 2) ''' # --- # name: 
TestDecide.test_decide_doesnt_error_out_when_database_is_down.12 ''' - SELECT 1 AS "a" - FROM "posthog_grouptypemapping" - WHERE "posthog_grouptypemapping"."team_id" = 2 - LIMIT 1 + SELECT "posthog_pluginconfig"."id", + "posthog_pluginconfig"."web_token", + "posthog_pluginsourcefile"."updated_at", + "posthog_plugin"."updated_at", + "posthog_pluginconfig"."updated_at" + FROM "posthog_pluginconfig" + INNER JOIN "posthog_plugin" ON ("posthog_pluginconfig"."plugin_id" = "posthog_plugin"."id") + INNER JOIN "posthog_pluginsourcefile" ON ("posthog_plugin"."id" = "posthog_pluginsourcefile"."plugin_id") + WHERE ("posthog_pluginconfig"."enabled" + AND "posthog_pluginsourcefile"."filename" = 'site.ts' + AND "posthog_pluginsourcefile"."status" = 'TRANSPILED' + AND "posthog_pluginconfig"."team_id" = 2) ''' # --- # name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.13 @@ -388,6 +308,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -428,76 +349,6 @@ ''' # --- # name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.4 - ''' - SELECT "posthog_project"."id", - "posthog_project"."organization_id", - "posthog_project"."name", - "posthog_project"."created_at" - FROM "posthog_project" - WHERE "posthog_project"."id" = 2 - LIMIT 21 - ''' -# --- -# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.5 - ''' - SELECT "posthog_team"."id", - "posthog_team"."uuid", - "posthog_team"."organization_id", - "posthog_team"."project_id", - "posthog_team"."api_token", - "posthog_team"."app_urls", - "posthog_team"."name", - "posthog_team"."slack_incoming_webhook", - "posthog_team"."created_at", - "posthog_team"."updated_at", - "posthog_team"."anonymize_ips", - "posthog_team"."completed_snippet_onboarding", - "posthog_team"."has_completed_onboarding_for", - "posthog_team"."ingested_event", - "posthog_team"."autocapture_opt_out", - "posthog_team"."autocapture_web_vitals_opt_in", - "posthog_team"."autocapture_web_vitals_allowed_metrics", - "posthog_team"."autocapture_exceptions_opt_in", - "posthog_team"."autocapture_exceptions_errors_to_ignore", - "posthog_team"."session_recording_opt_in", - "posthog_team"."session_recording_sample_rate", - "posthog_team"."session_recording_minimum_duration_milliseconds", - "posthog_team"."session_recording_linked_flag", - "posthog_team"."session_recording_network_payload_capture_config", - "posthog_team"."session_replay_config", - "posthog_team"."survey_config", - "posthog_team"."capture_console_log_opt_in", - "posthog_team"."capture_performance_opt_in", - "posthog_team"."surveys_opt_in", - "posthog_team"."heatmaps_opt_in", - "posthog_team"."session_recording_version", - "posthog_team"."signup_token", - "posthog_team"."is_demo", - "posthog_team"."access_control", - "posthog_team"."week_start_day", - "posthog_team"."inject_web_apps", - "posthog_team"."test_account_filters", - "posthog_team"."test_account_filters_default_checked", - "posthog_team"."path_cleaning_filters", - "posthog_team"."timezone", - "posthog_team"."data_attributes", - "posthog_team"."person_display_name_properties", - "posthog_team"."live_events_columns", - "posthog_team"."recording_domains", - "posthog_team"."primary_dashboard_id", - "posthog_team"."extra_settings", - 
"posthog_team"."modifiers", - "posthog_team"."correlation_config", - "posthog_team"."session_recording_retention_period_days", - "posthog_team"."external_data_workspace_id", - "posthog_team"."external_data_workspace_last_synced_at" - FROM "posthog_team" - WHERE ("posthog_team"."project_id" = 2 - AND "posthog_team"."id" = 2) - LIMIT 21 - ''' -# --- -# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.6 ''' SELECT "posthog_organizationmembership"."id", "posthog_organizationmembership"."organization_id", @@ -529,7 +380,7 @@ WHERE "posthog_organizationmembership"."user_id" = 2 ''' # --- -# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.7 +# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.5 ''' SELECT "posthog_organizationmembership"."id", "posthog_organizationmembership"."organization_id", @@ -561,7 +412,7 @@ WHERE "posthog_organizationmembership"."user_id" = 2 ''' # --- -# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.8 +# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.6 ''' SELECT "posthog_team"."id", "posthog_team"."organization_id", @@ -570,9 +421,27 @@ WHERE "posthog_team"."organization_id" IN ('00000000-0000-0000-0000-000000000000'::uuid) ''' # --- -# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.9 +# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.7 ''' - SELECT "posthog_team"."id", + SELECT "posthog_hogfunction"."id", + "posthog_hogfunction"."team_id", + "posthog_hogfunction"."name", + "posthog_hogfunction"."description", + "posthog_hogfunction"."created_at", + "posthog_hogfunction"."created_by_id", + "posthog_hogfunction"."deleted", + "posthog_hogfunction"."updated_at", + "posthog_hogfunction"."enabled", + "posthog_hogfunction"."icon_url", + "posthog_hogfunction"."hog", + "posthog_hogfunction"."bytecode", + "posthog_hogfunction"."inputs_schema", + "posthog_hogfunction"."inputs", + "posthog_hogfunction"."encrypted_inputs", + "posthog_hogfunction"."filters", + "posthog_hogfunction"."masking", + "posthog_hogfunction"."template_id", + "posthog_team"."id", "posthog_team"."uuid", "posthog_team"."organization_id", "posthog_team"."project_id", @@ -596,6 +465,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -621,12 +491,36 @@ "posthog_team"."modifiers", "posthog_team"."correlation_config", "posthog_team"."session_recording_retention_period_days", + "posthog_team"."plugins_opt_in", + "posthog_team"."opt_out_capture", + "posthog_team"."event_names", + "posthog_team"."event_names_with_usage", + "posthog_team"."event_properties", + "posthog_team"."event_properties_with_usage", + "posthog_team"."event_properties_numerical", "posthog_team"."external_data_workspace_id", "posthog_team"."external_data_workspace_last_synced_at" - FROM "posthog_team" - WHERE ("posthog_team"."project_id" = 2 - AND "posthog_team"."id" = 2) - LIMIT 21 + FROM "posthog_hogfunction" + INNER JOIN "posthog_team" ON ("posthog_hogfunction"."team_id" = "posthog_team"."id") + WHERE ("posthog_hogfunction"."team_id" = 2 + AND "posthog_hogfunction"."filters" @> '{"filter_test_accounts": true}'::jsonb) + ''' +# --- +# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.8 
+ ''' + SELECT 1 AS "a" + FROM "posthog_grouptypemapping" + WHERE "posthog_grouptypemapping"."team_id" = 2 + LIMIT 1 + ''' +# --- +# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.9 + ''' + SELECT "posthog_productintent"."product_type", + "posthog_productintent"."created_at", + "posthog_productintent"."onboarding_completed_at" + FROM "posthog_productintent" + WHERE "posthog_productintent"."team_id" = 2 ''' # --- # name: TestDecide.test_flag_with_behavioural_cohorts @@ -673,6 +567,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -771,6 +666,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -927,6 +823,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1025,6 +922,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1187,6 +1085,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1302,6 +1201,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_early_access_feature.ambr b/posthog/api/test/__snapshots__/test_early_access_feature.ambr index 32efc633244b0..ad603b62d145a 100644 --- a/posthog/api/test/__snapshots__/test_early_access_feature.ambr +++ b/posthog/api/test/__snapshots__/test_early_access_feature.ambr @@ -25,6 +25,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -168,6 +169,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", 
"posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_element.ambr b/posthog/api/test/__snapshots__/test_element.ambr index ad4216e97a499..07e0cd83ae2ae 100644 --- a/posthog/api/test/__snapshots__/test_element.ambr +++ b/posthog/api/test/__snapshots__/test_element.ambr @@ -57,6 +57,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_feature_flag.ambr b/posthog/api/test/__snapshots__/test_feature_flag.ambr index 874f074cf2d1d..0c17549699ba7 100644 --- a/posthog/api/test/__snapshots__/test_feature_flag.ambr +++ b/posthog/api/test/__snapshots__/test_feature_flag.ambr @@ -466,6 +466,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -675,6 +676,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1044,6 +1046,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1186,6 +1189,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1487,6 +1491,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1605,6 +1610,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1677,6 +1683,7 @@ 
"posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1742,6 +1749,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_insight.ambr b/posthog/api/test/__snapshots__/test_insight.ambr index 41ca56ad62f97..f1d09748e9b6c 100644 --- a/posthog/api/test/__snapshots__/test_insight.ambr +++ b/posthog/api/test/__snapshots__/test_insight.ambr @@ -696,6 +696,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -754,6 +755,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -889,6 +891,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1136,6 +1139,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1289,6 +1293,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1429,6 +1434,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1548,6 +1554,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", 
"posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1702,6 +1709,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1795,6 +1803,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1888,6 +1897,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1953,6 +1963,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr b/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr index 08303ddef662f..9dffb44915b9f 100644 --- a/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr +++ b/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr @@ -106,6 +106,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -219,6 +220,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -312,6 +314,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -377,6 +380,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -521,6 +525,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", 
"posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -586,6 +591,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -679,6 +685,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -744,6 +751,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -902,6 +910,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -990,6 +999,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1435,6 +1445,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2130,6 +2141,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_preflight.ambr b/posthog/api/test/__snapshots__/test_preflight.ambr index 99d3eb4e8de69..970734d175ee4 100644 --- a/posthog/api/test/__snapshots__/test_preflight.ambr +++ b/posthog/api/test/__snapshots__/test_preflight.ambr @@ -57,6 +57,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", 
"posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/__snapshots__/test_survey.ambr b/posthog/api/test/__snapshots__/test_survey.ambr index 63d70d8867071..d19ead09d4cfd 100644 --- a/posthog/api/test/__snapshots__/test_survey.ambr +++ b/posthog/api/test/__snapshots__/test_survey.ambr @@ -172,6 +172,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr b/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr index 9500666ef0f50..2820e9ffea6b9 100644 --- a/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr +++ b/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr @@ -57,6 +57,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -194,6 +195,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -395,6 +397,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -718,6 +721,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1108,6 +1112,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1173,6 +1178,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1342,6 +1348,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", 
"posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1400,6 +1407,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1535,6 +1543,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1652,6 +1661,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1859,6 +1869,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2082,6 +2093,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2175,6 +2187,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2268,6 +2281,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2333,6 +2347,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2430,6 +2445,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2527,6 +2543,7 @@ 
"posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2696,6 +2713,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2811,6 +2829,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2904,6 +2923,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2997,6 +3017,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3062,6 +3083,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3135,6 +3157,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3270,6 +3293,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3387,6 +3411,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3602,6 +3627,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", 
"posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3803,6 +3829,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3966,6 +3993,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -4152,6 +4180,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -4623,6 +4652,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -4741,6 +4771,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -4918,6 +4949,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5058,6 +5090,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5162,6 +5195,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5255,6 +5289,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", 
"posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5320,6 +5355,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5385,6 +5421,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5520,6 +5557,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5611,6 +5649,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5676,6 +5715,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5811,6 +5851,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5928,6 +5969,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6123,6 +6165,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6238,6 +6281,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6345,6 +6389,7 @@ 
"posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6438,6 +6483,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6503,6 +6549,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6568,6 +6615,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6703,6 +6751,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6827,6 +6876,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7010,6 +7060,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7100,6 +7151,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7190,6 +7242,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7283,6 +7336,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", 
"posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7376,6 +7430,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7441,6 +7496,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7506,6 +7562,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7655,6 +7712,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7772,6 +7830,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7966,6 +8025,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -8167,6 +8227,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -8348,6 +8409,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -8557,6 +8619,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", 
"posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -8654,6 +8717,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -8831,6 +8895,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -9023,6 +9088,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -9141,6 +9207,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -9318,6 +9385,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -9608,6 +9676,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/dashboards/test_dashboard.py b/posthog/api/test/dashboards/test_dashboard.py index 1b9c98b029f01..ef97a1e6bd64b 100644 --- a/posthog/api/test/dashboards/test_dashboard.py +++ b/posthog/api/test/dashboards/test_dashboard.py @@ -13,6 +13,7 @@ from posthog.constants import AvailableFeature from posthog.hogql_queries.legacy_compatibility.filter_to_query import filter_to_query from posthog.models import Dashboard, DashboardTile, Filter, Insight, Team, User +from posthog.models.insight_variable import InsightVariable from posthog.models.organization import Organization from posthog.models.project import Project from posthog.models.sharing_configuration import SharingConfiguration @@ -1391,3 +1392,52 @@ def test_dashboard_duplication_breakdown_histogram_bin_count_none(self): for item in response["tiles"]: self.assertNotEqual(item.get("dashboard", None), existing_dashboard.pk) + + def test_dashboard_variables(self): + variable = InsightVariable.objects.create( + team=self.team, name="Test 1", code_name="test_1", default_value="some_default_value", type="String" + ) + dashboard = Dashboard.objects.create( + team=self.team, + 
name="dashboard 1", + created_by=self.user, + variables={ + str(variable.id): { + "code_name": variable.code_name, + "variableId": str(variable.id), + "value": "some override value", + } + }, + ) + insight = Insight.objects.create( + filters={}, + query={ + "kind": "DataVisualizationNode", + "source": { + "kind": "HogQLQuery", + "query": "select {variables.test_1}", + "variables": { + str(variable.id): { + "code_name": variable.code_name, + "variableId": str(variable.id), + } + }, + }, + "chartSettings": {}, + "tableSettings": {}, + }, + team=self.team, + last_refresh=now(), + ) + DashboardTile.objects.create(dashboard=dashboard, insight=insight) + + response_data = self.dashboard_api.get_dashboard(dashboard.pk) + + assert response_data["variables"] is not None + assert isinstance(response_data["variables"], dict) + assert len(response_data["variables"].keys()) == 1 + for key, value in response_data["variables"].items(): + assert key == str(variable.id) + assert value["code_name"] == variable.code_name + assert value["variableId"] == str(variable.id) + assert value["value"] == "some override value" diff --git a/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr b/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr index d5384e55e25b0..6f71b5ec7d091 100644 --- a/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr +++ b/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr @@ -57,6 +57,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -147,6 +148,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -421,6 +423,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -523,6 +526,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/api/test/test_alert.py b/posthog/api/test/test_alert.py index e1a1fcaccd836..4c56520f15027 100644 --- a/posthog/api/test/test_alert.py +++ b/posthog/api/test/test_alert.py @@ -6,6 +6,10 @@ from posthog.test.base import APIBaseTest, QueryMatchingTest from posthog.models.team import Team +from posthog.schema import InsightThresholdType, AlertState +from posthog.models import AlertConfiguration +from posthog.models.alert import AlertCheck +from datetime import datetime class TestAlert(APIBaseTest, QueryMatchingTest): @@ -33,7 +37,7 @@ def test_create_and_delete_alert(self) -> None: ], "config": {"type": 
"TrendsAlertConfig", "series_index": 0}, "name": "alert name", - "threshold": {"configuration": {}}, + "threshold": {"configuration": {"type": InsightThresholdType.ABSOLUTE, "bounds": {}}}, "calculation_interval": "daily", } response = self.client.post(f"/api/projects/{self.team.id}/alerts", creation_request) @@ -52,13 +56,14 @@ def test_create_and_delete_alert(self) -> None: "state": "Not firing", "config": {"type": "TrendsAlertConfig", "series_index": 0}, "threshold": { - "configuration": {}, + "configuration": {"type": InsightThresholdType.ABSOLUTE, "bounds": {}}, "created_at": mock.ANY, "id": mock.ANY, "name": "", }, "last_checked_at": None, "next_check_at": None, + "snoozed_until": None, } assert response.status_code == status.HTTP_201_CREATED, response.content assert response.json() == expected_alert_json @@ -107,7 +112,7 @@ def test_create_and_list_alert(self) -> None: "subscribed_users": [ self.user.id, ], - "threshold": {"configuration": {}}, + "threshold": {"configuration": {"type": InsightThresholdType.ABSOLUTE, "bounds": {}}}, "name": "alert name", } alert = self.client.post(f"/api/projects/{self.team.id}/alerts", creation_request).json() @@ -133,7 +138,7 @@ def test_alert_limit(self) -> None: "subscribed_users": [ self.user.id, ], - "threshold": {"configuration": {}}, + "threshold": {"configuration": {"type": InsightThresholdType.ABSOLUTE, "bounds": {}}}, "name": "alert name", } self.client.post(f"/api/projects/{self.team.id}/alerts", creation_request) @@ -151,7 +156,7 @@ def test_alert_is_deleted_on_insight_update(self) -> None: "subscribed_users": [ self.user.id, ], - "threshold": {"configuration": {}}, + "threshold": {"configuration": {"type": InsightThresholdType.ABSOLUTE, "bounds": {}}}, "name": "alert name", } alert = self.client.post(f"/api/projects/{self.team.id}/alerts", creation_request).json() @@ -176,3 +181,33 @@ def test_alert_is_deleted_on_insight_update(self) -> None: response = self.client.get(f"/api/projects/{self.team.id}/alerts/{alert['id']}") assert response.status_code == status.HTTP_404_NOT_FOUND + + def test_snooze_alert(self) -> None: + creation_request = { + "insight": self.insight["id"], + "subscribed_users": [ + self.user.id, + ], + "threshold": {"configuration": {"type": InsightThresholdType.ABSOLUTE, "bounds": {}}}, + "name": "alert name", + "state": AlertState.FIRING, + } + + alert = self.client.post(f"/api/projects/{self.team.id}/alerts", creation_request).json() + assert alert["state"] == AlertState.NOT_FIRING + + alert = AlertConfiguration.objects.get(pk=alert["id"]) + alert.state = AlertState.FIRING + alert.save() + + firing_alert = AlertConfiguration.objects.get(pk=alert.id) + assert firing_alert.state == AlertState.FIRING + + resolved_alert = self.client.patch( + f"/api/projects/{self.team.id}/alerts/{firing_alert.id}", {"snoozed_until": datetime.now()} + ).json() + assert resolved_alert["state"] == AlertState.SNOOZED + + # should also create a new alert check with resolution + check = AlertCheck.objects.filter(alert_configuration=firing_alert.id).latest("created_at") + assert check.state == AlertState.SNOOZED diff --git a/posthog/api/test/test_decide.py b/posthog/api/test/test_decide.py index 2bd47011c93c8..3fa1fd394d102 100644 --- a/posthog/api/test/test_decide.py +++ b/posthog/api/test/test_decide.py @@ -108,7 +108,7 @@ def _update_team(self, data, expected_status_code: int = status.HTTP_200_OK): client = Client() client.force_login(self.user) - response = client.patch("/api/projects/@current/", data, content_type="application/json") + 
response = client.patch("/api/environments/@current/", data, content_type="application/json") self.assertEqual(response.status_code, expected_status_code) client.logout() @@ -166,6 +166,7 @@ def test_user_session_recording_opt_in(self, *args): "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } self.assertEqual(response["supportedCompression"], ["gzip", "gzip-js"]) @@ -185,6 +186,7 @@ def test_user_console_log_opt_in(self, *args): "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } def test_user_performance_opt_in(self, *args): @@ -302,6 +304,26 @@ def test_session_recording_linked_flag_variant(self, *args): response = self._post_decide().json() self.assertEqual(response["sessionRecording"]["linkedFlag"], {"flag": "my-flag", "variant": "test"}) + def test_session_recording_url_trigger_patterns(self, *args): + self._update_team( + { + "session_recording_url_trigger_config": [{"url": "/replay-examples/", "matching": "regex"}], + "session_recording_opt_in": True, + } + ) + + response = self._post_decide(origin="capacitor://localhost:8000/home").json() + assert response["sessionRecording"] == { + "endpoint": "/s/", + "recorderVersion": "v2", + "consoleLogRecordingEnabled": True, + "sampleRate": None, + "linkedFlag": None, + "minimumDurationMilliseconds": None, + "networkPayloadCapture": None, + "urlTriggers": [{"url": "/replay-examples/", "matching": "regex"}], + } + def test_session_recording_network_payload_capture_config(self, *args): # :TRICKY: Test for regression around caching @@ -430,6 +452,7 @@ def test_user_session_recording_opt_in_wildcard_domain(self, *args): "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } self.assertEqual(response["supportedCompression"], ["gzip", "gzip-js"]) @@ -457,6 +480,7 @@ def test_user_session_recording_evil_site(self, *args): "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } def test_user_autocapture_opt_out(self, *args): @@ -491,6 +515,7 @@ def test_user_session_recording_allowed_when_no_permitted_domains_are_set(self, "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } def test_user_session_recording_allowed_for_android(self, *args) -> None: @@ -505,6 +530,7 @@ def test_user_session_recording_allowed_for_android(self, *args) -> None: "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } def test_user_session_recording_allowed_for_ios(self, *args) -> None: @@ -519,6 +545,7 @@ def test_user_session_recording_allowed_for_ios(self, *args) -> None: "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } def test_user_session_recording_allowed_when_permitted_domains_are_not_http_based(self, *args): @@ -538,6 +565,7 @@ def test_user_session_recording_allowed_when_permitted_domains_are_not_http_base "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], } @snapshot_postgres_queries @@ -2901,6 +2929,7 @@ def test_decide_doesnt_error_out_when_database_is_down(self, *args): "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], }, ) self.assertEqual(response["supportedCompression"], ["gzip", "gzip-js"]) @@ -2929,6 +2958,7 @@ def 
test_decide_doesnt_error_out_when_database_is_down(self, *args): "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], }, ) self.assertEqual(response["supportedCompression"], ["gzip", "gzip-js"]) @@ -3614,7 +3644,7 @@ def _update_team(self, data): client = Client() client.force_login(self.user) - response = client.patch("/api/projects/@current/", data, content_type="application/json") + response = client.patch("/api/environments/@current/", data, content_type="application/json") self.assertEqual(response.status_code, status.HTTP_200_OK) client.logout() @@ -3724,6 +3754,7 @@ def test_decide_doesnt_error_out_when_database_is_down_and_database_check_isnt_c "linkedFlag": None, "minimumDurationMilliseconds": None, "networkPayloadCapture": None, + "urlTriggers": [], }, ) self.assertEqual(response["supportedCompression"], ["gzip", "gzip-js"]) diff --git a/posthog/api/test/test_insight.py b/posthog/api/test/test_insight.py index 3aa7723fb9557..32cd4da9df83f 100644 --- a/posthog/api/test/test_insight.py +++ b/posthog/api/test/test_insight.py @@ -33,6 +33,7 @@ User, ) from posthog.models.insight_caching_state import InsightCachingState +from posthog.models.insight_variable import InsightVariable from posthog.models.project import Project from posthog.schema import ( DataTableNode, @@ -380,6 +381,7 @@ def test_get_insight_in_shared_context(self) -> None: team=self.team, user=mock.ANY, filters_override=None, + variables_override=None, ) with patch( @@ -393,6 +395,7 @@ def test_get_insight_in_shared_context(self) -> None: team=self.team, user=mock.ANY, filters_override=None, + variables_override=None, ) def test_get_insight_by_short_id(self) -> None: @@ -3596,3 +3599,60 @@ def test_insight_returns_cached_types(self) -> None: self.assertNotIn("code", response) self.assertIsNotNone(response["results"][0]["types"]) + + def test_insight_variables_overrides(self): + dashboard = Dashboard.objects.create( + team=self.team, + name="dashboard 1", + created_by=self.user, + ) + variable = InsightVariable.objects.create( + team=self.team, name="Test 1", code_name="test_1", default_value="some_default_value", type="String" + ) + insight = Insight.objects.create( + filters={}, + query={ + "kind": "DataVisualizationNode", + "source": { + "kind": "HogQLQuery", + "query": "select {variables.test_1}", + "variables": { + str(variable.id): { + "code_name": variable.code_name, + "variableId": str(variable.id), + } + }, + }, + "chartSettings": {}, + "tableSettings": {}, + }, + team=self.team, + ) + DashboardTile.objects.create(dashboard=dashboard, insight=insight) + + response = self.client.get( + f"/api/projects/{self.team.id}/insights/{insight.pk}", + data={ + "from_dashboard": dashboard.pk, + "variables_override": json.dumps( + { + str(variable.id): { + "code_name": variable.code_name, + "variableId": str(variable.id), + "value": "override value!", + } + } + ), + }, + ).json() + + assert isinstance(response["query"], dict) + assert isinstance(response["query"]["source"], dict) + assert isinstance(response["query"]["source"]["variables"], dict) + + assert len(response["query"]["source"]["variables"].keys()) == 1 + for key, value in response["query"]["source"]["variables"].items(): + assert key == str(variable.id) + assert value["code_name"] == variable.code_name + assert value["variableId"] == str(variable.id) + assert value["value"] == "override value!" 
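Reviewer note (not part of the diff): the behaviour exercised by test_insight_variables_overrides above reduces to merging the dashboard-level variable overrides into the HogQL query's "variables" map, keyed by variable ID. The snippet below is a minimal standalone sketch of that merge under the same data shapes the test uses; it is illustrative only and simply mirrors the apply_dashboard_variables_to_dict helper added further down in posthog/hogql_queries/apply_dashboard_filters.py.

# Illustrative sketch only -- mirrors the merge this PR performs for variable overrides.
def merge_variable_overrides(query: dict, overrides: dict[str, dict]) -> dict:
    """Return a copy of a HogQLQuery dict with dashboard variable overrides applied."""
    query_variables = query.get("variables")
    if not overrides or query_variables is None:
        return query

    merged = dict(query_variables)
    for variable_id, override in overrides.items():
        existing = merged.get(variable_id)
        if existing:
            # The code_name always comes from the insight's own query;
            # an override can only change the value, never rebind the variable.
            merged[variable_id] = {
                "variableId": variable_id,
                "code_name": existing["code_name"],
                "value": override.get("value"),
            }
    return {**query, "variables": merged}


# Example with the same shapes as the test above: the override value wins,
# while the code_name stays as defined on the insight.
query = {
    "kind": "HogQLQuery",
    "query": "select {variables.test_1}",
    "variables": {"123": {"code_name": "test_1", "variableId": "123"}},
}
overrides = {"123": {"code_name": "test_1", "variableId": "123", "value": "override value!"}}
assert merge_variable_overrides(query, overrides)["variables"]["123"]["value"] == "override value!"
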
diff --git a/posthog/api/test/test_insight_variable.py b/posthog/api/test/test_insight_variable.py new file mode 100644 index 0000000000000..2b6f09ef8ed89 --- /dev/null +++ b/posthog/api/test/test_insight_variable.py @@ -0,0 +1,33 @@ +from posthog.models.insight_variable import InsightVariable +from posthog.test.base import APIBaseTest + + +class TestInsightVariable(APIBaseTest): + def test_create_insight_variable(self): + response = self.client.post( + f"/api/projects/{self.team.pk}/insight_variables/", data={"name": "Test 1", "type": "String"} + ) + + assert response.status_code == 201 + + variable = InsightVariable.objects.get(team_id=self.team.pk) + + assert variable is not None + assert variable.created_by is not None + assert variable.created_at is not None + assert variable.name == "Test 1" + assert variable.type == "String" + assert variable.code_name == "test_1" + + def test_no_duplicate_code_names(self): + InsightVariable.objects.create(team=self.team, name="Test 1", code_name="test_1") + + response = self.client.post( + f"/api/projects/{self.team.pk}/insight_variables/", data={"name": "Test 1", "type": "String"} + ) + + assert response.status_code == 400 + + variable_count = InsightVariable.objects.filter(team_id=self.team.pk).count() + + assert variable_count == 1 diff --git a/posthog/api/test/test_survey.py b/posthog/api/test/test_survey.py index 4f171d91b6c14..c6de95a44702e 100644 --- a/posthog/api/test/test_survey.py +++ b/posthog/api/test/test_survey.py @@ -2371,6 +2371,19 @@ def test_can_create_recurring_survey(self): assert len(response_data["iteration_start_dates"]) == 2 assert response_data["current_iteration"] == 1 + def test_can_create_and_launch_recurring_survey(self): + survey = self._create_recurring_survey() + response = self.client.patch( + f"/api/projects/{self.team.id}/surveys/{survey.id}/", + data={ + "start_date": datetime.now() - timedelta(days=1), + }, + ) + response_data = response.json() + assert response_data["iteration_start_dates"] is not None + assert len(response_data["iteration_start_dates"]) == 2 + assert response_data["current_iteration"] == 1 + def test_can_set_internal_targeting_flag(self): survey = self._create_recurring_survey() response = self.client.patch( @@ -2493,7 +2506,7 @@ def test_guards_for_nil_iteration_count(self): ) assert response.status_code == status.HTTP_200_OK survey.refresh_from_db() - self.assertIsNone(survey.current_iteration) + self.assertIsNotNone(survey.current_iteration) response = self.client.patch( f"/api/projects/{self.team.id}/surveys/{survey.id}/", data={ diff --git a/posthog/caching/calculate_results.py b/posthog/caching/calculate_results.py index 985332c3c7206..8af99a3b2cfd8 100644 --- a/posthog/caching/calculate_results.py +++ b/posthog/caching/calculate_results.py @@ -130,6 +130,7 @@ def calculate_for_query_based_insight( execution_mode: ExecutionMode, user: Optional[User], filters_override: Optional[dict] = None, + variables_override: Optional[dict] = None, ) -> "InsightResult": from posthog.caching.fetch_from_cache import InsightResult, NothingInCacheResult from posthog.caching.insight_cache import update_cached_state @@ -144,6 +145,13 @@ def calculate_for_query_based_insight( dashboard_filters_json=( filters_override if filters_override is not None else dashboard.filters if dashboard is not None else None ), + variables_override_json=( + variables_override + if variables_override is not None + else dashboard.variables + if dashboard is not None + else None + ), execution_mode=execution_mode, user=user, 
insight_id=insight.pk, diff --git a/posthog/cdp/templates/__init__.py b/posthog/cdp/templates/__init__.py index 1d982a7fdd199..d80354f956ce5 100644 --- a/posthog/cdp/templates/__init__.py +++ b/posthog/cdp/templates/__init__.py @@ -31,12 +31,13 @@ template as google_cloud_storage, TemplateGoogleCloudStorageMigrator, ) - +from .airtable.template_airtable import template as airtable HOG_FUNCTION_TEMPLATES = [ slack, webhook, activecampaign, + airtable, attio, avo, aws_kinesis, diff --git a/posthog/cdp/templates/airtable/template_airtable.py b/posthog/cdp/templates/airtable/template_airtable.py new file mode 100644 index 0000000000000..f2e0ef2bc6133 --- /dev/null +++ b/posthog/cdp/templates/airtable/template_airtable.py @@ -0,0 +1,82 @@ +from posthog.cdp.templates.hog_function_template import HogFunctionTemplate + + +template: HogFunctionTemplate = HogFunctionTemplate( + status="alpha", + id="template-airtable", + name="Airtable", + description="Creates Airtable records", + icon_url="/static/services/airtable.png", + category=["Custom"], + hog=""" +let url := f'https://api.airtable.com/v0/{inputs.base_id}/{inputs.table_name}' + +let payload := { + 'headers': { + 'Content-Type': 'application/json', + 'Authorization': f'Bearer {inputs.access_token}' + }, + 'body': { + 'fields': inputs.fields, + 'typecast': true + }, + 'method': 'POST' +} + +if (inputs.debug) { + print('Request', url, payload) +} + +let res := fetch(url, payload); + +if (inputs.debug) { + print('Response', res.status, res.body); +} +if (res.status >= 400) { + throw Error(f'Error from api.airtable.com (status {res.status}): {res.body}') +} +""".strip(), + inputs_schema=[ + { + "key": "access_token", + "type": "string", + "label": "Airtable access token", + "secret": True, + "required": True, + "description": "Create this at https://airtable.com/create/tokens", + }, + { + "key": "base_id", + "type": "string", + "label": "Airtable base ID", + "secret": False, + "required": True, + "description": "Find this at https://airtable.com/developers/web/api/introduction", + }, + { + "key": "table_name", + "type": "string", + "label": "Table name", + "secret": False, + "required": True, + }, + { + "key": "fields", + "type": "json", + "label": "Fields", + "default": {"Timestamp": "{event.timestamp}", "Person Name": "{person.name}"}, + "secret": False, + "required": True, + "description": "Map field names from Airtable to properties from events and person records.", + }, + { + "key": "debug", + "type": "boolean", + "label": "Log responses", + "description": "Logs the response of http calls for debugging.", + "secret": False, + "required": False, + "default": False, + }, + ], +) diff --git a/posthog/cdp/templates/airtable/test_template_airtable.py b/posthog/cdp/templates/airtable/test_template_airtable.py new file mode 100644 index 0000000000000..fb4549c7d3c35 --- /dev/null +++ b/posthog/cdp/templates/airtable/test_template_airtable.py @@ -0,0 +1,66 @@ +from inline_snapshot import snapshot +from posthog.cdp.templates.helpers import BaseHogFunctionTemplateTest +from posthog.cdp.templates.airtable.template_airtable import template as template_airtable + + +class TestTemplateAirtable(BaseHogFunctionTemplateTest): + template = template_airtable + + def test_function_works(self): + self.run_function( + inputs={ + "access_token": "test_token", + "base_id": "test_base_id", + "table_name": "test_table", + "fields": {"Name": "John Doe", "Email": "john@example.com"}, + "debug": False, + } + ) + + assert self.get_mock_fetch_calls()[0] == snapshot( + ( + 
"https://api.airtable.com/v0/test_base_id/test_table", + { + "headers": {"Content-Type": "application/json", "Authorization": "Bearer test_token"}, + "body": {"fields": {"Name": "John Doe", "Email": "john@example.com"}, "typecast": True}, + "method": "POST", + }, + ) + ) + assert self.get_mock_print_calls() == snapshot([]) + + def test_prints_when_debugging(self): + self.run_function( + inputs={ + "access_token": "test_token", + "base_id": "test_base_id", + "table_name": "test_table", + "fields": {"Name": "John Doe", "Email": "john@example.com"}, + "debug": True, + } + ) + + assert self.get_mock_fetch_calls()[0] == snapshot( + ( + "https://api.airtable.com/v0/test_base_id/test_table", + { + "headers": {"Content-Type": "application/json", "Authorization": "Bearer test_token"}, + "body": {"fields": {"Name": "John Doe", "Email": "john@example.com"}, "typecast": True}, + "method": "POST", + }, + ) + ) + assert self.get_mock_print_calls() == snapshot( + [ + ( + "Request", + "https://api.airtable.com/v0/test_base_id/test_table", + { + "headers": {"Content-Type": "application/json", "Authorization": "Bearer test_token"}, + "body": {"fields": {"Name": "John Doe", "Email": "john@example.com"}, "typecast": True}, + "method": "POST", + }, + ), + ("Response", 200, {}), + ] + ) diff --git a/posthog/hogql_queries/apply_dashboard_filters.py b/posthog/hogql_queries/apply_dashboard_filters.py index 6d8e74f0fb588..9cb016eb48da9 100644 --- a/posthog/hogql_queries/apply_dashboard_filters.py +++ b/posthog/hogql_queries/apply_dashboard_filters.py @@ -22,3 +22,31 @@ def apply_dashboard_filters_to_dict(query: dict, filters: dict, team: Team) -> d return query query_runner.apply_dashboard_filters(DashboardFilter(**filters)) return query_runner.query.model_dump() + + +# Apply the variables from the django-style Dashboard object +def apply_dashboard_variables_to_dict(query: dict, variables_overrides: dict[str, dict], team: Team) -> dict: + if not variables_overrides: + return query + + if query.get("kind") in WRAPPER_NODE_KINDS: + source = apply_dashboard_variables_to_dict(query["source"], variables_overrides, team) + return {**query, "source": source} + + if query.get("kind") == NodeKind.HOG_QL_QUERY: + query_variables: dict[str, dict] | None = query.get("variables") + if query_variables is None: + return query + + for variable_id, overriden_hogql_variable in variables_overrides.items(): + query_variable = query_variables.get(variable_id) + if query_variable: + query_variables[variable_id] = { + "variableId": variable_id, + "code_name": query_variable["code_name"], + "value": overriden_hogql_variable.get("value"), + } + + return {**query, "variables": query_variables} + + return query diff --git a/posthog/hogql_queries/experiments/experiment_funnel_query_runner.py b/posthog/hogql_queries/experiments/experiment_funnel_query_runner.py deleted file mode 100644 index 0ff9a1058977b..0000000000000 --- a/posthog/hogql_queries/experiments/experiment_funnel_query_runner.py +++ /dev/null @@ -1,93 +0,0 @@ -from posthog.hogql import ast -from posthog.hogql_queries.query_runner import QueryRunner -from posthog.models.experiment import Experiment -from ..insights.funnels.funnels_query_runner import FunnelsQueryRunner -from posthog.schema import ( - CachedExperimentFunnelQueryResponse, - ExperimentFunnelQuery, - ExperimentFunnelQueryResponse, - ExperimentVariantFunnelResult, - FunnelsQuery, - InsightDateRange, - BreakdownFilter, -) -from typing import Any -from zoneinfo import ZoneInfo - - -class 
ExperimentFunnelQueryRunner(QueryRunner): - query: ExperimentFunnelQuery - response: ExperimentFunnelQueryResponse - cached_response: CachedExperimentFunnelQueryResponse - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.experiment = Experiment.objects.get(id=self.query.experiment_id) - self.feature_flag = self.experiment.feature_flag - self.prepared_funnel_query = self._prepare_funnel_query() - self.query_runner = FunnelsQueryRunner( - query=self.prepared_funnel_query, team=self.team, timings=self.timings, limit_context=self.limit_context - ) - - def calculate(self) -> ExperimentFunnelQueryResponse: - response = self.query_runner.calculate() - results = self._process_results(response.results) - return ExperimentFunnelQueryResponse(insight="FUNNELS", results=results) - - def _prepare_funnel_query(self) -> FunnelsQuery: - """ - This method takes the raw funnel query and adapts it - for the needs of experiment analysis: - - 1. Set the date range to match the experiment's duration, using the project's timezone. - 2. Configure the breakdown to use the feature flag key, which allows us - to separate results for different experiment variants. - """ - # Clone the source query - prepared_funnel_query = FunnelsQuery(**self.query.source.model_dump()) - - # Set the date range to match the experiment's duration, using the project's timezone - if self.team.timezone: - tz = ZoneInfo(self.team.timezone) - start_date = self.experiment.start_date.astimezone(tz) if self.experiment.start_date else None - end_date = self.experiment.end_date.astimezone(tz) if self.experiment.end_date else None - else: - start_date = self.experiment.start_date - end_date = self.experiment.end_date - - prepared_funnel_query.dateRange = InsightDateRange( - date_from=start_date.isoformat() if start_date else None, - date_to=end_date.isoformat() if end_date else None, - explicitDate=True, - ) - - # Configure the breakdown to use the feature flag key - prepared_funnel_query.breakdownFilter = BreakdownFilter( - breakdown=f"$feature/{self.feature_flag.key}", - breakdown_type="event", - ) - - return prepared_funnel_query - - def _process_results(self, funnels_results: list[list[dict[str, Any]]]) -> dict[str, ExperimentVariantFunnelResult]: - variants = self.feature_flag.variants - processed_results = { - variant["key"]: ExperimentVariantFunnelResult(key=variant["key"], success_count=0, failure_count=0) - for variant in variants - } - - for result in funnels_results: - first_step = result[0] - last_step = result[-1] - variant = first_step.get("breakdown_value") - variant_str = variant[0] if isinstance(variant, list) else str(variant) - if variant_str in processed_results: - total_count = first_step.get("count", 0) - success_count = last_step.get("count", 0) if len(result) > 1 else 0 - processed_results[variant_str].success_count = success_count - processed_results[variant_str].failure_count = total_count - success_count - - return processed_results - - def to_query(self) -> ast.SelectQuery: - raise ValueError(f"Cannot convert source query of type {self.query.source.kind} to query") diff --git a/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py b/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py new file mode 100644 index 0000000000000..c6783daa489e0 --- /dev/null +++ b/posthog/hogql_queries/experiments/experiment_funnels_query_runner.py @@ -0,0 +1,181 @@ +import json +from posthog.constants import ExperimentNoResultsErrorKeys +from posthog.hogql import ast +from 
posthog.hogql_queries.experiments import CONTROL_VARIANT_KEY +from posthog.hogql_queries.experiments.funnels_statistics import ( + are_results_significant, + calculate_credible_intervals, + calculate_probabilities, +) +from posthog.hogql_queries.query_runner import QueryRunner +from posthog.models.experiment import Experiment +from ..insights.funnels.funnels_query_runner import FunnelsQueryRunner +from posthog.schema import ( + CachedExperimentFunnelsQueryResponse, + ExperimentFunnelsQuery, + ExperimentFunnelsQueryResponse, + ExperimentSignificanceCode, + ExperimentVariantFunnelsBaseStats, + FunnelsQuery, + FunnelsQueryResponse, + InsightDateRange, + BreakdownFilter, +) +from typing import Optional, Any, cast +from zoneinfo import ZoneInfo +from rest_framework.exceptions import ValidationError + + +class ExperimentFunnelsQueryRunner(QueryRunner): + query: ExperimentFunnelsQuery + response: ExperimentFunnelsQueryResponse + cached_response: CachedExperimentFunnelsQueryResponse + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.experiment = Experiment.objects.get(id=self.query.experiment_id) + self.feature_flag = self.experiment.feature_flag + self.variants = [variant["key"] for variant in self.feature_flag.variants] + self.prepared_funnel_query = self._prepare_funnel_query() + self.funnels_query_runner = FunnelsQueryRunner( + query=self.prepared_funnel_query, team=self.team, timings=self.timings, limit_context=self.limit_context + ) + + def calculate(self) -> ExperimentFunnelsQueryResponse: + funnels_result = self.funnels_query_runner.calculate() + + self._validate_event_variants(funnels_result) + + # Statistical analysis + control_variant, test_variants = self._get_variants_with_base_stats(funnels_result) + probabilities = calculate_probabilities(control_variant, test_variants) + significance_code, loss = are_results_significant(control_variant, test_variants, probabilities) + credible_intervals = calculate_credible_intervals([control_variant, *test_variants]) + + return ExperimentFunnelsQueryResponse( + insight=funnels_result, + variants=[variant.model_dump() for variant in [control_variant, *test_variants]], + probability={ + variant.key: probability + for variant, probability in zip([control_variant, *test_variants], probabilities) + }, + significant=significance_code == ExperimentSignificanceCode.SIGNIFICANT, + significance_code=significance_code, + expected_loss=loss, + credible_intervals=credible_intervals, + ) + + def _prepare_funnel_query(self) -> FunnelsQuery: + """ + This method takes the raw funnel query and adapts it + for the needs of experiment analysis: + + 1. Set the date range to match the experiment's duration, using the project's timezone. + 2. Configure the breakdown to use the feature flag key, which allows us + to separate results for different experiment variants. 
+ """ + # Clone the source query + prepared_funnel_query = FunnelsQuery(**self.query.source.model_dump()) + + # Set the date range to match the experiment's duration, using the project's timezone + if self.team.timezone: + tz = ZoneInfo(self.team.timezone) + start_date = self.experiment.start_date.astimezone(tz) if self.experiment.start_date else None + end_date = self.experiment.end_date.astimezone(tz) if self.experiment.end_date else None + else: + start_date = self.experiment.start_date + end_date = self.experiment.end_date + + prepared_funnel_query.dateRange = InsightDateRange( + date_from=start_date.isoformat() if start_date else None, + date_to=end_date.isoformat() if end_date else None, + explicitDate=True, + ) + + # Configure the breakdown to use the feature flag key + prepared_funnel_query.breakdownFilter = BreakdownFilter( + breakdown=f"$feature/{self.feature_flag.key}", + breakdown_type="event", + ) + + return prepared_funnel_query + + def _get_variants_with_base_stats( + self, funnels_result: FunnelsQueryResponse + ) -> tuple[ExperimentVariantFunnelsBaseStats, list[ExperimentVariantFunnelsBaseStats]]: + control_variant: Optional[ExperimentVariantFunnelsBaseStats] = None + test_variants = [] + + for result in funnels_result.results: + result_dict = cast(list[dict[str, Any]], result) + first_step = result_dict[0] + last_step = result_dict[-1] + + total = first_step.get("count", 0) + success = last_step.get("count", 0) if len(result_dict) > 1 else 0 + failure = total - success + + breakdown_value = cast(list[str], first_step["breakdown_value"])[0] + + if breakdown_value == CONTROL_VARIANT_KEY: + control_variant = ExperimentVariantFunnelsBaseStats( + key=breakdown_value, + success_count=int(success), + failure_count=int(failure), + ) + else: + test_variants.append( + ExperimentVariantFunnelsBaseStats( + key=breakdown_value, success_count=int(success), failure_count=int(failure) + ) + ) + + if control_variant is None: + raise ValueError("Control variant not found in count results") + + return control_variant, test_variants + + def _validate_event_variants(self, funnels_result: FunnelsQueryResponse): + errors = { + ExperimentNoResultsErrorKeys.NO_EVENTS: True, + ExperimentNoResultsErrorKeys.NO_FLAG_INFO: True, + ExperimentNoResultsErrorKeys.NO_CONTROL_VARIANT: True, + ExperimentNoResultsErrorKeys.NO_TEST_VARIANT: True, + } + + if not funnels_result.results or not funnels_result.results: + raise ValidationError(code="no-results", detail=json.dumps(errors)) + + errors[ExperimentNoResultsErrorKeys.NO_EVENTS] = False + + # Funnels: the first step must be present for *any* results to show up + eventsWithOrderZero = [] + for eventArr in funnels_result.results: + for event in eventArr: + event_dict = cast(dict[str, Any], event) + if event_dict.get("order") == 0: + eventsWithOrderZero.append(event_dict) + + # Check if "control" is present + for event in eventsWithOrderZero: + event_variant = event.get("breakdown_value", [None])[0] + if event_variant == "control": + errors[ExperimentNoResultsErrorKeys.NO_CONTROL_VARIANT] = False + errors[ExperimentNoResultsErrorKeys.NO_FLAG_INFO] = False + break + + # Check if at least one of the test variants is present + test_variants = [variant for variant in self.variants if variant != "control"] + for event in eventsWithOrderZero: + event_variant = event.get("breakdown_value", [None])[0] + if event_variant in test_variants: + errors[ExperimentNoResultsErrorKeys.NO_TEST_VARIANT] = False + errors[ExperimentNoResultsErrorKeys.NO_FLAG_INFO] = False + break + 
+ has_errors = any(errors.values()) + if has_errors: + raise ValidationError(detail=json.dumps(errors)) + + def to_query(self) -> ast.SelectQuery: + raise ValueError(f"Cannot convert source query of type {self.query.source.kind} to query") diff --git a/posthog/hogql_queries/experiments/experiment_trend_query_runner.py b/posthog/hogql_queries/experiments/experiment_trends_query_runner.py similarity index 93% rename from posthog/hogql_queries/experiments/experiment_trend_query_runner.py rename to posthog/hogql_queries/experiments/experiment_trends_query_runner.py index 71173cd35f9f6..7389b65a29bf6 100644 --- a/posthog/hogql_queries/experiments/experiment_trend_query_runner.py +++ b/posthog/hogql_queries/experiments/experiment_trends_query_runner.py @@ -4,7 +4,7 @@ from posthog.constants import ExperimentNoResultsErrorKeys from posthog.hogql import ast from posthog.hogql_queries.experiments import CONTROL_VARIANT_KEY -from posthog.hogql_queries.experiments.trend_statistics import ( +from posthog.hogql_queries.experiments.trends_statistics import ( are_results_significant, calculate_credible_intervals, calculate_probabilities, @@ -17,14 +17,14 @@ from posthog.schema import ( BaseMathType, BreakdownFilter, - CachedExperimentTrendQueryResponse, + CachedExperimentTrendsQueryResponse, ChartDisplayType, EventPropertyFilter, EventsNode, ExperimentSignificanceCode, - ExperimentTrendQuery, - ExperimentTrendQueryResponse, - ExperimentVariantTrendBaseStats, + ExperimentTrendsQuery, + ExperimentTrendsQueryResponse, + ExperimentVariantTrendsBaseStats, InsightDateRange, PropertyMathType, TrendsFilter, @@ -35,10 +35,10 @@ import threading -class ExperimentTrendQueryRunner(QueryRunner): - query: ExperimentTrendQuery - response: ExperimentTrendQueryResponse - cached_response: CachedExperimentTrendQueryResponse +class ExperimentTrendsQueryRunner(QueryRunner): + query: ExperimentTrendsQuery + response: ExperimentTrendsQueryResponse + cached_response: CachedExperimentTrendsQueryResponse def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -207,7 +207,7 @@ def _prepare_exposure_query(self) -> TrendsQuery: return prepared_exposure_query - def calculate(self) -> ExperimentTrendQueryResponse: + def calculate(self) -> ExperimentTrendsQueryResponse: shared_results: dict[str, Optional[Any]] = {"count_result": None, "exposure_result": None} errors = [] @@ -249,14 +249,12 @@ def run(query_runner: TrendsQueryRunner, result_key: str, is_parallel: bool): self._validate_event_variants(count_result) # Statistical analysis - control_variant, test_variants = self._get_variants_with_base_stats( - count_result.results, exposure_result.results - ) + control_variant, test_variants = self._get_variants_with_base_stats(count_result, exposure_result) probabilities = calculate_probabilities(control_variant, test_variants) significance_code, p_value = are_results_significant(control_variant, test_variants, probabilities) credible_intervals = calculate_credible_intervals([control_variant, *test_variants]) - return ExperimentTrendQueryResponse( + return ExperimentTrendsQueryResponse( insight=count_result, variants=[variant.model_dump() for variant in [control_variant, *test_variants]], probability={ @@ -270,14 +268,14 @@ def run(query_runner: TrendsQueryRunner, result_key: str, is_parallel: bool): ) def _get_variants_with_base_stats( - self, count_results: list[dict[str, Any]], exposure_results: list[dict[str, Any]] - ) -> tuple[ExperimentVariantTrendBaseStats, list[ExperimentVariantTrendBaseStats]]: - 
control_variant: Optional[ExperimentVariantTrendBaseStats] = None + self, count_results: TrendsQueryResponse, exposure_results: TrendsQueryResponse + ) -> tuple[ExperimentVariantTrendsBaseStats, list[ExperimentVariantTrendsBaseStats]]: + control_variant: Optional[ExperimentVariantTrendsBaseStats] = None test_variants = [] exposure_counts = {} exposure_ratios = {} - for result in exposure_results: + for result in exposure_results.results: count = result.get("count", 0) breakdown_value = result.get("breakdown_value") exposure_counts[breakdown_value] = count @@ -288,11 +286,11 @@ def _get_variants_with_base_stats( for key, count in exposure_counts.items(): exposure_ratios[key] = count / control_exposure - for result in count_results: + for result in count_results.results: count = result.get("count", 0) breakdown_value = result.get("breakdown_value") if breakdown_value == CONTROL_VARIANT_KEY: - control_variant = ExperimentVariantTrendBaseStats( + control_variant = ExperimentVariantTrendsBaseStats( key=breakdown_value, count=count, exposure=1, @@ -301,7 +299,7 @@ def _get_variants_with_base_stats( ) else: test_variants.append( - ExperimentVariantTrendBaseStats( + ExperimentVariantTrendsBaseStats( key=breakdown_value, count=count, # TODO: in the absence of exposure data, we should throw rather than default to 1 diff --git a/posthog/hogql_queries/experiments/funnel_statistics.py b/posthog/hogql_queries/experiments/funnels_statistics.py similarity index 93% rename from posthog/hogql_queries/experiments/funnel_statistics.py rename to posthog/hogql_queries/experiments/funnels_statistics.py index 0c84714ae3842..cdec48fa3c681 100644 --- a/posthog/hogql_queries/experiments/funnel_statistics.py +++ b/posthog/hogql_queries/experiments/funnels_statistics.py @@ -7,14 +7,14 @@ FF_DISTRIBUTION_THRESHOLD, MIN_PROBABILITY_FOR_SIGNIFICANCE, ) -from posthog.schema import ExperimentSignificanceCode, ExperimentVariantFunnelResult +from posthog.schema import ExperimentSignificanceCode, ExperimentVariantFunnelsBaseStats Probability = float def calculate_probabilities( - control_variant: ExperimentVariantFunnelResult, - test_variants: list[ExperimentVariantFunnelResult], + control_variant: ExperimentVariantFunnelsBaseStats, + test_variants: list[ExperimentVariantFunnelsBaseStats], priors: tuple[int, int] = (1, 1), ) -> list[Probability]: """ @@ -60,7 +60,7 @@ def calculate_probabilities( def simulate_winning_variant_for_conversion( - target_variant: ExperimentVariantFunnelResult, variants: list[ExperimentVariantFunnelResult] + target_variant: ExperimentVariantFunnelsBaseStats, variants: list[ExperimentVariantFunnelsBaseStats] ) -> Probability: random_sampler = default_rng() prior_success = 1 @@ -94,11 +94,11 @@ def simulate_winning_variant_for_conversion( def are_results_significant( - control_variant: ExperimentVariantFunnelResult, - test_variants: list[ExperimentVariantFunnelResult], + control_variant: ExperimentVariantFunnelsBaseStats, + test_variants: list[ExperimentVariantFunnelsBaseStats], probabilities: list[Probability], ) -> tuple[ExperimentSignificanceCode, Probability]: - def get_conversion_rate(variant: ExperimentVariantFunnelResult): + def get_conversion_rate(variant: ExperimentVariantFunnelsBaseStats): return variant.success_count / (variant.success_count + variant.failure_count) control_sample_size = control_variant.success_count + control_variant.failure_count @@ -136,7 +136,7 @@ def get_conversion_rate(variant: ExperimentVariantFunnelResult): def calculate_expected_loss( - target_variant: 
ExperimentVariantFunnelResult, variants: list[ExperimentVariantFunnelResult] + target_variant: ExperimentVariantFunnelsBaseStats, variants: list[ExperimentVariantFunnelsBaseStats] ) -> float: """ Calculates expected loss in conversion rate for a given variant. diff --git a/posthog/hogql_queries/experiments/test/test_experiment_funnel_query_runner.py b/posthog/hogql_queries/experiments/test/test_experiment_funnel_query_runner.py deleted file mode 100644 index 7d1472d29315a..0000000000000 --- a/posthog/hogql_queries/experiments/test/test_experiment_funnel_query_runner.py +++ /dev/null @@ -1,107 +0,0 @@ -from posthog.hogql_queries.experiments.experiment_funnel_query_runner import ExperimentFunnelQueryRunner -from posthog.models.experiment import Experiment -from posthog.models.feature_flag.feature_flag import FeatureFlag -from posthog.schema import ( - EventsNode, - ExperimentFunnelQuery, - ExperimentFunnelQueryResponse, - FunnelsQuery, -) -from posthog.test.base import APIBaseTest, ClickhouseTestMixin, _create_event, _create_person, flush_persons_and_events -from freezegun import freeze_time -from typing import cast -from django.utils import timezone -from datetime import timedelta - - -class TestExperimentFunnelQueryRunner(ClickhouseTestMixin, APIBaseTest): - @freeze_time("2020-01-01T12:00:00Z") - def test_query_runner(self): - feature_flag = FeatureFlag.objects.create( - name="Test experiment flag", - key="test-experiment", - team=self.team, - filters={ - "groups": [{"properties": [], "rollout_percentage": None}], - "multivariate": { - "variants": [ - { - "key": "control", - "name": "Control", - "rollout_percentage": 50, - }, - { - "key": "test", - "name": "Test", - "rollout_percentage": 50, - }, - ] - }, - }, - created_by=self.user, - ) - - experiment = Experiment.objects.create( - name="test-experiment", - team=self.team, - feature_flag=feature_flag, - start_date=timezone.now(), - end_date=timezone.now() + timedelta(days=14), - ) - - feature_flag_property = f"$feature/{feature_flag.key}" - - funnels_query = FunnelsQuery( - series=[EventsNode(event="$pageview"), EventsNode(event="purchase")], - dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, - ) - experiment_query = ExperimentFunnelQuery( - experiment_id=experiment.id, - kind="ExperimentFunnelQuery", - source=funnels_query, - ) - - experiment.metrics = [{"type": "primary", "query": experiment_query.model_dump()}] - experiment.save() - - for variant, purchase_count in [("control", 6), ("test", 8)]: - for i in range(10): - _create_person(distinct_ids=[f"user_{variant}_{i}"], team_id=self.team.pk) - _create_event( - team=self.team, - event="$pageview", - distinct_id=f"user_{variant}_{i}", - timestamp="2020-01-02T12:00:00Z", - properties={feature_flag_property: variant}, - ) - if i < purchase_count: - _create_event( - team=self.team, - event="purchase", - distinct_id=f"user_{variant}_{i}", - timestamp="2020-01-02T12:01:00Z", - properties={feature_flag_property: variant}, - ) - - flush_persons_and_events() - - query_runner = ExperimentFunnelQueryRunner( - query=ExperimentFunnelQuery(**experiment.metrics[0]["query"]), team=self.team - ) - result = query_runner.calculate() - - self.assertEqual(result.insight, "FUNNELS") - self.assertEqual(len(result.results), 2) - - funnel_result = cast(ExperimentFunnelQueryResponse, result) - - self.assertIn("control", funnel_result.results) - self.assertIn("test", funnel_result.results) - - control_result = funnel_result.results["control"] - test_result = funnel_result.results["test"] - - 
self.assertEqual(control_result.success_count, 6) - self.assertEqual(control_result.failure_count, 4) - self.assertEqual(test_result.success_count, 8) - self.assertEqual(test_result.failure_count, 2) diff --git a/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py b/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py new file mode 100644 index 0000000000000..005fe82e089ae --- /dev/null +++ b/posthog/hogql_queries/experiments/test/test_experiment_funnels_query_runner.py @@ -0,0 +1,359 @@ +from typing import cast +from posthog.hogql_queries.experiments.experiment_funnels_query_runner import ExperimentFunnelsQueryRunner +from posthog.models.experiment import Experiment +from posthog.models.feature_flag.feature_flag import FeatureFlag +from posthog.schema import ( + EventsNode, + ExperimentFunnelsQuery, + ExperimentSignificanceCode, + FunnelsQuery, +) +from posthog.test.base import APIBaseTest, ClickhouseTestMixin, _create_event, _create_person, flush_persons_and_events +from freezegun import freeze_time +from django.utils import timezone +from datetime import timedelta +from rest_framework.exceptions import ValidationError +from posthog.constants import ExperimentNoResultsErrorKeys +import json +from posthog.test.test_journeys import journeys_for + + +class TestExperimentFunnelsQueryRunner(ClickhouseTestMixin, APIBaseTest): + def create_feature_flag(self, key="test-experiment"): + return FeatureFlag.objects.create( + name=f"Test experiment flag: {key}", + key=key, + team=self.team, + filters={ + "groups": [{"properties": [], "rollout_percentage": None}], + "multivariate": { + "variants": [ + { + "key": "control", + "name": "Control", + "rollout_percentage": 50, + }, + { + "key": "test", + "name": "Test", + "rollout_percentage": 50, + }, + ] + }, + }, + created_by=self.user, + ) + + def create_experiment(self, name="test-experiment", feature_flag=None): + if feature_flag is None: + feature_flag = self.create_feature_flag(name) + return Experiment.objects.create( + name=name, + team=self.team, + feature_flag=feature_flag, + start_date=timezone.now(), + end_date=timezone.now() + timedelta(days=14), + ) + + @freeze_time("2020-01-01T12:00:00Z") + def test_query_runner(self): + feature_flag = self.create_feature_flag() + experiment = self.create_experiment(feature_flag=feature_flag) + + feature_flag_property = f"$feature/{feature_flag.key}" + + funnels_query = FunnelsQuery( + series=[EventsNode(event="$pageview"), EventsNode(event="purchase")], + dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, + ) + experiment_query = ExperimentFunnelsQuery( + experiment_id=experiment.id, + kind="ExperimentFunnelsQuery", + source=funnels_query, + ) + + experiment.metrics = [{"type": "primary", "query": experiment_query.model_dump()}] + experiment.save() + + for variant, purchase_count in [("control", 6), ("test", 8)]: + for i in range(10): + _create_person(distinct_ids=[f"user_{variant}_{i}"], team_id=self.team.pk) + _create_event( + team=self.team, + event="$pageview", + distinct_id=f"user_{variant}_{i}", + timestamp="2020-01-02T12:00:00Z", + properties={feature_flag_property: variant}, + ) + if i < purchase_count: + _create_event( + team=self.team, + event="purchase", + distinct_id=f"user_{variant}_{i}", + timestamp="2020-01-02T12:01:00Z", + properties={feature_flag_property: variant}, + ) + + flush_persons_and_events() + + query_runner = ExperimentFunnelsQueryRunner( + query=ExperimentFunnelsQuery(**experiment.metrics[0]["query"]), 
team=self.team + ) + result = query_runner.calculate() + + self.assertEqual(len(result.variants), 2) + + control_variant = next(variant for variant in result.variants if variant.key == "control") + test_variant = next(variant for variant in result.variants if variant.key == "test") + + self.assertEqual(control_variant.success_count, 6) + self.assertEqual(control_variant.failure_count, 4) + self.assertEqual(test_variant.success_count, 8) + self.assertEqual(test_variant.failure_count, 2) + + self.assertIn("control", result.probability) + self.assertIn("test", result.probability) + + self.assertIn("control", result.credible_intervals) + self.assertIn("test", result.credible_intervals) + + @freeze_time("2020-01-01T12:00:00Z") + def test_query_runner_standard_flow(self): + feature_flag = self.create_feature_flag() + experiment = self.create_experiment(feature_flag=feature_flag) + + ff_property = f"$feature/{feature_flag.key}" + funnels_query = FunnelsQuery( + series=[EventsNode(event="$pageview"), EventsNode(event="purchase")], + dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, + ) + experiment_query = ExperimentFunnelsQuery( + experiment_id=experiment.id, + kind="ExperimentFunnelsQuery", + source=funnels_query, + ) + + experiment.metrics = [{"type": "primary", "query": experiment_query.model_dump()}] + experiment.save() + + journeys_for( + { + "user_control_1": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "control"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "control"}}, + ], + "user_control_2": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "control"}}, + ], + "user_control_3": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "control"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "control"}}, + ], + "user_test_1": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "test"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "test"}}, + ], + "user_test_2": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "test"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "test"}}, + ], + "user_test_3": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "test"}}, + ], + "user_test_4": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "test"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "test"}}, + ], + }, + self.team, + ) + + flush_persons_and_events() + + query_runner = ExperimentFunnelsQueryRunner( + query=ExperimentFunnelsQuery(**experiment.metrics[0]["query"]), team=self.team + ) + result = query_runner.calculate() + + self.assertEqual(len(result.variants), 2) + for variant in result.variants: + self.assertIn(variant.key, ["control", "test"]) + + control_variant = next(v for v in result.variants if v.key == "control") + test_variant = next(v for v in result.variants if v.key == "test") + + self.assertEqual(control_variant.success_count, 2) + self.assertEqual(control_variant.failure_count, 1) + self.assertEqual(test_variant.success_count, 3) + self.assertEqual(test_variant.failure_count, 1) + + self.assertAlmostEqual(result.probability["control"], 0.407, places=2) + self.assertAlmostEqual(result.probability["test"], 0.593, places=2) + + 
self.assertAlmostEqual(result.credible_intervals["control"][0], 0.1941, places=3) + self.assertAlmostEqual(result.credible_intervals["control"][1], 0.9324, places=3) + self.assertAlmostEqual(result.credible_intervals["test"][0], 0.2836, places=3) + self.assertAlmostEqual(result.credible_intervals["test"][1], 0.9473, places=3) + + self.assertEqual(result.significance_code, ExperimentSignificanceCode.NOT_ENOUGH_EXPOSURE) + + self.assertFalse(result.significant) + self.assertEqual(len(result.variants), 2) + self.assertAlmostEqual(result.expected_loss, 1.0, places=1) + + @freeze_time("2020-01-01T12:00:00Z") + def test_validate_event_variants_no_events(self): + feature_flag = self.create_feature_flag() + experiment = self.create_experiment(feature_flag=feature_flag) + + funnels_query = FunnelsQuery( + series=[EventsNode(event="$pageview"), EventsNode(event="purchase")], + dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, + ) + experiment_query = ExperimentFunnelsQuery( + experiment_id=experiment.id, + kind="ExperimentFunnelsQuery", + source=funnels_query, + ) + + query_runner = ExperimentFunnelsQueryRunner(query=experiment_query, team=self.team) + with self.assertRaises(ValidationError) as context: + query_runner.calculate() + + expected_errors = json.dumps( + { + ExperimentNoResultsErrorKeys.NO_EVENTS: True, + ExperimentNoResultsErrorKeys.NO_FLAG_INFO: True, + ExperimentNoResultsErrorKeys.NO_CONTROL_VARIANT: True, + ExperimentNoResultsErrorKeys.NO_TEST_VARIANT: True, + } + ) + self.assertEqual(cast(list, context.exception.detail)[0], expected_errors) + + @freeze_time("2020-01-01T12:00:00Z") + def test_validate_event_variants_no_control(self): + feature_flag = self.create_feature_flag() + experiment = self.create_experiment(feature_flag=feature_flag) + + ff_property = f"$feature/{feature_flag.key}" + journeys_for( + { + "user_test": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "test"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "test"}}, + ], + }, + self.team, + ) + + flush_persons_and_events() + + funnels_query = FunnelsQuery( + series=[EventsNode(event="$pageview"), EventsNode(event="purchase")], + dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, + ) + experiment_query = ExperimentFunnelsQuery( + experiment_id=experiment.id, + kind="ExperimentFunnelsQuery", + source=funnels_query, + ) + + query_runner = ExperimentFunnelsQueryRunner(query=experiment_query, team=self.team) + with self.assertRaises(ValidationError) as context: + query_runner.calculate() + + expected_errors = json.dumps( + { + ExperimentNoResultsErrorKeys.NO_EVENTS: False, + ExperimentNoResultsErrorKeys.NO_FLAG_INFO: False, + ExperimentNoResultsErrorKeys.NO_CONTROL_VARIANT: True, + ExperimentNoResultsErrorKeys.NO_TEST_VARIANT: False, + } + ) + self.assertEqual(cast(list, context.exception.detail)[0], expected_errors) + + @freeze_time("2020-01-01T12:00:00Z") + def test_validate_event_variants_no_test(self): + feature_flag = self.create_feature_flag() + experiment = self.create_experiment(feature_flag=feature_flag) + + ff_property = f"$feature/{feature_flag.key}" + journeys_for( + { + "user_control": [ + {"event": "$pageview", "timestamp": "2020-01-02", "properties": {ff_property: "control"}}, + {"event": "purchase", "timestamp": "2020-01-03", "properties": {ff_property: "control"}}, + ], + }, + self.team, + ) + + flush_persons_and_events() + + funnels_query = FunnelsQuery( + series=[EventsNode(event="$pageview"), 
EventsNode(event="purchase")], + dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, + ) + experiment_query = ExperimentFunnelsQuery( + experiment_id=experiment.id, + kind="ExperimentFunnelsQuery", + source=funnels_query, + ) + + query_runner = ExperimentFunnelsQueryRunner(query=experiment_query, team=self.team) + with self.assertRaises(ValidationError) as context: + query_runner.calculate() + + expected_errors = json.dumps( + { + ExperimentNoResultsErrorKeys.NO_EVENTS: False, + ExperimentNoResultsErrorKeys.NO_FLAG_INFO: False, + ExperimentNoResultsErrorKeys.NO_CONTROL_VARIANT: False, + ExperimentNoResultsErrorKeys.NO_TEST_VARIANT: True, + } + ) + self.assertEqual(cast(list, context.exception.detail)[0], expected_errors) + + @freeze_time("2020-01-01T12:00:00Z") + def test_validate_event_variants_no_flag_info(self): + feature_flag = self.create_feature_flag() + experiment = self.create_experiment(feature_flag=feature_flag) + + journeys_for( + { + "user_no_flag_1": [ + {"event": "$pageview", "timestamp": "2020-01-02"}, + {"event": "purchase", "timestamp": "2020-01-03"}, + ], + "user_no_flag_2": [ + {"event": "$pageview", "timestamp": "2020-01-03"}, + ], + }, + self.team, + ) + + flush_persons_and_events() + + funnels_query = FunnelsQuery( + series=[EventsNode(event="$pageview"), EventsNode(event="purchase")], + dateRange={"date_from": "2020-01-01", "date_to": "2020-01-14"}, + ) + experiment_query = ExperimentFunnelsQuery( + experiment_id=experiment.id, + kind="ExperimentFunnelsQuery", + source=funnels_query, + ) + + query_runner = ExperimentFunnelsQueryRunner(query=experiment_query, team=self.team) + with self.assertRaises(ValidationError) as context: + query_runner.calculate() + + expected_errors = json.dumps( + { + ExperimentNoResultsErrorKeys.NO_EVENTS: False, + ExperimentNoResultsErrorKeys.NO_FLAG_INFO: True, + ExperimentNoResultsErrorKeys.NO_CONTROL_VARIANT: True, + ExperimentNoResultsErrorKeys.NO_TEST_VARIANT: True, + } + ) + self.assertEqual(cast(list, context.exception.detail)[0], expected_errors) diff --git a/posthog/hogql_queries/experiments/test/test_experiment_trend_query_runner.py b/posthog/hogql_queries/experiments/test/test_experiment_trends_query_runner.py similarity index 88% rename from posthog/hogql_queries/experiments/test/test_experiment_trend_query_runner.py rename to posthog/hogql_queries/experiments/test/test_experiment_trends_query_runner.py index 9d8484c6e25f3..bb3357e62232b 100644 --- a/posthog/hogql_queries/experiments/test/test_experiment_trend_query_runner.py +++ b/posthog/hogql_queries/experiments/test/test_experiment_trends_query_runner.py @@ -1,14 +1,13 @@ from django.test import override_settings -from posthog.hogql_queries.experiments.experiment_trend_query_runner import ExperimentTrendQueryRunner +from posthog.hogql_queries.experiments.experiment_trends_query_runner import ExperimentTrendsQueryRunner from posthog.models.experiment import Experiment from posthog.models.feature_flag.feature_flag import FeatureFlag from posthog.schema import ( EventsNode, ExperimentSignificanceCode, - ExperimentTrendQuery, - ExperimentTrendQueryResponse, + ExperimentTrendsQuery, + ExperimentTrendsQueryResponse, TrendsQuery, - TrendsQueryResponse, ) from posthog.test.base import APIBaseTest, ClickhouseTestMixin, _create_event, flush_persons_and_events from freezegun import freeze_time @@ -22,7 +21,7 @@ @override_settings(IN_UNIT_TESTING=True) -class TestExperimentTrendQueryRunner(ClickhouseTestMixin, APIBaseTest): +class 
TestExperimentTrendsQueryRunner(ClickhouseTestMixin, APIBaseTest): def create_feature_flag(self, key="test-experiment"): return FeatureFlag.objects.create( name=f"Test experiment flag: {key}", @@ -68,9 +67,9 @@ def test_query_runner(self): count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) exposure_query = TrendsQuery(series=[EventsNode(event="$feature_flag_called")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, exposure_query=exposure_query, ) @@ -100,8 +99,8 @@ def test_query_runner(self): flush_persons_and_events() - query_runner = ExperimentTrendQueryRunner( - query=ExperimentTrendQuery(**experiment.metrics[0]["query"]), team=self.team + query_runner = ExperimentTrendsQueryRunner( + query=ExperimentTrendsQuery(**experiment.metrics[0]["query"]), team=self.team ) result = query_runner.calculate() @@ -126,9 +125,9 @@ def test_query_runner_with_custom_exposure(self): series=[EventsNode(event="custom_exposure_event", properties=[{"key": "valid_exposure", "value": "true"}])] ) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, exposure_query=exposure_query, ) @@ -198,12 +197,12 @@ def test_query_runner_with_custom_exposure(self): flush_persons_and_events() - query_runner = ExperimentTrendQueryRunner( - query=ExperimentTrendQuery(**experiment.metrics[0]["query"]), team=self.team + query_runner = ExperimentTrendsQueryRunner( + query=ExperimentTrendsQuery(**experiment.metrics[0]["query"]), team=self.team ) result = query_runner.calculate() - trend_result = cast(ExperimentTrendQueryResponse, result) + trend_result = cast(ExperimentTrendsQueryResponse, result) control_result = next(variant for variant in trend_result.variants if variant.key == "control") test_result = next(variant for variant in trend_result.variants if variant.key == "test") @@ -222,9 +221,9 @@ def test_query_runner_with_default_exposure(self): ff_property = f"$feature/{feature_flag.key}" count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, exposure_query=None, # No exposure query provided ) @@ -290,12 +289,12 @@ def test_query_runner_with_default_exposure(self): flush_persons_and_events() - query_runner = ExperimentTrendQueryRunner( - query=ExperimentTrendQuery(**experiment.metrics[0]["query"]), team=self.team + query_runner = ExperimentTrendsQueryRunner( + query=ExperimentTrendsQuery(**experiment.metrics[0]["query"]), team=self.team ) result = query_runner.calculate() - trend_result = cast(ExperimentTrendQueryResponse, result) + trend_result = cast(ExperimentTrendsQueryResponse, result) control_result = next(variant for variant in trend_result.variants if variant.key == "control") test_result = next(variant for variant in trend_result.variants if variant.key == "test") @@ -314,9 +313,9 @@ def test_query_runner_with_avg_math(self): count_query = TrendsQuery(series=[EventsNode(event="$pageview", math="avg")]) exposure_query = TrendsQuery(series=[EventsNode(event="$feature_flag_called")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( 
experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, exposure_query=exposure_query, ) @@ -324,8 +323,8 @@ def test_query_runner_with_avg_math(self): experiment.metrics = [{"type": "primary", "query": experiment_query.model_dump()}] experiment.save() - query_runner = ExperimentTrendQueryRunner( - query=ExperimentTrendQuery(**experiment.metrics[0]["query"]), team=self.team + query_runner = ExperimentTrendsQueryRunner( + query=ExperimentTrendsQuery(**experiment.metrics[0]["query"]), team=self.team ) prepared_count_query = query_runner.prepared_count_query @@ -340,9 +339,9 @@ def test_query_runner_standard_flow(self): count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) exposure_query = TrendsQuery(series=[EventsNode(event="$feature_flag_called")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, exposure_query=exposure_query, ) @@ -386,8 +385,8 @@ def test_query_runner_standard_flow(self): flush_persons_and_events() - query_runner = ExperimentTrendQueryRunner( - query=ExperimentTrendQuery(**experiment.metrics[0]["query"]), team=self.team + query_runner = ExperimentTrendsQueryRunner( + query=ExperimentTrendsQuery(**experiment.metrics[0]["query"]), team=self.team ) result = query_runner.calculate() @@ -433,16 +432,15 @@ def test_validate_event_variants_no_events(self): experiment = self.create_experiment(feature_flag=feature_flag) count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, ) - query_runner = ExperimentTrendQueryRunner(query=experiment_query, team=self.team) - + query_runner = ExperimentTrendsQueryRunner(query=experiment_query, team=self.team) with self.assertRaises(ValidationError) as context: - query_runner._validate_event_variants(TrendsQueryResponse(results=[])) + query_runner.calculate() expected_errors = json.dumps( { @@ -472,17 +470,15 @@ def test_validate_event_variants_no_control(self): flush_persons_and_events() count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, ) - query_runner = ExperimentTrendQueryRunner(query=experiment_query, team=self.team) - result = query_runner.count_query_runner.calculate() - + query_runner = ExperimentTrendsQueryRunner(query=experiment_query, team=self.team) with self.assertRaises(ValidationError) as context: - query_runner._validate_event_variants(result) + query_runner.calculate() expected_errors = json.dumps( { @@ -512,17 +508,15 @@ def test_validate_event_variants_no_test(self): flush_persons_and_events() count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, ) - query_runner = ExperimentTrendQueryRunner(query=experiment_query, team=self.team) - result = query_runner.count_query_runner.calculate() - + query_runner = ExperimentTrendsQueryRunner(query=experiment_query, 
team=self.team) with self.assertRaises(ValidationError) as context: - query_runner._validate_event_variants(result) + query_runner.calculate() expected_errors = json.dumps( { @@ -554,17 +548,15 @@ def test_validate_event_variants_no_flag_info(self): flush_persons_and_events() count_query = TrendsQuery(series=[EventsNode(event="$pageview")]) - experiment_query = ExperimentTrendQuery( + experiment_query = ExperimentTrendsQuery( experiment_id=experiment.id, - kind="ExperimentTrendQuery", + kind="ExperimentTrendsQuery", count_query=count_query, ) - query_runner = ExperimentTrendQueryRunner(query=experiment_query, team=self.team) - result = query_runner.count_query_runner.calculate() - + query_runner = ExperimentTrendsQueryRunner(query=experiment_query, team=self.team) with self.assertRaises(ValidationError) as context: - query_runner._validate_event_variants(result) + query_runner.calculate() expected_errors = json.dumps( { diff --git a/posthog/hogql_queries/experiments/trend_statistics.py b/posthog/hogql_queries/experiments/trends_statistics.py similarity index 93% rename from posthog/hogql_queries/experiments/trend_statistics.py rename to posthog/hogql_queries/experiments/trends_statistics.py index 9b2218267b0cc..61b19d1486f72 100644 --- a/posthog/hogql_queries/experiments/trend_statistics.py +++ b/posthog/hogql_queries/experiments/trends_statistics.py @@ -12,13 +12,13 @@ P_VALUE_SIGNIFICANCE_LEVEL, ) -from posthog.schema import ExperimentSignificanceCode, ExperimentVariantTrendBaseStats +from posthog.schema import ExperimentSignificanceCode, ExperimentVariantTrendsBaseStats Probability = float def calculate_probabilities( - control_variant: ExperimentVariantTrendBaseStats, test_variants: list[ExperimentVariantTrendBaseStats] + control_variant: ExperimentVariantTrendsBaseStats, test_variants: list[ExperimentVariantTrendsBaseStats] ) -> list[Probability]: """ Calculates probability that A is better than B. First variant is control, rest are test variants. 
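For context on what these renamed statistics helpers compute: a trends variant is summarized by its `count` and `exposure`, and win probabilities are estimated by sampling each variant's posterior arrival rate (the module's `simulate_winning_variant_for_arrival_rates` draws 100,000 samples with numpy's `default_rng`, as shown further down in this hunk). The sketch below illustrates that idea under the common Gamma-Poisson assumption; `VariantStats` and `probability_target_wins` are hypothetical stand-ins for illustration, not the module's actual API.

```python
# Hypothetical, simplified Monte Carlo win-probability sketch for count-based variants.
# Assumes a Gamma(count + 1, rate=exposure) posterior over each variant's arrival rate;
# this mirrors the spirit of simulate_winning_variant_for_arrival_rates, not its exact code.
from dataclasses import dataclass

from numpy.random import default_rng


@dataclass
class VariantStats:  # stand-in for ExperimentVariantTrendsBaseStats
    key: str
    count: float
    exposure: float


def probability_target_wins(target: VariantStats, others: list[VariantStats], simulations: int = 100_000) -> float:
    rng = default_rng()
    # One posterior sample of the arrival rate per simulation, per variant
    # (numpy's gamma takes shape and scale, so scale = 1 / exposure).
    target_rates = rng.gamma(target.count + 1, 1 / target.exposure, simulations)
    other_rates = [rng.gamma(v.count + 1, 1 / v.exposure, simulations) for v in others]
    # The target "wins" a simulation when its sampled rate beats every other variant's.
    wins = sum(all(target_rates[i] > rates[i] for rates in other_rates) for i in range(simulations))
    return wins / simulations


if __name__ == "__main__":
    control = VariantStats("control", count=100, exposure=1.0)
    test = VariantStats("test", count=120, exposure=1.0)
    print(probability_target_wins(test, [control]))  # roughly 0.9 for these inputs
```

In the production module these probabilities are then combined with the significance and p-value checks (`are_results_significant`, `calculate_p_value`) renamed in the hunks that follow.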
@@ -59,7 +59,7 @@ def calculate_probabilities( def simulate_winning_variant_for_arrival_rates( - target_variant: ExperimentVariantTrendBaseStats, variants: list[ExperimentVariantTrendBaseStats] + target_variant: ExperimentVariantTrendsBaseStats, variants: list[ExperimentVariantTrendsBaseStats] ) -> float: random_sampler = default_rng() simulations_count = 100_000 @@ -85,8 +85,8 @@ def simulate_winning_variant_for_arrival_rates( def are_results_significant( - control_variant: ExperimentVariantTrendBaseStats, - test_variants: list[ExperimentVariantTrendBaseStats], + control_variant: ExperimentVariantTrendsBaseStats, + test_variants: list[ExperimentVariantTrendsBaseStats], probabilities: list[Probability], ) -> tuple[ExperimentSignificanceCode, Probability]: # TODO: Experiment with Expected Loss calculations for trend experiments @@ -152,7 +152,7 @@ def poisson_p_value(control_count, control_exposure, test_count, test_exposure): def calculate_p_value( - control_variant: ExperimentVariantTrendBaseStats, test_variants: list[ExperimentVariantTrendBaseStats] + control_variant: ExperimentVariantTrendsBaseStats, test_variants: list[ExperimentVariantTrendsBaseStats] ) -> Probability: best_test_variant = max(test_variants, key=lambda variant: variant.count) diff --git a/posthog/hogql_queries/query_runner.py b/posthog/hogql_queries/query_runner.py index 8d62fabf66c82..664430cc7da04 100644 --- a/posthog/hogql_queries/query_runner.py +++ b/posthog/hogql_queries/query_runner.py @@ -33,6 +33,7 @@ FunnelsQuery, HogQLQuery, HogQLQueryModifiers, + HogQLVariable, InsightActorsQuery, InsightActorsQueryOptions, LifecycleQuery, @@ -365,10 +366,10 @@ def get_query_runner( limit_context=limit_context, ) - if kind == "ExperimentFunnelQuery": - from .experiments.experiment_funnel_query_runner import ExperimentFunnelQueryRunner + if kind == "ExperimentFunnelsQuery": + from .experiments.experiment_funnels_query_runner import ExperimentFunnelsQueryRunner - return ExperimentFunnelQueryRunner( + return ExperimentFunnelsQueryRunner( query=query, team=team, timings=timings, @@ -376,10 +377,10 @@ def get_query_runner( limit_context=limit_context, ) - if kind == "ExperimentTrendQuery": - from .experiments.experiment_trend_query_runner import ExperimentTrendQueryRunner + if kind == "ExperimentTrendsQuery": + from .experiments.experiment_trends_query_runner import ExperimentTrendsQueryRunner - return ExperimentTrendQueryRunner( + return ExperimentTrendsQueryRunner( query=query, team=team, timings=timings, @@ -721,6 +722,20 @@ def _is_stale(self, last_refresh: Optional[datetime], lazy: bool = False) -> boo def _refresh_frequency(self) -> timedelta: return timedelta(minutes=1) + def apply_variable_overrides(self, variable_overrides: list[HogQLVariable]): + """Irreversably update self.query with provided variable overrides.""" + if not hasattr(self.query, "variables") or not self.query.kind == "HogQLQuery" or len(variable_overrides) == 0: + return + + assert isinstance(self.query, HogQLQuery) + + if not self.query.variables: + return + + for variable in variable_overrides: + if self.query.variables.get(variable.variableId): + self.query.variables[variable.variableId] = variable + def apply_dashboard_filters(self, dashboard_filter: DashboardFilter): """Irreversably update self.query with provided dashboard filters.""" if not hasattr(self.query, "properties") or not hasattr(self.query, "dateRange"): diff --git a/posthog/migrations/0491_alertconfiguration_snoozed_until_and_more.py 
b/posthog/migrations/0491_alertconfiguration_snoozed_until_and_more.py new file mode 100644 index 0000000000000..d8fa097c43b32 --- /dev/null +++ b/posthog/migrations/0491_alertconfiguration_snoozed_until_and_more.py @@ -0,0 +1,46 @@ +# Generated by Django 4.2.15 on 2024-10-17 09:21 + +from django.db import migrations, models +import posthog.schema + + +class Migration(migrations.Migration): + dependencies = [ + ("posthog", "0490_dashboard_variables"), + ] + + operations = [ + migrations.AddField( + model_name="alertconfiguration", + name="snoozed_until", + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AlterField( + model_name="alertcheck", + name="state", + field=models.CharField( + choices=[ + (posthog.schema.AlertState["FIRING"], posthog.schema.AlertState["FIRING"]), + (posthog.schema.AlertState["NOT_FIRING"], posthog.schema.AlertState["NOT_FIRING"]), + (posthog.schema.AlertState["ERRORED"], posthog.schema.AlertState["ERRORED"]), + (posthog.schema.AlertState["SNOOZED"], posthog.schema.AlertState["SNOOZED"]), + ], + default=posthog.schema.AlertState["NOT_FIRING"], + max_length=10, + ), + ), + migrations.AlterField( + model_name="alertconfiguration", + name="state", + field=models.CharField( + choices=[ + (posthog.schema.AlertState["FIRING"], posthog.schema.AlertState["FIRING"]), + (posthog.schema.AlertState["NOT_FIRING"], posthog.schema.AlertState["NOT_FIRING"]), + (posthog.schema.AlertState["ERRORED"], posthog.schema.AlertState["ERRORED"]), + (posthog.schema.AlertState["SNOOZED"], posthog.schema.AlertState["SNOOZED"]), + ], + default=posthog.schema.AlertState["NOT_FIRING"], + max_length=10, + ), + ), + ] diff --git a/posthog/migrations/0492_team_session_recording_url_trigger_config.py b/posthog/migrations/0492_team_session_recording_url_trigger_config.py new file mode 100644 index 0000000000000..05ec513b2edec --- /dev/null +++ b/posthog/migrations/0492_team_session_recording_url_trigger_config.py @@ -0,0 +1,20 @@ +# Generated by Django 4.2.15 on 2024-10-14 08:09 + +import django.contrib.postgres.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("posthog", "0491_alertconfiguration_snoozed_until_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="team", + name="session_recording_url_trigger_config", + field=django.contrib.postgres.fields.ArrayField( + base_field=models.JSONField(blank=True, null=True), blank=True, default=list, null=True, size=None + ), + ), + ] diff --git a/posthog/models/alert.py b/posthog/models/alert.py index 8db059a992232..d00425327fd48 100644 --- a/posthog/models/alert.py +++ b/posthog/models/alert.py @@ -1,38 +1,24 @@ from datetime import datetime, UTC, timedelta -from typing import Any, Optional, cast -from dateutil.relativedelta import relativedelta from django.db import models from django.core.exceptions import ValidationError +import pydantic from posthog.hogql_queries.legacy_compatibility.flagged_conversion_manager import conversion_to_query_based from posthog.models.insight import Insight from posthog.models.utils import UUIDModel, CreatedMetaFields -from posthog.schema import AlertCondition, InsightThreshold, AlertState, AlertCalculationInterval +from posthog.schema import InsightThreshold, AlertState, AlertCalculationInterval ALERT_STATE_CHOICES = [ (AlertState.FIRING, AlertState.FIRING), (AlertState.NOT_FIRING, AlertState.NOT_FIRING), (AlertState.ERRORED, AlertState.ERRORED), + (AlertState.SNOOZED, AlertState.SNOOZED), ] -def 
alert_calculation_interval_to_relativedelta(alert_calculation_interval: AlertCalculationInterval) -> relativedelta: - match alert_calculation_interval: - case AlertCalculationInterval.HOURLY: - return relativedelta(hours=1) - case AlertCalculationInterval.DAILY: - return relativedelta(days=1) - case AlertCalculationInterval.WEEKLY: - return relativedelta(weeks=1) - case AlertCalculationInterval.MONTHLY: - return relativedelta(months=1) - case _: - raise ValueError(f"Invalid alert calculation interval: {alert_calculation_interval}") - - def are_alerts_supported_for_insight(insight: Insight) -> bool: with conversion_to_query_based(insight): query = insight.query @@ -43,32 +29,6 @@ def are_alerts_supported_for_insight(insight: Insight) -> bool: return True -class ConditionValidator: - def __init__(self, threshold: Optional[InsightThreshold], condition: AlertCondition): - self.threshold = threshold - self.condition = condition - - def validate(self, calculated_value: float) -> list[str]: - validators: Any = [ - self.validate_absolute_threshold, - ] - breaches = [] - for validator in validators: - breaches += validator(calculated_value) - return breaches - - def validate_absolute_threshold(self, calculated_value: float) -> list[str]: - if not self.threshold or not self.threshold.absoluteThreshold: - return [] - - absolute_threshold = self.threshold.absoluteThreshold - if absolute_threshold.lower is not None and calculated_value < absolute_threshold.lower: - return [f"The trend value ({calculated_value}) is below the lower threshold ({absolute_threshold.lower})"] - if absolute_threshold.upper is not None and calculated_value > absolute_threshold.upper: - return [f"The trend value ({calculated_value}) is above the upper threshold ({absolute_threshold.upper})"] - return [] - - class Alert(models.Model): """ @deprecated("AlertConfiguration should be used instead.") @@ -95,11 +55,15 @@ class Threshold(CreatedMetaFields, UUIDModel): configuration = models.JSONField(default=dict) def clean(self): - config = InsightThreshold.model_validate(self.configuration) - if not config or not config.absoluteThreshold: + try: + config = InsightThreshold.model_validate(self.configuration) + except pydantic.ValidationError as e: + raise ValidationError(f"Invalid threshold configuration: {e}") + + if not config or not config.bounds: return - if config.absoluteThreshold.lower is not None and config.absoluteThreshold.upper is not None: - if config.absoluteThreshold.lower > config.absoluteThreshold.upper: + if config.bounds.lower is not None and config.bounds.upper is not None: + if config.bounds.lower > config.bounds.upper: raise ValidationError("Lower threshold must be less than upper threshold") @@ -145,7 +109,10 @@ class AlertConfiguration(CreatedMetaFields, UUIDModel): last_notified_at = models.DateTimeField(null=True, blank=True) last_checked_at = models.DateTimeField(null=True, blank=True) + # UTC time for when next alert check is due next_check_at = models.DateTimeField(null=True, blank=True) + # UTC time until when we shouldn't check alert/notify user + snoozed_until = models.DateTimeField(null=True, blank=True) def __str__(self): return f"{self.name} (Team: {self.team})" @@ -159,75 +126,6 @@ def save(self, *args, **kwargs): super().save(*args, **kwargs) - def evaluate_condition(self, calculated_value) -> list[str]: - threshold = InsightThreshold.model_validate(self.threshold.configuration) if self.threshold else None - condition = AlertCondition.model_validate(self.condition) - validator = 
ConditionValidator(threshold=threshold, condition=condition) - return validator.validate(calculated_value) - - def add_check( - self, *, aggregated_value: Optional[float], error: Optional[dict] = None - ) -> tuple["AlertCheck", list[str], Optional[dict], bool]: - """ - Add a new AlertCheck, managing state transitions and cool down. - - Args: - aggregated_value: result of insight calculation compressed to one number to compare against threshold - error: any error raised while calculating insight value, if present then set state as errored - """ - - targets_notified: dict[str, list[str]] = {} - breaches = [] - notify = False - - if not error: - try: - breaches = self.evaluate_condition(aggregated_value) if aggregated_value is not None else [] - except Exception as err: - # error checking the condition - error = { - "message": f"Error checking alert condition {str(err)}", - } - - if error: - # If the alert is not already errored, notify user - if self.state != AlertState.ERRORED: - self.state = AlertState.ERRORED - notify = True - elif breaches: - # If the alert is not already firing, notify user - if self.state != AlertState.FIRING: - self.state = AlertState.FIRING - notify = True - else: - self.state = AlertState.NOT_FIRING # Set the Alert to not firing if the threshold is no longer met - # TODO: Optionally send a resolved notification when alert goes from firing to not_firing? - - now = datetime.now(UTC) - self.last_checked_at = datetime.now(UTC) - - # IMPORTANT: update next_check_at according to interval - # ensure we don't recheck alert until the next interval is due - self.next_check_at = (self.next_check_at or now) + alert_calculation_interval_to_relativedelta( - cast(AlertCalculationInterval, self.calculation_interval) - ) - - if notify: - self.last_notified_at = now - targets_notified = {"users": list(self.subscribed_users.all().values_list("email", flat=True))} - - alert_check = AlertCheck.objects.create( - alert_configuration=self, - calculated_value=aggregated_value, - condition=self.condition, - targets_notified=targets_notified, - state=self.state, - error=error, - ) - - self.save() - return alert_check, breaches, error, notify - class AlertSubscription(CreatedMetaFields, UUIDModel): user = models.ForeignKey( diff --git a/posthog/models/filters/test/__snapshots__/test_filter.ambr b/posthog/models/filters/test/__snapshots__/test_filter.ambr index cd2bf85143d05..c9a734ef96c8c 100644 --- a/posthog/models/filters/test/__snapshots__/test_filter.ambr +++ b/posthog/models/filters/test/__snapshots__/test_filter.ambr @@ -25,6 +25,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -90,6 +91,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -155,6 +157,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + 
"posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -220,6 +223,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -285,6 +289,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/models/insight.py b/posthog/models/insight.py index d32c2d1c31ec9..c8e5b0fbc7636 100644 --- a/posthog/models/insight.py +++ b/posthog/models/insight.py @@ -196,9 +196,17 @@ def dashboard_filters( return self.filters def get_effective_query( - self, *, dashboard: Optional[Dashboard], dashboard_filters_override: Optional[dict] = None + self, + *, + dashboard: Optional[Dashboard], + dashboard_filters_override: Optional[dict] = None, + dashboard_variables_override: Optional[dict[str, dict]] = None, ) -> Optional[dict]: from posthog.hogql_queries.apply_dashboard_filters import apply_dashboard_filters_to_dict + from posthog.hogql_queries.apply_dashboard_filters import apply_dashboard_variables_to_dict + + if self.query and dashboard_variables_override: + self.query = apply_dashboard_variables_to_dict(self.query, dashboard_variables_override or {}, self.team) if not (dashboard or dashboard_filters_override) or not self.query: return self.query diff --git a/posthog/models/team/team.py b/posthog/models/team/team.py index 30d2cb546892c..f3e6b5d6e33e4 100644 --- a/posthog/models/team/team.py +++ b/posthog/models/team/team.py @@ -246,6 +246,9 @@ class Meta: ) session_recording_linked_flag = models.JSONField(null=True, blank=True) session_recording_network_payload_capture_config = models.JSONField(null=True, blank=True) + session_recording_url_trigger_config = ArrayField( + models.JSONField(null=True, blank=True), default=list, blank=True, null=True + ) session_replay_config = models.JSONField(null=True, blank=True) survey_config = models.JSONField(null=True, blank=True) capture_console_log_opt_in = models.BooleanField(null=True, blank=True, default=True) diff --git a/posthog/schema.py b/posthog/schema.py index f980c95611bc0..1ba777cd25b26 100644 --- a/posthog/schema.py +++ b/posthog/schema.py @@ -50,17 +50,17 @@ class AlertCalculationInterval(StrEnum): MONTHLY = "monthly" -class AlertCondition(BaseModel): - pass - model_config = ConfigDict( - extra="forbid", - ) +class AlertConditionType(StrEnum): + ABSOLUTE_VALUE = "absolute_value" + RELATIVE_INCREASE = "relative_increase" + RELATIVE_DECREASE = "relative_decrease" class AlertState(StrEnum): FIRING = "Firing" NOT_FIRING = "Not firing" ERRORED = "Errored" + SNOOZED = "Snoozed" class Kind(StrEnum): @@ -525,7 +525,7 @@ class ExperimentSignificanceCode(StrEnum): HIGH_P_VALUE = "high_p_value" -class ExperimentVariantFunnelResult(BaseModel): +class ExperimentVariantFunnelsBaseStats(BaseModel): model_config = ConfigDict( extra="forbid", ) @@ -534,7 +534,7 @@ class ExperimentVariantFunnelResult(BaseModel): success_count: float -class 
ExperimentVariantTrendBaseStats(BaseModel): +class ExperimentVariantTrendsBaseStats(BaseModel): model_config = ConfigDict( extra="forbid", ) @@ -797,7 +797,12 @@ class InsightNodeKind(StrEnum): LIFECYCLE_QUERY = "LifecycleQuery" -class InsightsThresholdAbsolute(BaseModel): +class InsightThresholdType(StrEnum): + ABSOLUTE = "absolute" + PERCENTAGE = "percentage" + + +class InsightsThresholdBounds(BaseModel): model_config = ConfigDict( extra="forbid", ) @@ -870,8 +875,8 @@ class NodeKind(StrEnum): WEB_STATS_TABLE_QUERY = "WebStatsTableQuery" WEB_EXTERNAL_CLICKS_TABLE_QUERY = "WebExternalClicksTableQuery" WEB_GOALS_QUERY = "WebGoalsQuery" - EXPERIMENT_FUNNEL_QUERY = "ExperimentFunnelQuery" - EXPERIMENT_TREND_QUERY = "ExperimentTrendQuery" + EXPERIMENT_FUNNELS_QUERY = "ExperimentFunnelsQuery" + EXPERIMENT_TRENDS_QUERY = "ExperimentTrendsQuery" DATABASE_SCHEMA_QUERY = "DatabaseSchemaQuery" SUGGESTED_QUESTIONS_QUERY = "SuggestedQuestionsQuery" TEAM_TAXONOMY_QUERY = "TeamTaxonomyQuery" @@ -1022,14 +1027,6 @@ class QueryResponseAlternative7(BaseModel): warnings: list[HogQLNotice] -class QueryResponseAlternative16(BaseModel): - model_config = ConfigDict( - extra="forbid", - ) - insight: Literal["FUNNELS"] = "FUNNELS" - results: dict[str, ExperimentVariantFunnelResult] - - class QueryResponseAlternative38(BaseModel): model_config = ConfigDict( extra="forbid", @@ -1704,6 +1701,13 @@ class ActorsQueryResponse(BaseModel): types: list[str] +class AlertCondition(BaseModel): + model_config = ConfigDict( + extra="forbid", + ) + type: AlertConditionType + + class Breakdown(BaseModel): model_config = ConfigDict( extra="forbid", @@ -1910,27 +1914,7 @@ class CachedEventsQueryResponse(BaseModel): types: list[str] -class CachedExperimentFunnelQueryResponse(BaseModel): - model_config = ConfigDict( - extra="forbid", - ) - cache_key: str - cache_target_age: Optional[AwareDatetime] = None - calculation_trigger: Optional[str] = Field( - default=None, description="What triggered the calculation of the query, leave empty if user/immediate" - ) - insight: Literal["FUNNELS"] = "FUNNELS" - is_cached: bool - last_refresh: AwareDatetime - next_allowed_client_refresh: AwareDatetime - query_status: Optional[QueryStatus] = Field( - default=None, description="Query status indicates whether next to the provided data, a query is still running." 
- ) - results: dict[str, ExperimentVariantFunnelResult] - timezone: str - - -class CachedExperimentTrendQueryResponse(BaseModel): +class CachedExperimentTrendsQueryResponse(BaseModel): model_config = ConfigDict( extra="forbid", ) @@ -1952,7 +1936,7 @@ class CachedExperimentTrendQueryResponse(BaseModel): significance_code: ExperimentSignificanceCode significant: bool timezone: str - variants: list[ExperimentVariantTrendBaseStats] + variants: list[ExperimentVariantTrendsBaseStats] class CachedFunnelCorrelationResponse(BaseModel): @@ -2679,14 +2663,6 @@ class Response9(BaseModel): ) -class Response10(BaseModel): - model_config = ConfigDict( - extra="forbid", - ) - insight: Literal["FUNNELS"] = "FUNNELS" - results: dict[str, ExperimentVariantFunnelResult] - - class Response11(BaseModel): model_config = ConfigDict( extra="forbid", @@ -2697,7 +2673,7 @@ class Response11(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool - variants: list[ExperimentVariantTrendBaseStats] + variants: list[ExperimentVariantTrendsBaseStats] class DataWarehousePersonPropertyFilter(BaseModel): @@ -2850,15 +2826,7 @@ class EventsQueryResponse(BaseModel): types: list[str] -class ExperimentFunnelQueryResponse(BaseModel): - model_config = ConfigDict( - extra="forbid", - ) - insight: Literal["FUNNELS"] = "FUNNELS" - results: dict[str, ExperimentVariantFunnelResult] - - -class ExperimentTrendQueryResponse(BaseModel): +class ExperimentTrendsQueryResponse(BaseModel): model_config = ConfigDict( extra="forbid", ) @@ -2868,7 +2836,7 @@ class ExperimentTrendQueryResponse(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool - variants: list[ExperimentVariantTrendBaseStats] + variants: list[ExperimentVariantTrendsBaseStats] class BreakdownFilter1(BaseModel): @@ -3078,7 +3046,8 @@ class InsightThreshold(BaseModel): model_config = ConfigDict( extra="forbid", ) - absoluteThreshold: Optional[InsightsThresholdAbsolute] = None + bounds: Optional[InsightsThresholdBounds] = None + type: InsightThresholdType class LifecycleFilter(BaseModel): @@ -3460,6 +3429,19 @@ class QueryResponseAlternative15(BaseModel): ) +class QueryResponseAlternative16(BaseModel): + model_config = ConfigDict( + extra="forbid", + ) + credible_intervals: dict[str, list[float]] + expected_loss: float + insight: FunnelsQueryResponse + probability: dict[str, float] + significance_code: ExperimentSignificanceCode + significant: bool + variants: list[ExperimentVariantFunnelsBaseStats] + + class QueryResponseAlternative17(BaseModel): model_config = ConfigDict( extra="forbid", @@ -3470,7 +3452,7 @@ class QueryResponseAlternative17(BaseModel): probability: dict[str, float] significance_code: ExperimentSignificanceCode significant: bool - variants: list[ExperimentVariantTrendBaseStats] + variants: list[ExperimentVariantTrendsBaseStats] class QueryResponseAlternative18(BaseModel): @@ -3709,6 +3691,19 @@ class QueryResponseAlternative27(BaseModel): ) +class QueryResponseAlternative28(BaseModel): + model_config = ConfigDict( + extra="forbid", + ) + credible_intervals: dict[str, list[float]] + expected_loss: float + insight: FunnelsQueryResponse + probability: dict[str, float] + significance_code: ExperimentSignificanceCode + significant: bool + variants: list[ExperimentVariantFunnelsBaseStats] + + class QueryResponseAlternative29(BaseModel): model_config = ConfigDict( extra="forbid", @@ -3719,7 +3714,7 @@ class QueryResponseAlternative29(BaseModel): probability: dict[str, 
float] significance_code: ExperimentSignificanceCode significant: bool - variants: list[ExperimentVariantTrendBaseStats] + variants: list[ExperimentVariantTrendsBaseStats] class QueryResponseAlternative30(BaseModel): @@ -4158,6 +4153,31 @@ class AnyResponseType( ] +class CachedExperimentFunnelsQueryResponse(BaseModel): + model_config = ConfigDict( + extra="forbid", + ) + cache_key: str + cache_target_age: Optional[AwareDatetime] = None + calculation_trigger: Optional[str] = Field( + default=None, description="What triggered the calculation of the query, leave empty if user/immediate" + ) + credible_intervals: dict[str, list[float]] + expected_loss: float + insight: FunnelsQueryResponse + is_cached: bool + last_refresh: AwareDatetime + next_allowed_client_refresh: AwareDatetime + probability: dict[str, float] + query_status: Optional[QueryStatus] = Field( + default=None, description="Query status indicates whether next to the provided data, a query is still running." + ) + significance_code: ExperimentSignificanceCode + significant: bool + timezone: str + variants: list[ExperimentVariantFunnelsBaseStats] + + class CachedHogQLQueryResponse(BaseModel): model_config = ConfigDict( extra="forbid", @@ -4309,6 +4329,19 @@ class Response2(BaseModel): types: Optional[list] = Field(default=None, description="Types of returned columns") +class Response10(BaseModel): + model_config = ConfigDict( + extra="forbid", + ) + credible_intervals: dict[str, list[float]] + expected_loss: float + insight: FunnelsQueryResponse + probability: dict[str, float] + significance_code: ExperimentSignificanceCode + significant: bool + variants: list[ExperimentVariantFunnelsBaseStats] + + class DataWarehouseNode(BaseModel): model_config = ConfigDict( extra="forbid", @@ -4545,6 +4578,19 @@ class EventsNode(BaseModel): response: Optional[dict[str, Any]] = None +class ExperimentFunnelsQueryResponse(BaseModel): + model_config = ConfigDict( + extra="forbid", + ) + credible_intervals: dict[str, list[float]] + expected_loss: float + insight: FunnelsQueryResponse + probability: dict[str, float] + significance_code: ExperimentSignificanceCode + significant: bool + variants: list[ExperimentVariantFunnelsBaseStats] + + class FunnelExclusionActionsNode(BaseModel): model_config = ConfigDict( extra="forbid", @@ -5455,18 +5501,18 @@ class EventsQuery(BaseModel): where: Optional[list[str]] = Field(default=None, description="HogQL filters to apply on returned data") -class ExperimentTrendQuery(BaseModel): +class ExperimentTrendsQuery(BaseModel): model_config = ConfigDict( extra="forbid", ) count_query: TrendsQuery experiment_id: int exposure_query: Optional[TrendsQuery] = None - kind: Literal["ExperimentTrendQuery"] = "ExperimentTrendQuery" + kind: Literal["ExperimentTrendsQuery"] = "ExperimentTrendsQuery" modifiers: Optional[HogQLQueryModifiers] = Field( default=None, description="Modifiers used when performing the query" ) - response: Optional[ExperimentTrendQueryResponse] = None + response: Optional[ExperimentTrendsQueryResponse] = None class FunnelsQuery(BaseModel): @@ -5812,6 +5858,7 @@ class QueryResponseAlternative( QueryResponseAlternative25, QueryResponseAlternative26, QueryResponseAlternative27, + QueryResponseAlternative28, QueryResponseAlternative29, QueryResponseAlternative30, QueryResponseAlternative31, @@ -5851,6 +5898,7 @@ class QueryResponseAlternative( QueryResponseAlternative25, QueryResponseAlternative26, QueryResponseAlternative27, + QueryResponseAlternative28, QueryResponseAlternative29, QueryResponseAlternative30, 
QueryResponseAlternative31, @@ -5878,16 +5926,16 @@ class DatabaseSchemaQueryResponse(BaseModel): ] -class ExperimentFunnelQuery(BaseModel): +class ExperimentFunnelsQuery(BaseModel): model_config = ConfigDict( extra="forbid", ) experiment_id: int - kind: Literal["ExperimentFunnelQuery"] = "ExperimentFunnelQuery" + kind: Literal["ExperimentFunnelsQuery"] = "ExperimentFunnelsQuery" modifiers: Optional[HogQLQueryModifiers] = Field( default=None, description="Modifiers used when performing the query" ) - response: Optional[ExperimentFunnelQueryResponse] = None + response: Optional[ExperimentFunnelsQueryResponse] = None source: FunnelsQuery @@ -6207,8 +6255,8 @@ class DataTableNode(BaseModel): WebGoalsQuery, SessionAttributionExplorerQuery, ErrorTrackingQuery, - ExperimentFunnelQuery, - ExperimentTrendQuery, + ExperimentFunnelsQuery, + ExperimentTrendsQuery, ] = Field(..., description="Source of the events") @@ -6247,8 +6295,8 @@ class HogQLAutocomplete(BaseModel): WebGoalsQuery, SessionAttributionExplorerQuery, ErrorTrackingQuery, - ExperimentFunnelQuery, - ExperimentTrendQuery, + ExperimentFunnelsQuery, + ExperimentTrendsQuery, ] ] = Field(default=None, description="Query in whose context to validate.") startPosition: int = Field(..., description="Start position of the editor word") @@ -6291,8 +6339,8 @@ class HogQLMetadata(BaseModel): WebGoalsQuery, SessionAttributionExplorerQuery, ErrorTrackingQuery, - ExperimentFunnelQuery, - ExperimentTrendQuery, + ExperimentFunnelsQuery, + ExperimentTrendsQuery, ] ] = Field( default=None, @@ -6333,8 +6381,8 @@ class QueryRequest(BaseModel): WebGoalsQuery, SessionAttributionExplorerQuery, ErrorTrackingQuery, - ExperimentFunnelQuery, - ExperimentTrendQuery, + ExperimentFunnelsQuery, + ExperimentTrendsQuery, DataVisualizationNode, DataTableNode, SavedInsightNode, @@ -6372,6 +6420,7 @@ class QueryRequest(BaseModel): " `query_status` response field." 
), ) + variables_override: Optional[dict[str, dict[str, Any]]] = None class QuerySchemaRoot( @@ -6397,8 +6446,8 @@ class QuerySchemaRoot( WebGoalsQuery, SessionAttributionExplorerQuery, ErrorTrackingQuery, - ExperimentFunnelQuery, - ExperimentTrendQuery, + ExperimentFunnelsQuery, + ExperimentTrendsQuery, DataVisualizationNode, DataTableNode, SavedInsightNode, @@ -6436,8 +6485,8 @@ class QuerySchemaRoot( WebGoalsQuery, SessionAttributionExplorerQuery, ErrorTrackingQuery, - ExperimentFunnelQuery, - ExperimentTrendQuery, + ExperimentFunnelsQuery, + ExperimentTrendsQuery, DataVisualizationNode, DataTableNode, SavedInsightNode, diff --git a/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr b/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr index 4bd1b7a0c1415..550009ec55f1c 100644 --- a/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr +++ b/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr @@ -25,6 +25,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -90,6 +91,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -155,6 +157,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -220,6 +223,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -285,6 +289,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -458,6 +463,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -548,6 +554,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", 
"posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -917,6 +924,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1014,6 +1022,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1079,6 +1088,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1144,6 +1154,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1209,6 +1220,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1274,6 +1286,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1339,6 +1352,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1436,6 +1450,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1633,6 +1648,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -1768,6 +1784,7 @@ 
"posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2229,6 +2246,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2364,6 +2382,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2513,6 +2532,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -2865,6 +2885,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3019,6 +3040,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3476,6 +3498,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3643,6 +3666,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -3917,6 +3941,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -4141,6 +4166,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", 
"posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -4276,6 +4302,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5142,6 +5169,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5277,6 +5305,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5715,6 +5744,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -5871,6 +5901,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6372,6 +6403,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6526,6 +6558,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -6968,6 +7001,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -7103,6 +7137,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", 
"posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/settings/temporal.py b/posthog/settings/temporal.py index b73a7a0b6af83..dcab7bfb9a58a 100644 --- a/posthog/settings/temporal.py +++ b/posthog/settings/temporal.py @@ -17,6 +17,7 @@ BATCH_EXPORT_BIGQUERY_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 100 # 100MB BATCH_EXPORT_HTTP_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 50 # 50MB BATCH_EXPORT_HTTP_BATCH_SIZE: int = 5000 +BATCH_EXPORT_BUFFER_QUEUE_MAX_SIZE_BYTES: int = 1024 * 1024 * 300 # 300MB UNCONSTRAINED_TIMESTAMP_TEAM_IDS: list[str] = get_list(os.getenv("UNCONSTRAINED_TIMESTAMP_TEAM_IDS", "")) ASYNC_ARROW_STREAMING_TEAM_IDS: list[str] = get_list(os.getenv("ASYNC_ARROW_STREAMING_TEAM_IDS", "")) diff --git a/posthog/settings/web.py b/posthog/settings/web.py index 92ec31382fe45..6e32dff2e6138 100644 --- a/posthog/settings/web.py +++ b/posthog/settings/web.py @@ -2,8 +2,8 @@ import os from datetime import timedelta -from corsheaders.defaults import default_headers import structlog +from corsheaders.defaults import default_headers from posthog.settings.base_variables import BASE_DIR, DEBUG, TEST from posthog.settings.utils import get_from_env, get_list, str_to_bool @@ -41,6 +41,9 @@ DECIDE_BILLING_SAMPLING_RATE = get_from_env("DECIDE_BILLING_SAMPLING_RATE", 0.1, type_cast=float) DECIDE_BILLING_ANALYTICS_TOKEN = get_from_env("DECIDE_BILLING_ANALYTICS_TOKEN", None, type_cast=str, optional=True) +# temporary, used for safe rollout of defaulting people into anonymous events / process_persons: identified_only +DEFAULT_IDENTIFIED_ONLY_TEAM_ID_MIN: int = get_from_env("DEFAULT_IDENTIFIED_ONLY_TEAM_ID_MIN", 1000000, type_cast=int) + # Decide regular request analytics # Takes 3 possible formats, all separated by commas: # A number: "2" diff --git a/posthog/tasks/alerts/checks.py b/posthog/tasks/alerts/checks.py index 7c66c1158c12b..4986899047faa 100644 --- a/posthog/tasks/alerts/checks.py +++ b/posthog/tasks/alerts/checks.py @@ -1,17 +1,14 @@ from datetime import datetime, timedelta, UTC -from typing import Optional, cast +from typing import cast from dateutil.relativedelta import relativedelta +import traceback from celery import shared_task from celery.canvas import chain from django.db import transaction -from django.utils import timezone import structlog from sentry_sdk import capture_exception -from posthog.api.services.query import ExecutionMode -from posthog.caching.calculate_results import calculate_for_query_based_insight -from posthog.email import EmailMessage from posthog.errors import CHQueryErrorTooManySimultaneousQueries from posthog.hogql_queries.legacy_compatibility.flagged_conversion_manager import ( conversion_to_query_based, @@ -21,40 +18,28 @@ from posthog.tasks.utils import CeleryQueue from posthog.schema import ( TrendsQuery, - IntervalType, - ChartDisplayType, - NodeKind, AlertCalculationInterval, AlertState, - TrendsAlertConfig, ) from posthog.utils import get_from_dict_or_attr -from posthog.caching.fetch_from_cache import InsightResult -from posthog.clickhouse.client.limit import limit_concurrency from prometheus_client import Counter, Gauge from django.db.models import Q, F -from typing import TypedDict, NotRequired from collections import defaultdict +from posthog.tasks.alerts.utils import ( + AlertEvaluationResult, + calculation_interval_to_order, + send_notifications_for_errors, + send_notifications_for_breaches, + WRAPPER_NODE_KINDS, + alert_calculation_interval_to_relativedelta, +) 
+from posthog.tasks.alerts.trends import check_trends_alert + + +logger = structlog.get_logger(__name__) -# TODO: move the TrendResult UI type to schema.ts and use that instead -class TrendResult(TypedDict): - action: dict - actions: list[dict] - count: int - data: list[float] - days: list[str] - dates: list[str] - label: str - labels: list[str] - breakdown_value: str | int | list[str] - aggregated_value: NotRequired[float] - status: str | None - compare_label: str | None - compare: bool - persons_urls: list[dict] - persons: dict - filter: dict +class AlertCheckException(Exception): ... HOURLY_ALERTS_BACKLOG_GAUGE = Gauge( @@ -78,28 +63,9 @@ class TrendResult(TypedDict): ) -logger = structlog.get_logger(__name__) - - -WRAPPER_NODE_KINDS = [NodeKind.DATA_TABLE_NODE, NodeKind.DATA_VISUALIZATION_NODE, NodeKind.INSIGHT_VIZ_NODE] - -NON_TIME_SERIES_DISPLAY_TYPES = { - ChartDisplayType.BOLD_NUMBER, - ChartDisplayType.ACTIONS_PIE, - ChartDisplayType.ACTIONS_BAR_VALUE, - ChartDisplayType.ACTIONS_TABLE, - ChartDisplayType.WORLD_MAP, -} - - -def calculation_interval_to_order(interval: AlertCalculationInterval | None) -> int: - match interval: - case AlertCalculationInterval.HOURLY: - return 0 - case AlertCalculationInterval.DAILY: - return 1 - case _: - return 2 +@shared_task(ignore_result=True) +def checks_cleanup_task() -> None: + AlertCheck.clean_up_old_checks() @shared_task( @@ -145,48 +111,18 @@ def check_alerts_task() -> None: """ This runs every 2min to check for alerts that are due to recalculate """ - check_alerts() - - -@shared_task( - ignore_result=True, - queue=CeleryQueue.ALERTS.value, - autoretry_for=(CHQueryErrorTooManySimultaneousQueries,), - retry_backoff=1, - retry_backoff_max=10, - max_retries=3, - expires=60 * 60, -) -@limit_concurrency(5) # Max 5 concurrent alert checks -def check_alert_task(alert_id: str) -> None: - try: - check_alert(alert_id) - except Exception as err: - ALERT_CHECK_ERROR_COUNTER.inc() - capture_exception(Exception(f"Error checking alert, user wasn't notified: {err}")) - raise - - -@shared_task(ignore_result=True) -def checks_cleanup_task() -> None: - AlertCheck.clean_up_old_checks() - - -def check_alerts() -> None: now = datetime.now(UTC) # Use a fixed expiration time since tasks in the chain are executed sequentially expire_after = now + timedelta(minutes=30) - # find all alerts with the provided interval that are due to be calculated (next_check_at is null or less than now) + # find all alerts with the provided interval that are due to be calculated + # (next_check_at is null or less than now) and it's not snoozed alerts = ( AlertConfiguration.objects.filter( Q(enabled=True, is_calculating=False, next_check_at__lte=now) - | Q( - enabled=True, - is_calculating=False, - next_check_at__isnull=True, - ) + | Q(enabled=True, is_calculating=False, next_check_at__isnull=True) ) + .filter(Q(snoozed_until__isnull=True) | Q(snoozed_until__lt=now)) .order_by(F("next_check_at").asc(nulls_first=True)) .only("id", "team", "calculation_interval") ) @@ -207,6 +143,20 @@ def check_alerts() -> None: chain(*(check_alert_task.si(str(alert_id)).set(expires=expire_after) for alert_id in alert_ids))() +@shared_task( + ignore_result=True, + queue=CeleryQueue.ALERTS.value, + autoretry_for=(CHQueryErrorTooManySimultaneousQueries,), + retry_backoff=1, + retry_backoff_max=10, + max_retries=3, + expires=60 * 60, +) +# @limit_concurrency(5) Concurrency controlled by CeleryQueue.ALERTS for now +def check_alert_task(alert_id: str) -> None: + check_alert(alert_id) + + def 
check_alert(alert_id: str) -> None: try: alert = AlertConfiguration.objects.get(id=alert_id, enabled=True) @@ -230,12 +180,28 @@ def check_alert(alert_id: str) -> None: ) return + if alert.snoozed_until: + if alert.snoozed_until > now: + logger.warning( + "Alert has been snoozed so skipping checking it now", + alert=alert, + ) + return + else: + # not snoozed (anymore) so clear snoozed_until + alert.snoozed_until = None + alert.state = AlertState.NOT_FIRING + alert.is_calculating = True alert.save() try: - check_alert_atomically(alert) - except Exception: + check_alert_and_notify_atomically(alert) + except Exception as err: + ALERT_CHECK_ERROR_COUNTER.inc() + logger.exception(AlertCheckException(err)) + capture_exception(AlertCheckException(err)) + # raise again so alert check is retried depending on error type raise finally: # Get all updates with alert checks @@ -245,185 +211,122 @@ def check_alert(alert_id: str) -> None: @transaction.atomic -def check_alert_atomically(alert: AlertConfiguration) -> None: +def check_alert_and_notify_atomically(alert: AlertConfiguration) -> None: """ - Alert check only gets updated when we successfully - 1. Compute the aggregated value for the insight for the interval - 2. Compare the aggregated value with the threshold - 3. Send notifications if breaches are found + Computes insight results, checks alert for breaches and notifies user. + Only commits updates to alert state if all of the above complete successfully. + TODO: Later separate notification mechanism from alert checking mechanism (when we move to CDP) + so we can retry notification without re-computing insight. """ ALERT_COMPUTED_COUNTER.inc() + value = breaches = error = None - insight = alert.insight - aggregated_value: Optional[float] = None - error: Optional[dict] = None - + # 1. 
Evaluate insight and get alert value try: - with conversion_to_query_based(insight): - query = insight.query - kind = get_from_dict_or_attr(query, "kind") - - if kind in WRAPPER_NODE_KINDS: - query = get_from_dict_or_attr(query, "source") - kind = get_from_dict_or_attr(query, "kind") - - if kind == "TrendsQuery": - query = TrendsQuery.model_validate(query) - - filters_override = _calculate_date_range_override_for_alert(query) - - calculation_result = calculate_for_query_based_insight( - insight, - team=alert.team, - execution_mode=ExecutionMode.RECENT_CACHE_CALCULATE_BLOCKING_IF_STALE, - user=None, - filters_override=filters_override, - ) - else: - raise NotImplementedError(f"Alerts for {query.kind} are not supported yet") - - if not calculation_result.result: - raise RuntimeError(f"No results for alert {alert.id}") - - aggregated_value = _aggregate_insight_result_value(alert, query, calculation_result) + alert_evaluation_result = check_alert_for_insight(alert) + value = alert_evaluation_result.value + breaches = alert_evaluation_result.breaches except CHQueryErrorTooManySimultaneousQueries: - # error on our side, need to make sure to retry the alert check + # error on our side so we raise + # as celery task can be retried according to config raise except Exception as err: - # error possibly on user's config side - # notify user that alert check errored - error_message = f"AlertCheckError: error computing aggregate value for insight, alert_id = {alert.id}" - logger.exception(error_message) + capture_exception(AlertCheckException(err)) + # error can be on user side (incorrectly configured insight/alert) + # we won't retry and set alert to errored state + error = {"message": str(err), "traceback": traceback.format_exc()} - event_id = capture_exception( - Exception(error_message), - {"alert_id": alert.id, "query": str(query), "message": str(err)}, - ) - - error = { - "sentry_event_id": event_id, - "message": f"{error_message}: {str(err)}", - } + # 2. Check alert value against threshold + alert_check = add_alert_check(alert, value, breaches, error) - try: - # Lock alert to prevent concurrent state changes - alert = AlertConfiguration.objects.select_for_update().get(id=alert.id, enabled=True) - check, breaches, error, notify = alert.add_check(aggregated_value=aggregated_value, error=error) - except Exception as err: - error_message = f"AlertCheckError: error comparing insight value with threshold for alert_id = {alert.id}" - logger.exception(error_message) - - event_id = capture_exception( - Exception(error_message), - {"alert_id": alert.id, "query": str(query), "message": str(err)}, - ) - raise - - if not notify: - # no need to notify users + # 3. 
Notify users if needed + if not alert_check.targets_notified: return try: - match check.state: + match alert_check.state: case AlertState.NOT_FIRING: - logger.info("Check state is %s", check.state, alert_id=alert.id) + logger.info("Check state is %s", alert_check.state, alert_id=alert.id) case AlertState.ERRORED: - if error: - _send_notifications_for_errors(alert, error) + send_notifications_for_errors(alert, alert_check.error) case AlertState.FIRING: - _send_notifications_for_breaches(alert, breaches) + assert breaches is not None + send_notifications_for_breaches(alert, breaches) except Exception as err: error_message = f"AlertCheckError: error sending notifications for alert_id = {alert.id}" logger.exception(error_message) - event_id = capture_exception( + capture_exception( Exception(error_message), - {"alert_id": alert.id, "query": str(query), "message": str(err)}, + {"alert_id": alert.id, "message": str(err)}, ) + + # don't want alert state to be updated (so that it's retried as next_check_at won't be updated) + # so we raise again as @transaction.atomic decorator won't commit db updates + # TODO: later should have a way just to retry notification mechanism raise -def _calculate_date_range_override_for_alert(query: TrendsQuery) -> Optional[dict]: - if query.trendsFilter and query.trendsFilter.display in NON_TIME_SERIES_DISPLAY_TYPES: - # for single value insights, need to recompute with full time range - return None - - match query.interval: - case IntervalType.DAY: - date_from = "-1d" - case IntervalType.WEEK: - date_from = "-1w" - case IntervalType.MONTH: - date_from = "-1m" - case _: - date_from = "-1h" - - return {"date_from": date_from} - - -def _aggregate_insight_result_value(alert: AlertConfiguration, query: TrendsQuery, results: InsightResult) -> float: - if "type" in alert.config and alert.config["type"] == "TrendsAlertConfig": - alert_config = TrendsAlertConfig.model_validate(alert.config) - series_index = alert_config.series_index - result = cast(list[TrendResult], results.result)[series_index] - - if query.trendsFilter and query.trendsFilter.display in NON_TIME_SERIES_DISPLAY_TYPES: - return result["aggregated_value"] - - return result["data"][-1] - - raise ValueError(f"Unsupported alert config type: {alert_config.type}") - - -def _send_notifications_for_breaches(alert: AlertConfiguration, breaches: list[str]) -> None: - subject = f"PostHog alert {alert.name} is firing" - campaign_key = f"alert-firing-notification-{alert.id}-{timezone.now().timestamp()}" - insight_url = f"/project/{alert.team.pk}/insights/{alert.insight.short_id}?alert_id={alert.id}" - alert_url = f"{insight_url}/alerts/{alert.id}" - message = EmailMessage( - campaign_key=campaign_key, - subject=subject, - template_name="alert_check_firing", - template_context={ - "match_descriptions": breaches, - "insight_url": insight_url, - "insight_name": alert.insight.name, - "alert_url": alert_url, - "alert_name": alert.name, - }, +def check_alert_for_insight(alert: AlertConfiguration) -> AlertEvaluationResult: + """ + Matches insight type with alert checking logic + """ + insight = alert.insight + + with conversion_to_query_based(insight): + query = insight.query + kind = get_from_dict_or_attr(query, "kind") + + if kind in WRAPPER_NODE_KINDS: + query = get_from_dict_or_attr(query, "source") + kind = get_from_dict_or_attr(query, "kind") + + match kind: + case "TrendsQuery": + query = TrendsQuery.model_validate(query) + return check_trends_alert(alert, insight, query) + case _: + raise 
NotImplementedError(f"AlertCheckError: Alerts for {query.kind} are not supported yet") + + +def add_alert_check( + alert: AlertConfiguration, value: float | None, breaches: list[str] | None, error: dict | None +) -> AlertCheck: + notify = False + targets_notified = {} + + if error: + alert.state = AlertState.ERRORED + notify = True + elif breaches: + alert.state = AlertState.FIRING + notify = True + else: + alert.state = AlertState.NOT_FIRING # Set the Alert to not firing if the threshold is no longer met + # TODO: Optionally send a resolved notification when alert goes from firing to not_firing? + + now = datetime.now(UTC) + alert.last_checked_at = datetime.now(UTC) + + # IMPORTANT: update next_check_at according to interval + # ensure we don't recheck alert until the next interval is due + alert.next_check_at = (alert.next_check_at or now) + alert_calculation_interval_to_relativedelta( + cast(AlertCalculationInterval, alert.calculation_interval) ) - targets = alert.subscribed_users.all().values_list("email", flat=True) - if not targets: - raise RuntimeError(f"no targets configured for the alert {alert.id}") - for target in targets: - message.add_recipient(email=target) - - logger.info(f"Send notifications about {len(breaches)} anomalies", alert_id=alert.id) - message.send() - - -def _send_notifications_for_errors(alert: AlertConfiguration, error: dict) -> None: - subject = f"PostHog alert {alert.name} check failed to evaluate" - campaign_key = f"alert-firing-notification-{alert.id}-{timezone.now().timestamp()}" - insight_url = f"/project/{alert.team.pk}/insights/{alert.insight.short_id}?alert_id={alert.id}" - alert_url = f"{insight_url}/alerts/{alert.id}" - message = EmailMessage( - campaign_key=campaign_key, - subject=subject, - template_name="alert_check_firing", - template_context={ - "match_descriptions": error, - "insight_url": insight_url, - "insight_name": alert.insight.name, - "alert_url": alert_url, - "alert_name": alert.name, - }, + + if notify: + alert.last_notified_at = now + targets_notified = {"users": list(alert.subscribed_users.all().values_list("email", flat=True))} + + alert_check = AlertCheck.objects.create( + alert_configuration=alert, + calculated_value=value, + condition=alert.condition, + targets_notified=targets_notified, + state=alert.state, + error=error, ) - targets = alert.subscribed_users.all().values_list("email", flat=True) - if not targets: - raise RuntimeError(f"no targets configured for the alert {alert.id}") - for target in targets: - message.add_recipient(email=target) - - logger.info(f"Send notifications about alert checking error", alert_id=alert.id) - message.send() + + alert.save() + + return alert_check diff --git a/posthog/tasks/alerts/test/test_alert_checks.py b/posthog/tasks/alerts/test/test_alert_checks.py index e14c48359aac3..79fe6227180a0 100644 --- a/posthog/tasks/alerts/test/test_alert_checks.py +++ b/posthog/tasks/alerts/test/test_alert_checks.py @@ -5,7 +5,8 @@ from posthog.models.alert import AlertCheck from posthog.models.instance_setting import set_instance_setting -from posthog.tasks.alerts.checks import _send_notifications_for_breaches, check_alert +from posthog.tasks.alerts.utils import send_notifications_for_breaches +from posthog.tasks.alerts.checks import check_alert from posthog.test.base import APIBaseTest, _create_event, flush_persons_and_events, ClickhouseDestroyTablesMixin from posthog.api.test.dashboards import DashboardAPI from posthog.schema import ChartDisplayType, EventsNode, TrendsQuery, TrendsFilter, AlertState @@ 
-14,8 +15,8 @@ @freeze_time("2024-06-02T08:55:00.000Z") -@patch("posthog.tasks.alerts.checks._send_notifications_for_errors") -@patch("posthog.tasks.alerts.checks._send_notifications_for_breaches") +@patch("posthog.tasks.alerts.checks.send_notifications_for_errors") +@patch("posthog.tasks.alerts.checks.send_notifications_for_breaches") class TestAlertChecks(APIBaseTest, ClickhouseDestroyTablesMixin): def setUp(self) -> None: super().setUp() @@ -52,14 +53,15 @@ def setUp(self) -> None: "type": "TrendsAlertConfig", "series_index": 0, }, - "threshold": {"configuration": {"absoluteThreshold": {}}}, + "condition": {"type": "absolute_value"}, + "threshold": {"configuration": {"type": "absolute", "bounds": {}}}, }, ).json() def set_thresholds(self, lower: Optional[int] = None, upper: Optional[int] = None) -> None: self.client.patch( f"/api/projects/{self.team.id}/alerts/{self.alert['id']}", - data={"threshold": {"configuration": {"absoluteThreshold": {"lower": lower, "upper": upper}}}}, + data={"threshold": {"configuration": {"type": "absolute", "bounds": {"lower": lower, "upper": upper}}}}, ) def get_breach_description(self, mock_send_notifications_for_breaches: MagicMock, call_index: int) -> list[str]: @@ -225,7 +227,7 @@ def test_send_error_while_calculating( self, _mock_send_notifications_for_breaches: MagicMock, mock_send_notifications_for_errors: MagicMock ) -> None: with patch( - "posthog.tasks.alerts.checks.calculate_for_query_based_insight" + "posthog.tasks.alerts.trends.calculate_for_query_based_insight" ) as mock_calculate_for_query_based_insight: mock_calculate_for_query_based_insight.side_effect = Exception("Some error") @@ -238,7 +240,6 @@ def test_send_error_while_calculating( ) error_message = latest_alert_check.error["message"] - assert "AlertCheckError: error computing aggregate value for insight" in error_message assert "Some error" in error_message def test_error_while_calculating_on_alert_in_firing_state( @@ -254,7 +255,7 @@ def test_error_while_calculating_on_alert_in_firing_state( assert latest_alert_check.error is None with patch( - "posthog.tasks.alerts.checks.calculate_for_query_based_insight" + "posthog.tasks.alerts.trends.calculate_for_query_based_insight" ) as mock_calculate_for_query_based_insight: mock_calculate_for_query_based_insight.side_effect = Exception("Some error") @@ -269,7 +270,6 @@ def test_error_while_calculating_on_alert_in_firing_state( assert latest_alert_check.state == AlertState.ERRORED error_message = latest_alert_check.error["message"] - assert "AlertCheckError: error computing aggregate value for insight" in error_message assert "Some error" in error_message def test_error_while_calculating_on_alert_in_not_firing_state( @@ -285,7 +285,7 @@ def test_error_while_calculating_on_alert_in_not_firing_state( assert latest_alert_check.error is None with patch( - "posthog.tasks.alerts.checks.calculate_for_query_based_insight" + "posthog.tasks.alerts.trends.calculate_for_query_based_insight" ) as mock_calculate_for_query_based_insight: mock_calculate_for_query_based_insight.side_effect = Exception("Some error") @@ -299,7 +299,6 @@ def test_error_while_calculating_on_alert_in_not_firing_state( ) error_message = latest_alert_check.error["message"] - assert "AlertCheckError: error computing aggregate value for insight" in error_message assert "Some error" in error_message def test_alert_with_insight_with_filter( @@ -318,13 +317,13 @@ def test_alert_with_insight_with_filter( anomalies = self.get_breach_description(mock_send_notifications_for_breaches, 
call_index=0) assert "The trend value (0) is below the lower threshold (1.0)" in anomalies - @patch("posthog.tasks.alerts.checks.EmailMessage") + @patch("posthog.tasks.alerts.utils.EmailMessage") def test_send_emails( self, MockEmailMessage: MagicMock, mock_send_notifications_for_breaches: MagicMock, mock_send_errors: MagicMock ) -> None: mocked_email_messages = mock_email_messages(MockEmailMessage) alert = AlertConfiguration.objects.get(pk=self.alert["id"]) - _send_notifications_for_breaches(alert, ["first anomaly description", "second anomaly description"]) + send_notifications_for_breaches(alert, ["first anomaly description", "second anomaly description"]) assert len(mocked_email_messages) == 1 email = mocked_email_messages[0] diff --git a/posthog/tasks/alerts/test/test_trend_alerts.py b/posthog/tasks/alerts/test/test_trends_absolute_alerts.py similarity index 95% rename from posthog/tasks/alerts/test/test_trend_alerts.py rename to posthog/tasks/alerts/test/test_trends_absolute_alerts.py index a5ff389d59f98..9402117e79fe0 100644 --- a/posthog/tasks/alerts/test/test_trend_alerts.py +++ b/posthog/tasks/alerts/test/test_trends_absolute_alerts.py @@ -30,9 +30,9 @@ @freeze_time("2024-06-02T08:55:00.000Z") -@patch("posthog.tasks.alerts.checks._send_notifications_for_errors") -@patch("posthog.tasks.alerts.checks._send_notifications_for_breaches") -class TestTimeSeriesTrendsAlerts(APIBaseTest, ClickhouseDestroyTablesMixin): +@patch("posthog.tasks.alerts.checks.send_notifications_for_errors") +@patch("posthog.tasks.alerts.checks.send_notifications_for_breaches") +class TestTimeSeriesTrendsAbsoluteAlerts(APIBaseTest, ClickhouseDestroyTablesMixin): def setUp(self) -> None: super().setUp() @@ -54,8 +54,9 @@ def create_alert( "type": "TrendsAlertConfig", "series_index": series_index, }, + "condition": {"type": "absolute_value"}, "calculation_interval": AlertCalculationInterval.DAILY, - "threshold": {"configuration": {"absoluteThreshold": {"lower": lower, "upper": upper}}}, + "threshold": {"configuration": {"type": "absolute", "bounds": {"lower": lower, "upper": upper}}}, }, ).json() diff --git a/posthog/tasks/alerts/test/test_trends_relative_alerts.py b/posthog/tasks/alerts/test/test_trends_relative_alerts.py new file mode 100644 index 0000000000000..6e5b17b633894 --- /dev/null +++ b/posthog/tasks/alerts/test/test_trends_relative_alerts.py @@ -0,0 +1,775 @@ +from typing import Optional, Any +from unittest.mock import MagicMock, patch +import dateutil + + +import dateutil.relativedelta +from freezegun import freeze_time + +from posthog.models.alert import AlertCheck +from posthog.models.instance_setting import set_instance_setting +from posthog.tasks.alerts.checks import check_alert +from posthog.test.base import APIBaseTest, _create_event, flush_persons_and_events, ClickhouseDestroyTablesMixin +from posthog.api.test.dashboards import DashboardAPI +from posthog.schema import ( + ChartDisplayType, + EventsNode, + TrendsQuery, + TrendsFilter, + IntervalType, + InsightDateRange, + EventPropertyFilter, + PropertyOperator, + BaseMathType, + AlertState, + AlertCalculationInterval, + AlertConditionType, + InsightThresholdType, + BreakdownFilter, +) +from posthog.models import AlertConfiguration + +# Tuesday +FROZEN_TIME = dateutil.parser.parse("2024-06-04T08:55:00.000Z") + + +@freeze_time(FROZEN_TIME) +@patch("posthog.tasks.alerts.checks.send_notifications_for_errors") +@patch("posthog.tasks.alerts.checks.send_notifications_for_breaches") +class TestTimeSeriesTrendsRelativeAlerts(APIBaseTest, 
ClickhouseDestroyTablesMixin): + def setUp(self) -> None: + super().setUp() + + set_instance_setting("EMAIL_HOST", "fake_host") + set_instance_setting("EMAIL_ENABLED", True) + + self.dashboard_api = DashboardAPI(self.client, self.team, self.assertEqual) + + def create_alert( + self, + insight: dict, + series_index: int, + condition_type: AlertConditionType, + threshold_type: InsightThresholdType, + lower: Optional[float] = None, + upper: Optional[float] = None, + ) -> dict: + alert = self.client.post( + f"/api/projects/{self.team.id}/alerts", + data={ + "name": "alert name", + "insight": insight["id"], + "subscribed_users": [self.user.id], + "config": { + "type": "TrendsAlertConfig", + "series_index": series_index, + }, + "condition": {"type": condition_type}, + "calculation_interval": AlertCalculationInterval.DAILY, + "threshold": {"configuration": {"type": threshold_type, "bounds": {"lower": lower, "upper": upper}}}, + }, + ).json() + + return alert + + def create_time_series_trend_insight( + self, interval: IntervalType, breakdown: Optional[BreakdownFilter] = None + ) -> dict[str, Any]: + query_dict = TrendsQuery( + series=[ + EventsNode( + event="signed_up", + math=BaseMathType.TOTAL, + properties=[ + EventPropertyFilter( + key="$browser", + operator=PropertyOperator.EXACT, + value=["Chrome"], + ) + ], + ), + EventsNode( + event="$pageview", + name="Pageview", + math=BaseMathType.TOTAL, + ), + ], + breakdownFilter=breakdown, + trendsFilter=TrendsFilter(display=ChartDisplayType.ACTIONS_LINE_GRAPH), + interval=interval, + dateRange=InsightDateRange(date_from="-8w"), + ).model_dump() + + insight = self.dashboard_api.create_insight( + data={ + "name": "insight", + "query": query_dict, + } + )[1] + + return insight + + def test_alert_properties(self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + # alert if sign ups increase by less than 1 + alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + lower=1, + ) + + assert alert["state"] == AlertState.NOT_FIRING + assert alert["last_checked_at"] is None + assert alert["last_notified_at"] is None + assert alert["next_check_at"] is None + + check_alert(alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.last_checked_at == FROZEN_TIME + assert updated_alert.last_notified_at == FROZEN_TIME + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=alert["id"]).latest("created_at") + assert alert_check.calculated_value == 0 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_increase_absolute_upper_threshold_breached( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups increase by more than 1 + alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + upper=1, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + # Previous to previous interval (last to last 
week) has 0 events + # add events for previous interval (last week on Sat) + last_sat = FROZEN_TIME - dateutil.relativedelta.relativedelta(days=3) + with freeze_time(last_sat): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + check_alert(alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_increase_upper_threshold_breached( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups increase by more than 1 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + upper=1, + ) + + # alert if sign ups increase by more than 20% + percentage_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + upper=0.2, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 1 event + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 2 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="3", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="4", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert should fire as we had *increase* in events of (2 or 200%) week over week + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = 
AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_increase_lower_threshold_breached_1( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups increase by less than 2 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + lower=2, + ) + + # alert if sign ups increase by less than 20 + percentage_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.PERCENTAGE, + lower=0.5, # 50% + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 2 events + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 1 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="3", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert should fire as overall we had *decrease* in events (-1 or -50%) week over week + # check absolute alert + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == -1 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + # check percentage alert + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == -0.5 # 50% decrease + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_increase_lower_threshold_breached_2( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups increase by less than 2 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + lower=2, + ) + + # alert if sign ups increase by less than 110% + percentage_alert = 
self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.PERCENTAGE, + lower=1.1, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 1 event + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 2 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="3", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert should fire as overall we had *increase* in events of just (1 or 100%) week over week + # alert required at least 2 + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 1 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 1 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_decrease_upper_threshold_breached( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups decrease by more than 1 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_DECREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + upper=1, + ) + + # alert if sign ups decrease by more than 20% + percentage_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_DECREASE, + threshold_type=InsightThresholdType.PERCENTAGE, + upper=0.2, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 3 event + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + 
event="signed_up", + distinct_id="3", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 1 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="4", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert should fire as we had decrease in events of (2 or 200%) week over week + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == (2 / 3) + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_decrease_lower_threshold_breached( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups decrease by less than 2 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_DECREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + lower=2, + ) + + # alert if sign ups decrease by less than 80% + percentage_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_DECREASE, + threshold_type=InsightThresholdType.PERCENTAGE, + lower=0.8, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 2 event + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 1 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="4", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert should fire as we had decrease in events of (1 or 50%) week over week + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = 
AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 1 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + + assert alert_check.calculated_value == 0.5 + assert alert_check.state == AlertState.FIRING + assert alert_check.error is None + + def test_relative_increase_no_threshold_breached( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups increase by more than 4 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + upper=4, + ) + + # alert if sign ups increase by more than 400% + percentage_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_INCREASE, + threshold_type=InsightThresholdType.PERCENTAGE, + upper=4, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 1 event + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 3 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="4", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="3", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert shouldn't fire as increase was only of 2 or 200% + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.NOT_FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.NOT_FIRING + assert alert_check.error is None + + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.NOT_FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.NOT_FIRING + assert alert_check.error is None + + def 
test_relative_decrease_no_threshold_breached( + self, mock_send_breaches: MagicMock, mock_send_errors: MagicMock + ) -> None: + insight = self.create_time_series_trend_insight(interval=IntervalType.WEEK) + + # alert if sign ups increase by more than 4 + absolute_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_DECREASE, + threshold_type=InsightThresholdType.ABSOLUTE, + upper=4, + ) + + # alert if sign ups decrease by more than 80% + percentage_alert = self.create_alert( + insight, + series_index=0, + condition_type=AlertConditionType.RELATIVE_DECREASE, + threshold_type=InsightThresholdType.PERCENTAGE, + upper=0.8, + ) + + # FROZEN_TIME is on Tue, insight has weekly interval + # we aggregate our weekly insight numbers to display for Sun (19th May, 26th May, 2nd June) + + # set previous to previous interval (last to last week) to have 3 events + last_to_last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=2) + + with freeze_time(last_to_last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="1", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="4", + properties={"$browser": "Chrome"}, + ) + _create_event( + team=self.team, + event="signed_up", + distinct_id="2", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # set previous interval to have 1 event + # add events for last week (last Tue) + last_tue = FROZEN_TIME - dateutil.relativedelta.relativedelta(weeks=1) + with freeze_time(last_tue): + _create_event( + team=self.team, + event="signed_up", + distinct_id="3", + properties={"$browser": "Chrome"}, + ) + flush_persons_and_events() + + # alert shouldn't fire as increase was only of 2 or 200% + check_alert(absolute_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=absolute_alert["id"]) + assert updated_alert.state == AlertState.NOT_FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=absolute_alert["id"]).latest("created_at") + assert alert_check.calculated_value == 2 + assert alert_check.state == AlertState.NOT_FIRING + assert alert_check.error is None + + check_alert(percentage_alert["id"]) + + updated_alert = AlertConfiguration.objects.get(pk=percentage_alert["id"]) + assert updated_alert.state == AlertState.NOT_FIRING + assert updated_alert.next_check_at == FROZEN_TIME + dateutil.relativedelta.relativedelta(days=1) + + alert_check = AlertCheck.objects.filter(alert_configuration=percentage_alert["id"]).latest("created_at") + assert alert_check.calculated_value == (2 / 3) + assert alert_check.state == AlertState.NOT_FIRING + assert alert_check.error is None diff --git a/posthog/tasks/alerts/trends.py b/posthog/tasks/alerts/trends.py new file mode 100644 index 0000000000000..3f7fdebae2644 --- /dev/null +++ b/posthog/tasks/alerts/trends.py @@ -0,0 +1,219 @@ +from typing import Optional, cast + +from posthog.api.services.query import ExecutionMode +from posthog.caching.calculate_results import calculate_for_query_based_insight + +from posthog.models import AlertConfiguration, Insight +from posthog.schema import ( + TrendsQuery, + IntervalType, + TrendsAlertConfig, + InsightThreshold, + AlertCondition, + AlertConditionType, + InsightsThresholdBounds, + InsightThresholdType, +) +from posthog.caching.fetch_from_cache import InsightResult +from typing import TypedDict, NotRequired +from 
posthog.tasks.alerts.utils import (
+    AlertEvaluationResult,
+    NON_TIME_SERIES_DISPLAY_TYPES,
+)
+
+
+# TODO: move the TrendResult UI type to schema.ts and use that instead
+class TrendResult(TypedDict):
+    action: dict
+    actions: list[dict]
+    count: int
+    data: list[float]
+    days: list[str]
+    dates: list[str]
+    label: str
+    labels: list[str]
+    breakdown_value: str | int | list[str]
+    aggregated_value: NotRequired[float]
+    status: str | None
+    compare_label: str | None
+    compare: bool
+    persons_urls: list[dict]
+    persons: dict
+    filter: dict
+
+
+def check_trends_alert(alert: AlertConfiguration, insight: Insight, query: TrendsQuery) -> AlertEvaluationResult:
+    if "type" in alert.config and alert.config["type"] == "TrendsAlertConfig":
+        config = TrendsAlertConfig.model_validate(alert.config)
+    else:
+        raise ValueError(f"Unsupported alert config type: {alert.config}")
+
+    condition = AlertCondition.model_validate(alert.condition)
+    threshold = InsightThreshold.model_validate(alert.threshold.configuration) if alert.threshold else None
+
+    if not threshold:
+        return AlertEvaluationResult(value=0, breaches=[])
+
+    match condition.type:
+        case AlertConditionType.ABSOLUTE_VALUE:
+            if threshold.type != InsightThresholdType.ABSOLUTE:
+                raise ValueError(f"Absolute threshold not configured for alert condition ABSOLUTE_VALUE")
+
+            if _is_non_time_series_trend(query):
+                # for non time series trends, the result is a single value aggregated over the full range
+                # so we need to compute the full insight
+                filters_override = None
+            else:
+                # for time series trends, we only want the value for the current interval
+                # (last hour, last day, last week, last month) depending on the alert calculation interval
+                filters_override = _date_range_override_for_intervals(query)
+
+            calculation_result = calculate_for_query_based_insight(
+                insight,
+                team=alert.team,
+                execution_mode=ExecutionMode.RECENT_CACHE_CALCULATE_BLOCKING_IF_STALE,
+                user=None,
+                filters_override=filters_override,
+            )
+
+            if not calculation_result.result:
+                raise RuntimeError(f"No results found for insight with alert id = {alert.id}")
+
+            current_interval_value = _pick_interval_value_from_trend_result(config, query, calculation_result)
+            breaches = _validate_bounds(threshold.bounds, current_interval_value)
+
+            return AlertEvaluationResult(value=current_interval_value, breaches=breaches)
+
+        case AlertConditionType.RELATIVE_INCREASE:
+            if _is_non_time_series_trend(query):
+                raise ValueError(f"Relative alerts not supported for non time series trends")
+
+            # to measure relative increase, we can't alert until the current interval has completed
+            # as checking for an increase of less than X needs the interval to complete
+            # so we compute the trend values for the last 3 intervals
+            # and then compare the previous interval with the value for the interval before it
+            filters_overrides = _date_range_override_for_intervals(query, last_x_intervals=3)
+
+            calculation_result = calculate_for_query_based_insight(
+                insight,
+                team=alert.team,
+                execution_mode=ExecutionMode.RECENT_CACHE_CALCULATE_BLOCKING_IF_STALE,
+                user=None,
+                filters_override=filters_overrides,
+            )
+
+            prev_interval_value = _pick_interval_value_from_trend_result(config, query, calculation_result, -1)
+            prev_prev_interval_value = _pick_interval_value_from_trend_result(config, query, calculation_result, -2)
+
+            if threshold.type == InsightThresholdType.ABSOLUTE:
+                increase = prev_interval_value - prev_prev_interval_value
+                breaches = _validate_bounds(threshold.bounds, increase)
+            elif threshold.type == InsightThresholdType.PERCENTAGE:
+                increase = (prev_interval_value - prev_prev_interval_value) / prev_prev_interval_value
+                breaches = _validate_bounds(threshold.bounds, increase, is_percentage=True)
+            else:
+                raise ValueError(
+                    f"Neither percentage nor absolute threshold configured for alert condition RELATIVE_INCREASE"
+                )
+
+            return AlertEvaluationResult(value=increase, breaches=breaches)
+
+        case AlertConditionType.RELATIVE_DECREASE:
+            if _is_non_time_series_trend(query):
+                raise ValueError(f"Relative alerts not supported for non time series trends")
+
+            # to measure relative decrease, we can't alert until the current interval has completed
+            # as checking for a decrease of more than X needs the interval to complete
+            # so we compute the trend values for the last 3 intervals
+            # and then compare the previous interval with the value for the interval before it
+            filters_overrides = _date_range_override_for_intervals(query, last_x_intervals=3)
+
+            calculation_result = calculate_for_query_based_insight(
+                insight,
+                team=alert.team,
+                execution_mode=ExecutionMode.RECENT_CACHE_CALCULATE_BLOCKING_IF_STALE,
+                user=None,
+                filters_override=filters_overrides,
+            )
+
+            prev_interval_value = _pick_interval_value_from_trend_result(config, query, calculation_result, -1)
+            prev_prev_interval_value = _pick_interval_value_from_trend_result(config, query, calculation_result, -2)
+
+            if threshold.type == InsightThresholdType.ABSOLUTE:
+                decrease = prev_prev_interval_value - prev_interval_value
+                breaches = _validate_bounds(threshold.bounds, decrease)
+            elif threshold.type == InsightThresholdType.PERCENTAGE:
+                decrease = (prev_prev_interval_value - prev_interval_value) / prev_prev_interval_value
+                breaches = _validate_bounds(threshold.bounds, decrease, is_percentage=True)
+            else:
+                raise ValueError(
+                    f"Neither percentage nor absolute threshold configured for alert condition RELATIVE_DECREASE"
+                )
+
+            return AlertEvaluationResult(value=decrease, breaches=breaches)
+
+        case _:
+            raise NotImplementedError(f"Unsupported alert condition type: {condition.type}")
+
+
+def _is_non_time_series_trend(query: TrendsQuery) -> bool:
+    return bool(query.trendsFilter and query.trendsFilter.display in NON_TIME_SERIES_DISPLAY_TYPES)
+
+
+def _date_range_override_for_intervals(query: TrendsQuery, last_x_intervals: int = 1) -> Optional[dict]:
+    """
+    Resulting filter overrides don't set 'date_to' so we always get the value for the current interval.
+    last_x_intervals controls how many intervals to look back.
+    """
+    assert last_x_intervals > 0
+
+    match query.interval:
+        case IntervalType.DAY:
+            date_from = f"-{last_x_intervals}d"
+        case IntervalType.WEEK:
+            date_from = f"-{last_x_intervals}w"
+        case IntervalType.MONTH:
+            date_from = f"-{last_x_intervals}m"
+        case _:
+            date_from = f"-{last_x_intervals}h"
+
+    return {"date_from": date_from}
+
+
+def _pick_interval_value_from_trend_result(
+    config: TrendsAlertConfig, query: TrendsQuery, results: InsightResult, interval_to_pick: int = 0
+) -> float:
+    """
+    interval_to_pick controls whether to pick the value for the current (0), last (-1), one before last (-2)...
+ """ + assert interval_to_pick <= 0 + + series_index = config.series_index + result = cast(list[TrendResult], results.result)[series_index] + + if _is_non_time_series_trend(query): + # only one value in result + return result["aggregated_value"] + + data = result["data"] + # data is pre sorted in ascending order of timestamps + index_from_back = len(data) - 1 + interval_to_pick + return data[index_from_back] + + +def _validate_bounds( + bounds: InsightsThresholdBounds | None, calculated_value: float, is_percentage: bool = False +) -> list[str]: + if not bounds: + return [] + + formatted_value = f"{calculated_value:.2%}" if is_percentage else calculated_value + + if bounds.lower is not None and calculated_value < bounds.lower: + lower_value = f"{bounds.lower:.2%}" if is_percentage else bounds.lower + return [f"The trend value ({formatted_value}) is below the lower threshold ({lower_value})"] + if bounds.upper is not None and calculated_value > bounds.upper: + upper_value = f"{bounds.upper:.2%}" if is_percentage else bounds.upper + return [f"The trend value ({formatted_value}) is above the upper threshold ({upper_value})"] + + return [] diff --git a/posthog/tasks/alerts/utils.py b/posthog/tasks/alerts/utils.py new file mode 100644 index 0000000000000..06b94cc938089 --- /dev/null +++ b/posthog/tasks/alerts/utils.py @@ -0,0 +1,110 @@ +from dateutil.relativedelta import relativedelta + +from django.utils import timezone +import structlog + +from posthog.email import EmailMessage +from posthog.models import AlertConfiguration +from posthog.schema import ( + ChartDisplayType, + NodeKind, + AlertCalculationInterval, +) +from dataclasses import dataclass + +logger = structlog.get_logger(__name__) + + +@dataclass +class AlertEvaluationResult: + value: float | None + breaches: list[str] | None + + +WRAPPER_NODE_KINDS = [NodeKind.DATA_TABLE_NODE, NodeKind.DATA_VISUALIZATION_NODE, NodeKind.INSIGHT_VIZ_NODE] + +NON_TIME_SERIES_DISPLAY_TYPES = { + ChartDisplayType.BOLD_NUMBER, + ChartDisplayType.ACTIONS_PIE, + ChartDisplayType.ACTIONS_BAR_VALUE, + ChartDisplayType.ACTIONS_TABLE, + ChartDisplayType.WORLD_MAP, +} + + +def calculation_interval_to_order(interval: AlertCalculationInterval | None) -> int: + match interval: + case AlertCalculationInterval.HOURLY: + return 0 + case AlertCalculationInterval.DAILY: + return 1 + case _: + return 2 + + +def alert_calculation_interval_to_relativedelta(alert_calculation_interval: AlertCalculationInterval) -> relativedelta: + match alert_calculation_interval: + case AlertCalculationInterval.HOURLY: + return relativedelta(hours=1) + case AlertCalculationInterval.DAILY: + return relativedelta(days=1) + case AlertCalculationInterval.WEEKLY: + return relativedelta(weeks=1) + case AlertCalculationInterval.MONTHLY: + return relativedelta(months=1) + case _: + raise ValueError(f"Invalid alert calculation interval: {alert_calculation_interval}") + + +def send_notifications_for_breaches(alert: AlertConfiguration, breaches: list[str]) -> None: + subject = f"PostHog alert {alert.name} is firing" + campaign_key = f"alert-firing-notification-{alert.id}-{timezone.now().timestamp()}" + insight_url = f"/project/{alert.team.pk}/insights/{alert.insight.short_id}?alert_id={alert.id}" + alert_url = f"{insight_url}/alerts/{alert.id}" + message = EmailMessage( + campaign_key=campaign_key, + subject=subject, + template_name="alert_check_firing", + template_context={ + "match_descriptions": breaches, + "insight_url": insight_url, + "insight_name": alert.insight.name, + "alert_url": 
alert_url, + "alert_name": alert.name, + }, + ) + targets = alert.subscribed_users.all().values_list("email", flat=True) + if not targets: + raise RuntimeError(f"no targets configured for the alert {alert.id}") + for target in targets: + message.add_recipient(email=target) + + logger.info(f"Send notifications about {len(breaches)} anomalies", alert_id=alert.id) + message.send() + + +def send_notifications_for_errors(alert: AlertConfiguration, error: dict) -> None: + subject = f"PostHog alert {alert.name} check failed to evaluate" + campaign_key = f"alert-firing-notification-{alert.id}-{timezone.now().timestamp()}" + insight_url = f"/project/{alert.team.pk}/insights/{alert.insight.short_id}?alert_id={alert.id}" + alert_url = f"{insight_url}/alerts/{alert.id}" + message = EmailMessage( + campaign_key=campaign_key, + subject=subject, + template_name="alert_check_firing", + template_context={ + "match_descriptions": error, + "insight_url": insight_url, + "insight_name": alert.insight.name, + "alert_url": alert_url, + "alert_name": alert.name, + }, + ) + targets = alert.subscribed_users.all().values_list("email", flat=True) + if not targets: + raise RuntimeError(f"no targets configured for the alert {alert.id}") + for target in targets: + message.add_recipient(email=target) + + logger.info(f"Send notifications about alert checking error", alert_id=alert.id) + message.send() diff --git a/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr b/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr index 14053826ae03c..cc4697339daa4 100644 --- a/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr +++ b/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr @@ -96,6 +96,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -359,6 +360,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/temporal/batch_exports/batch_exports.py b/posthog/temporal/batch_exports/batch_exports.py index 4c1114cfb2cdf..16d4ccdacf0d0 100644 --- a/posthog/temporal/batch_exports/batch_exports.py +++ b/posthog/temporal/batch_exports/batch_exports.py @@ -1,3 +1,5 @@ +import asyncio +import collections import collections.abc import dataclasses import datetime as dt @@ -251,6 +253,135 @@ async def iter_records_from_model_view( yield record_batch +class RecordBatchQueue(asyncio.Queue): + """A queue of pyarrow RecordBatch instances limited by bytes.""" + + def __init__(self, max_size_bytes=0): + super().__init__(maxsize=max_size_bytes) + self._bytes_size = 0 + self._schema_set = asyncio.Event() + self.record_batch_schema = None + # This is set by `asyncio.Queue.__init__` calling `_init` + self._queue: collections.deque + + def _get(self) -> pa.RecordBatch: + """Override parent `_get` to keep track of bytes.""" + item = self._queue.popleft() + self._bytes_size -= item.get_total_buffer_size() + return item + + def _put(self, item: pa.RecordBatch) -> 
None: + """Override parent `_put` to keep track of bytes.""" + self._bytes_size += item.get_total_buffer_size() + + if not self._schema_set.is_set(): + self.set_schema(item) + + self._queue.append(item) + + def set_schema(self, record_batch: pa.RecordBatch) -> None: + """Used to keep track of schema of events in queue.""" + self.record_batch_schema = record_batch.schema + self._schema_set.set() + + async def get_schema(self) -> pa.Schema: + """Return the schema of events in queue. + + Currently, this is not enforced. It's purely for reporting to users of + the queue what do the record batches look like. It's up to the producer + to ensure all record batches have the same schema. + """ + await self._schema_set.wait() + return self.record_batch_schema + + def qsize(self) -> int: + """Size in bytes of record batches in the queue. + + This is used to determine when the queue is full, so it returns the + number of bytes. + """ + return self._bytes_size + + +def start_produce_batch_export_record_batches( + client: ClickHouseClient, + model_name: str, + is_backfill: bool, + team_id: int, + interval_start: str, + interval_end: str, + fields: list[BatchExportField] | None = None, + destination_default_fields: list[BatchExportField] | None = None, + **parameters, +): + """Start producing batch export record batches from a model query. + + Depending on the model, we issue a query to ClickHouse and initialize a + producer to stream record batches to a queue. Callers can then consume from + this queue as the record batches arrive. The producer runs asynchronously as + a background task, which is returned. + + Returns: + A tuple containing the record batch queue, an event used by the producer + to indicate there is nothing more to produce, and a reference to the + producer task + """ + if fields is None: + if destination_default_fields is None: + fields = default_fields() + else: + fields = destination_default_fields + + if model_name == "persons": + view = SELECT_FROM_PERSONS_VIEW + + else: + if parameters.get("exclude_events", None): + parameters["exclude_events"] = list(parameters["exclude_events"]) + else: + parameters["exclude_events"] = [] + + if parameters.get("include_events", None): + parameters["include_events"] = list(parameters["include_events"]) + else: + parameters["include_events"] = [] + + if str(team_id) in settings.UNCONSTRAINED_TIMESTAMP_TEAM_IDS: + query_template = SELECT_FROM_EVENTS_VIEW_UNBOUNDED + elif is_backfill: + query_template = SELECT_FROM_EVENTS_VIEW_BACKFILL + else: + query_template = SELECT_FROM_EVENTS_VIEW + lookback_days = settings.OVERRIDE_TIMESTAMP_TEAM_IDS.get(team_id, settings.DEFAULT_TIMESTAMP_LOOKBACK_DAYS) + parameters["lookback_days"] = lookback_days + + if "_inserted_at" not in [field["alias"] for field in fields]: + control_fields = [BatchExportField(expression="_inserted_at", alias="_inserted_at")] + else: + control_fields = [] + + query_fields = ",".join(f"{field['expression']} AS {field['alias']}" for field in fields + control_fields) + + view = query_template.substitute(fields=query_fields) + + parameters["team_id"] = team_id + parameters["interval_start"] = dt.datetime.fromisoformat(interval_start).strftime("%Y-%m-%d %H:%M:%S") + parameters["interval_end"] = dt.datetime.fromisoformat(interval_end).strftime("%Y-%m-%d %H:%M:%S") + extra_query_parameters = parameters.pop("extra_query_parameters", {}) or {} + parameters = {**parameters, **extra_query_parameters} + + queue = RecordBatchQueue(max_size_bytes=settings.BATCH_EXPORT_BUFFER_QUEUE_MAX_SIZE_BYTES) + 
query_id = uuid.uuid4() + done_event = asyncio.Event() + produce_task = asyncio.create_task( + client.aproduce_query_as_arrow_record_batches( + view, queue=queue, done_event=done_event, query_parameters=parameters, query_id=str(query_id) + ) + ) + + return queue, done_event, produce_task + + def iter_records( client: ClickHouseClient, team_id: int, diff --git a/posthog/temporal/batch_exports/bigquery_batch_export.py b/posthog/temporal/batch_exports/bigquery_batch_export.py index 9da8c89e56e53..521c6b1d92f85 100644 --- a/posthog/temporal/batch_exports/bigquery_batch_export.py +++ b/posthog/temporal/batch_exports/bigquery_batch_export.py @@ -3,9 +3,12 @@ import contextlib import dataclasses import datetime as dt +import functools import json +import operator import pyarrow as pa +import structlog from django.conf import settings from google.cloud import bigquery from google.oauth2 import service_account @@ -27,8 +30,8 @@ default_fields, execute_batch_export_insert_activity, get_data_interval, - iter_model_records, start_batch_export_run, + start_produce_batch_export_record_batches, ) from posthog.temporal.batch_exports.metrics import ( get_bytes_exported_metric, @@ -42,18 +45,19 @@ ) from posthog.temporal.batch_exports.utils import ( JsonType, - apeek_first_and_rewind, cast_record_batch_json_columns, set_status_to_running_task, ) from posthog.temporal.common.clickhouse import get_client from posthog.temporal.common.heartbeat import Heartbeater -from posthog.temporal.common.logger import bind_temporal_worker_logger +from posthog.temporal.common.logger import configure_temporal_worker_logger from posthog.temporal.common.utils import ( BatchExportHeartbeatDetails, should_resume_from_activity_heartbeat, ) +logger = structlog.get_logger() + def get_bigquery_fields_from_record_schema( record_schema: pa.Schema, known_json_columns: list[str] @@ -72,6 +76,9 @@ def get_bigquery_fields_from_record_schema( bq_schema: list[bigquery.SchemaField] = [] for name in record_schema.names: + if name == "_inserted_at": + continue + pa_field = record_schema.field(name) if pa.types.is_string(pa_field.type) or isinstance(pa_field.type, JsonType): @@ -264,8 +271,13 @@ async def load_parquet_file(self, parquet_file, table, table_schema): schema=table_schema, ) - load_job = self.load_table_from_file(parquet_file, table, job_config=job_config, rewind=True) - return await asyncio.to_thread(load_job.result) + await logger.adebug("Creating BigQuery load job for Parquet file '%s'", parquet_file) + load_job = await asyncio.to_thread( + self.load_table_from_file, parquet_file, table, job_config=job_config, rewind=True + ) + await logger.adebug("Waiting for BigQuery load job for Parquet file '%s'", parquet_file) + result = await asyncio.to_thread(load_job.result) + return result async def load_jsonl_file(self, jsonl_file, table, table_schema): """Execute a COPY FROM query with given connection to copy contents of jsonl_file.""" @@ -274,8 +286,14 @@ async def load_jsonl_file(self, jsonl_file, table, table_schema): schema=table_schema, ) - load_job = self.load_table_from_file(jsonl_file, table, job_config=job_config, rewind=True) - return await asyncio.to_thread(load_job.result) + await logger.adebug("Creating BigQuery load job for JSONL file '%s'", jsonl_file) + load_job = await asyncio.to_thread( + self.load_table_from_file, jsonl_file, table, job_config=job_config, rewind=True + ) + + await logger.adebug("Waiting for BigQuery load job for JSONL file '%s'", jsonl_file) + result = await asyncio.to_thread(load_job.result) + 
return result @contextlib.contextmanager @@ -327,7 +345,9 @@ def bigquery_default_fields() -> list[BatchExportField]: @activity.defn async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> RecordsCompleted: """Activity streams data from ClickHouse to BigQuery.""" - logger = await bind_temporal_worker_logger(team_id=inputs.team_id, destination="BigQuery") + logger = await configure_temporal_worker_logger( + logger=structlog.get_logger(), team_id=inputs.team_id, destination="BigQuery" + ) await logger.ainfo( "Batch exporting range %s - %s to BigQuery: %s.%s.%s", inputs.data_interval_start, @@ -357,24 +377,52 @@ async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> Records field.name for field in dataclasses.fields(inputs) }: model = inputs.batch_export_model + if model is not None: + model_name = model.name + extra_query_parameters = model.schema["values"] if model.schema is not None else None + fields = model.schema["fields"] if model.schema is not None else None + else: + model_name = "events" + extra_query_parameters = None + fields = None else: model = inputs.batch_export_schema + model_name = "custom" + extra_query_parameters = model["values"] if model is not None else {} + fields = model["fields"] if model is not None else None - records_iterator = iter_model_records( + queue, done_event, produce_task = start_produce_batch_export_record_batches( client=client, - model=model, + model_name=model_name, + is_backfill=inputs.is_backfill, team_id=inputs.team_id, interval_start=data_interval_start, interval_end=inputs.data_interval_end, exclude_events=inputs.exclude_events, include_events=inputs.include_events, + fields=fields, destination_default_fields=bigquery_default_fields(), - is_backfill=inputs.is_backfill, + extra_query_parameters=extra_query_parameters, ) - first_record_batch, records_iterator = await apeek_first_and_rewind(records_iterator) - if first_record_batch is None: + get_schema_task = asyncio.create_task(queue.get_schema()) + wait_for_producer_done_task = asyncio.create_task(done_event.wait()) + + await asyncio.wait([get_schema_task, wait_for_producer_done_task], return_when=asyncio.FIRST_COMPLETED) + + # Finishing producing happens sequentially after putting to queue and setting the schema. + # So, either we finished both tasks, or we finished without putting anything in the queue. + if get_schema_task.done(): + # In the first case, we'll land here. + # The schema is available, and the queue is not empty, so we can start the batch export. + record_batch_schema = get_schema_task.result() + elif wait_for_producer_done_task.done(): + # In the second case, we'll land here. + # The schema is not available as the queue is empty. + # Since we finished producing with an empty queue, there is nothing to batch export. 
return 0 + else: + raise Exception("Unreachable") if inputs.use_json_type is True: json_type = "JSON" @@ -383,8 +431,6 @@ async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> Records json_type = "STRING" json_columns = [] - first_record_batch = cast_record_batch_json_columns(first_record_batch, json_columns=json_columns) - if model is None or (isinstance(model, BatchExportModel) and model.name == "events"): schema = [ bigquery.SchemaField("uuid", "STRING"), @@ -401,9 +447,7 @@ async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> Records bigquery.SchemaField("bq_ingested_timestamp", "TIMESTAMP"), ] else: - column_names = [column for column in first_record_batch.schema.names if column != "_inserted_at"] - record_schema = first_record_batch.select(column_names).schema - schema = get_bigquery_fields_from_record_schema(record_schema, known_json_columns=json_columns) + schema = get_bigquery_fields_from_record_schema(record_batch_schema, known_json_columns=json_columns) rows_exported = get_rows_exported_metric() bytes_exported = get_bytes_exported_metric() @@ -446,41 +490,47 @@ async def flush_to_bigquery( last: bool, error: Exception | None, ): + table = bigquery_stage_table if requires_merge else bigquery_table await logger.adebug( - "Loading %s records of size %s bytes", + "Loading %s records of size %s bytes to BigQuery table '%s'", records_since_last_flush, bytes_since_last_flush, + table, ) - table = bigquery_stage_table if requires_merge else bigquery_table await bq_client.load_jsonl_file(local_results_file, table, schema) + await logger.adebug("Loading to BigQuery table '%s' finished", table) rows_exported.add(records_since_last_flush) bytes_exported.add(bytes_since_last_flush) heartbeater.details = (str(last_inserted_at),) - record_schema = pa.schema( - # NOTE: For some reason, some batches set non-nullable fields as non-nullable, whereas other - # record batches have them as nullable. - # Until we figure it out, we set all fields to nullable. There are some fields we know - # are not nullable, but I'm opting for the more flexible option until we out why schemas differ - # between batches. 
- [ - field.with_nullable(True) - for field in first_record_batch.select([field.name for field in schema]).schema - ] - ) - writer = JSONLBatchExportWriter( - max_bytes=settings.BATCH_EXPORT_BIGQUERY_UPLOAD_CHUNK_SIZE_BYTES, - flush_callable=flush_to_bigquery, - ) + flush_tasks = [] + while not queue.empty() or not done_event.is_set(): + await logger.adebug("Starting record batch writer") + flush_start_event = asyncio.Event() + task = asyncio.create_task( + consume_batch_export_record_batches( + queue, + done_event, + flush_start_event, + flush_to_bigquery, + json_columns, + settings.BATCH_EXPORT_BIGQUERY_UPLOAD_CHUNK_SIZE_BYTES, + ) + ) + + await flush_start_event.wait() - async with writer.open_temporary_file(): - async for record_batch in records_iterator: - record_batch = cast_record_batch_json_columns(record_batch, json_columns=json_columns) + flush_tasks.append(task) + + await logger.adebug( + "Finished producing and consuming all record batches, now waiting on any pending flush tasks" + ) + await asyncio.wait(flush_tasks) - await writer.write_record_batch(record_batch) + records_total = functools.reduce(operator.add, (task.result() for task in flush_tasks)) if requires_merge: merge_key = ( @@ -494,7 +544,74 @@ async def flush_to_bigquery( update_fields=schema, ) - return writer.records_total + return records_total + + +async def consume_batch_export_record_batches( + queue: asyncio.Queue, + done_event: asyncio.Event, + flush_start_event: asyncio.Event, + flush_to_bigquery: FlushCallable, + json_columns: list[str], + max_bytes: int, +): + """Consume batch export record batches from queue into a writing loop. + + Each record will be written to a temporary file, and flushed after + configured `max_bytes`. Flush is done on context manager exit by + `JSONLBatchExportWriter`. + + This coroutine reports when flushing will start by setting the + `flush_start_event`. This is used by the main thread to start a new writer + task as flushing is about to begin, since that can be too slow to do + sequentially. + + If there are not enough events to fill up `max_bytes`, the writing + loop will detect that there are no more events produced and shut itself off + by using the `done_event`, which should be set by the queue producer. + + Arguments: + queue: The queue we will be listening on for record batches. + done_event: Event set by producer when done. + flush_to_start_event: Event set by us when flushing is to about to + start. + json_columns: Used to cast columns of the record batch to JSON. + max_bytes: Max bytes to write before flushing. + + Returns: + Number of total records written and flushed in this task. 
+ """ + writer = JSONLBatchExportWriter( + max_bytes=max_bytes, + flush_callable=flush_to_bigquery, + ) + + async with writer.open_temporary_file(): + await logger.adebug("Starting record batch writing loop") + while True: + try: + record_batch = queue.get_nowait() + except asyncio.QueueEmpty: + if done_event.is_set(): + await logger.adebug("Empty queue with no more events being produced, closing writer loop") + flush_start_event.set() + # Exit context manager to trigger flush + break + else: + await asyncio.sleep(0.1) + continue + + record_batch = cast_record_batch_json_columns(record_batch, json_columns=json_columns) + await writer.write_record_batch(record_batch, flush=False) + + if writer.should_flush(): + await logger.adebug("Writer finished, ready to flush events") + flush_start_event.set() + # Exit context manager to trigger flush + break + + await logger.adebug("Completed %s records", writer.records_total) + return writer.records_total def get_batch_export_writer( diff --git a/posthog/temporal/batch_exports/temporary_file.py b/posthog/temporal/batch_exports/temporary_file.py index 4d7dc45df5496..97d20bc785e09 100644 --- a/posthog/temporal/batch_exports/temporary_file.py +++ b/posthog/temporal/batch_exports/temporary_file.py @@ -96,6 +96,9 @@ def __exit__(self, exc, value, tb): def __iter__(self): yield from self._file + def __str__(self) -> str: + return self._file.name + @property def brotli_compressor(self): if self._brotli_compressor is None: @@ -387,7 +390,7 @@ def track_bytes_written(self, batch_export_file: BatchExportTemporaryFile) -> No self.bytes_total = batch_export_file.bytes_total self.bytes_since_last_flush = batch_export_file.bytes_since_last_reset - async def write_record_batch(self, record_batch: pa.RecordBatch) -> None: + async def write_record_batch(self, record_batch: pa.RecordBatch, flush: bool = True) -> None: """Issue a record batch write tracking progress and flushing if required.""" record_batch = record_batch.sort_by("_inserted_at") last_inserted_at = record_batch.column("_inserted_at")[-1].as_py() @@ -401,9 +404,12 @@ async def write_record_batch(self, record_batch: pa.RecordBatch) -> None: self.track_records_written(record_batch) self.track_bytes_written(self.batch_export_file) - if self.bytes_since_last_flush >= self.max_bytes: + if flush and self.should_flush(): await self.flush(last_inserted_at) + def should_flush(self) -> bool: + return self.bytes_since_last_flush >= self.max_bytes + async def flush(self, last_inserted_at: dt.datetime, is_last: bool = False) -> None: """Call the provided `flush_callable` and reset underlying file. 
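The `RecordBatchQueue`, `start_produce_batch_export_record_batches`, and `consume_batch_export_record_batches` additions above form a bytes-bounded producer/consumer pipeline: record batches are produced into an `asyncio.Queue` whose fullness is measured in bytes, a done event signals the end of production, and writer tasks drain the queue until it is empty and production has finished. The sketch below illustrates that pattern in isolation; it is a simplified, hypothetical example (names like `BytesBoundedQueue`, `produce`, and `consume` are invented here, and plain `bytes` payloads stand in for pyarrow record batches), not the PostHog implementation itself.

```python
# Minimal sketch (illustrative only) of a bytes-bounded asyncio producer/consumer pipeline.
import asyncio


class BytesBoundedQueue(asyncio.Queue):
    """An asyncio.Queue whose fullness is measured in bytes rather than item count."""

    def __init__(self, max_size_bytes: int = 0) -> None:
        super().__init__(maxsize=max_size_bytes)
        self._bytes_size = 0

    def _put(self, item: bytes) -> None:
        # Track the payload size as items are appended to the underlying deque.
        self._bytes_size += len(item)
        super()._put(item)

    def _get(self) -> bytes:
        item = super()._get()
        self._bytes_size -= len(item)
        return item

    def qsize(self) -> int:
        # Reporting bytes here makes `maxsize` act as a byte limit for `put()`.
        return self._bytes_size


async def produce(queue: BytesBoundedQueue, done_event: asyncio.Event) -> None:
    for chunk in (b"a" * 10, b"b" * 20, b"c" * 30):
        await queue.put(chunk)  # blocks if the byte limit would be exceeded
    done_event.set()  # tell consumers nothing more is coming


async def consume(queue: BytesBoundedQueue, done_event: asyncio.Event) -> int:
    consumed = 0
    # Keep draining until the queue is empty AND the producer has finished.
    while not queue.empty() or not done_event.is_set():
        try:
            chunk = queue.get_nowait()
        except asyncio.QueueEmpty:
            await asyncio.sleep(0.1)  # producer may still be working
            continue
        consumed += len(chunk)  # stand-in for flushing to a destination
    return consumed


async def main() -> None:
    queue = BytesBoundedQueue(max_size_bytes=1024)
    done_event = asyncio.Event()
    producer = asyncio.create_task(produce(queue, done_event))
    consumed = await consume(queue, done_event)
    await producer
    print(f"consumed {consumed} bytes")


if __name__ == "__main__":
    asyncio.run(main())
```

Bounding the queue by payload size rather than item count keeps memory use predictable even when individual record batches vary widely in size, which is why `qsize` is overridden to report bytes.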
diff --git a/posthog/temporal/common/asyncpa.py b/posthog/temporal/common/asyncpa.py index 31eab18d02928..d76dffb5ecb9c 100644 --- a/posthog/temporal/common/asyncpa.py +++ b/posthog/temporal/common/asyncpa.py @@ -1,6 +1,10 @@ +import asyncio import typing import pyarrow as pa +import structlog + +logger = structlog.get_logger() CONTINUATION_BYTES = b"\xff\xff\xff\xff" @@ -128,3 +132,20 @@ async def read_schema(self) -> pa.Schema: raise TypeError(f"Expected message of type 'schema' got '{message.type}'") return pa.ipc.read_schema(message) + + +class AsyncRecordBatchProducer(AsyncRecordBatchReader): + def __init__(self, bytes_iter: typing.AsyncIterator[tuple[bytes, bool]]) -> None: + super().__init__(bytes_iter) + + async def produce(self, queue: asyncio.Queue, done_event: asyncio.Event): + await logger.adebug("Starting record batch produce loop") + while True: + try: + record_batch = await self.read_next_record_batch() + except StopAsyncIteration: + await logger.adebug("No more record batches to produce, closing loop") + done_event.set() + return + + await queue.put(record_batch) diff --git a/posthog/temporal/common/clickhouse.py b/posthog/temporal/common/clickhouse.py index 485eb68901e21..570cfe8d5bb5e 100644 --- a/posthog/temporal/common/clickhouse.py +++ b/posthog/temporal/common/clickhouse.py @@ -1,3 +1,4 @@ +import asyncio import collections.abc import contextlib import datetime as dt @@ -11,7 +12,7 @@ import requests from django.conf import settings -from posthog.temporal.common.asyncpa import AsyncRecordBatchReader +import posthog.temporal.common.asyncpa as asyncpa def encode_clickhouse_data(data: typing.Any, quote_char="'") -> bytes: @@ -383,13 +384,31 @@ async def astream_query_as_arrow( """Execute the given query in ClickHouse and stream back the response as Arrow record batches. This method makes sense when running with FORMAT ArrowStream, although we currently do not enforce this. - As pyarrow doesn't support async/await buffers, this method is sync and utilizes requests instead of aiohttp. """ async with self.apost_query(query, *data, query_parameters=query_parameters, query_id=query_id) as response: - reader = AsyncRecordBatchReader(response.content.iter_chunks()) + reader = asyncpa.AsyncRecordBatchReader(response.content.iter_chunks()) async for batch in reader: yield batch + async def aproduce_query_as_arrow_record_batches( + self, + query, + *data, + queue: asyncio.Queue, + done_event: asyncio.Event, + query_parameters=None, + query_id: str | None = None, + ) -> None: + """Execute the given query in ClickHouse and produce Arrow record batches to given buffer queue. + + This method makes sense when running with FORMAT ArrowStream, although we currently do not enforce this. + This method is intended to be ran as a background task, producing record batches continuously, while other + downstream consumer tasks process them from the queue. 
+ """ + async with self.apost_query(query, *data, query_parameters=query_parameters, query_id=query_id) as response: + reader = asyncpa.AsyncRecordBatchProducer(response.content.iter_chunks()) + await reader.produce(queue=queue, done_event=done_event) + async def __aenter__(self): """Enter method part of the AsyncContextManager protocol.""" self.connector = aiohttp.TCPConnector(ssl=self.ssl) diff --git a/posthog/temporal/common/logger.py b/posthog/temporal/common/logger.py index c769116921f6c..2b1107d8124cc 100644 --- a/posthog/temporal/common/logger.py +++ b/posthog/temporal/common/logger.py @@ -1,8 +1,8 @@ import asyncio import json import logging -import uuid import ssl +import uuid import aiokafka import structlog @@ -14,7 +14,6 @@ from posthog.kafka_client.topics import KAFKA_LOG_ENTRIES - BACKGROUND_LOGGER_TASKS = set() @@ -29,6 +28,18 @@ async def bind_temporal_worker_logger(team_id: int, destination: str | None = No return logger.new(team_id=team_id, destination=destination, **temporal_context) +async def configure_temporal_worker_logger( + logger, team_id: int, destination: str | None = None +) -> FilteringBoundLogger: + """Return a bound logger for Temporal Workers.""" + if not structlog.is_configured(): + configure_logger() + + temporal_context = get_temporal_context() + + return logger.new(team_id=team_id, destination=destination, **temporal_context) + + async def bind_temporal_org_worker_logger( organization_id: uuid.UUID, destination: str | None = None ) -> FilteringBoundLogger: diff --git a/posthog/temporal/data_imports/pipelines/sql_database/__init__.py b/posthog/temporal/data_imports/pipelines/sql_database/__init__.py index 96bfa8a9d202d..962cbb2d4ad9b 100644 --- a/posthog/temporal/data_imports/pipelines/sql_database/__init__.py +++ b/posthog/temporal/data_imports/pipelines/sql_database/__init__.py @@ -65,6 +65,8 @@ def sql_source_for_type( else: incremental = None + connect_args = [] + if source_type == ExternalDataSource.Type.POSTGRES: credentials = ConnectionStringCredentials( f"postgresql://{user}:{password}@{host}:{port}/{database}?sslmode={sslmode}" @@ -76,6 +78,10 @@ def sql_source_for_type( credentials = ConnectionStringCredentials( f"mysql+pymysql://{user}:{password}@{host}:{port}/{database}?ssl_ca={ssl_ca}&ssl_verify_cert=false" ) + + # PlanetScale needs this to be set + if host.endswith("psdb.cloud"): + connect_args = ["SET workload = 'OLAP';"] elif source_type == ExternalDataSource.Type.MSSQL: credentials = ConnectionStringCredentials( f"mssql+pyodbc://{user}:{password}@{host}:{port}/{database}?driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes" @@ -84,7 +90,12 @@ def sql_source_for_type( raise Exception("Unsupported source_type") db_source = sql_database( - credentials, schema=schema, table_names=table_names, incremental=incremental, team_id=team_id + credentials, + schema=schema, + table_names=table_names, + incremental=incremental, + team_id=team_id, + connect_args=connect_args, ) return db_source @@ -180,6 +191,7 @@ def sql_database( table_names: Optional[List[str]] = dlt.config.value, # noqa: UP006 incremental: Optional[dlt.sources.incremental] = None, team_id: Optional[int] = None, + connect_args: Optional[list[str]] = None, ) -> Iterable[DltResource]: """ A DLT source which loads data from an SQL database using SQLAlchemy. 
@@ -231,6 +243,7 @@ def sql_database( engine=engine, table=table, incremental=incremental, + connect_args=connect_args, ) ) diff --git a/posthog/temporal/data_imports/pipelines/sql_database/helpers.py b/posthog/temporal/data_imports/pipelines/sql_database/helpers.py index d877effb3e374..50577b6b04d17 100644 --- a/posthog/temporal/data_imports/pipelines/sql_database/helpers.py +++ b/posthog/temporal/data_imports/pipelines/sql_database/helpers.py @@ -14,7 +14,7 @@ from dlt.common.typing import TDataItem from .settings import DEFAULT_CHUNK_SIZE -from sqlalchemy import Table, create_engine, Column +from sqlalchemy import Table, create_engine, Column, text from sqlalchemy.engine import Engine from sqlalchemy.sql import Select @@ -26,11 +26,13 @@ def __init__( table: Table, chunk_size: int = 1000, incremental: Optional[dlt.sources.incremental[Any]] = None, + connect_args: Optional[list[str]] = None, ) -> None: self.engine = engine self.table = table self.chunk_size = chunk_size self.incremental = incremental + self.connect_args = connect_args if incremental: try: self.cursor_column: Optional[Column[Any]] = table.c[incremental.cursor_path] @@ -74,6 +76,9 @@ def make_query(self) -> Select[Any]: def load_rows(self) -> Iterator[list[TDataItem]]: query = self.make_query() with self.engine.connect() as conn: + if self.connect_args: + for stmt in self.connect_args: + conn.execute(text(stmt)) result = conn.execution_options(yield_per=self.chunk_size).execute(query) for partition in result.partitions(size=self.chunk_size): yield [dict(row._mapping) for row in partition] @@ -84,6 +89,7 @@ def table_rows( table: Table, chunk_size: int = DEFAULT_CHUNK_SIZE, incremental: Optional[dlt.sources.incremental[Any]] = None, + connect_args: Optional[list[str]] = None, ) -> Iterator[TDataItem]: """ A DLT source which loads data from an SQL database using SQLAlchemy. 
@@ -100,7 +106,7 @@ def table_rows( """ yield dlt.mark.materialize_table_schema() # type: ignore - loader = TableLoader(engine, table, incremental=incremental, chunk_size=chunk_size) + loader = TableLoader(engine, table, incremental=incremental, chunk_size=chunk_size, connect_args=connect_args) yield from loader.load_rows() engine.dispose() diff --git a/posthog/temporal/tests/batch_exports/test_batch_exports.py b/posthog/temporal/tests/batch_exports/test_batch_exports.py index dda307dda004a..8c3fb186b82cd 100644 --- a/posthog/temporal/tests/batch_exports/test_batch_exports.py +++ b/posthog/temporal/tests/batch_exports/test_batch_exports.py @@ -2,15 +2,19 @@ import json import operator from random import randint +import asyncio import pytest from django.test import override_settings +import pyarrow as pa from posthog.batch_exports.service import BatchExportModel from posthog.temporal.batch_exports.batch_exports import ( get_data_interval, iter_model_records, iter_records, + start_produce_batch_export_record_batches, + RecordBatchQueue, ) from posthog.temporal.tests.utils.events import generate_test_events_in_clickhouse @@ -404,3 +408,427 @@ def test_get_data_interval(interval, data_interval_end, expected): """Test get_data_interval returns the expected data interval tuple.""" result = get_data_interval(interval, data_interval_end) assert result == expected + + +async def get_record_batch_from_queue(queue, done_event): + while not queue.empty() or not done_event.is_set(): + try: + record_batch = queue.get_nowait() + except asyncio.QueueEmpty: + if done_event.is_set(): + break + else: + await asyncio.sleep(0.1) + continue + + return record_batch + return None + + +async def test_start_produce_batch_export_record_batches_uses_extra_query_parameters(clickhouse_client): + """Test start_produce_batch_export_record_batches uses a HogQL value.""" + team_id = randint(1, 1000000) + data_interval_end = dt.datetime.fromisoformat("2023-04-25T14:31:00.000000+00:00") + data_interval_start = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=data_interval_start, + end_time=data_interval_end, + count=10, + count_outside_range=0, + count_other_team=0, + duplicate=False, + properties={"$browser": "Chrome", "$os": "Mac OS X", "custom": 3}, + ) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=data_interval_start.isoformat(), + interval_end=data_interval_end.isoformat(), + fields=[ + {"expression": "JSONExtractInt(properties, %(hogql_val_0)s)", "alias": "custom_prop"}, + ], + extra_query_parameters={"hogql_val_0": "custom"}, + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + for expected, record in zip(events, records): + if expected["properties"] is None: + raise ValueError("Empty properties") + + assert record["custom_prop"] == expected["properties"]["custom"] + + +async def test_start_produce_batch_export_record_batches_can_flatten_properties(clickhouse_client): + """Test start_produce_batch_export_record_batches can flatten properties.""" + team_id = randint(1, 1000000) + data_interval_end = dt.datetime.fromisoformat("2023-04-25T14:31:00.000000+00:00") 
+ data_interval_start = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=data_interval_start, + end_time=data_interval_end, + count=10, + count_outside_range=0, + count_other_team=0, + duplicate=False, + properties={"$browser": "Chrome", "$os": "Mac OS X", "custom-property": 3}, + ) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=data_interval_start.isoformat(), + interval_end=data_interval_end.isoformat(), + fields=[ + {"expression": "event", "alias": "event"}, + {"expression": "JSONExtractString(properties, '$browser')", "alias": "browser"}, + {"expression": "JSONExtractString(properties, '$os')", "alias": "os"}, + {"expression": "JSONExtractInt(properties, 'custom-property')", "alias": "custom_prop"}, + ], + extra_query_parameters={"hogql_val_0": "custom"}, + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + all_expected = sorted(events, key=operator.itemgetter("event")) + all_record = sorted(records, key=operator.itemgetter("event")) + + for expected, record in zip(all_expected, all_record): + if expected["properties"] is None: + raise ValueError("Empty properties") + + assert record["browser"] == expected["properties"]["$browser"] + assert record["os"] == expected["properties"]["$os"] + assert record["custom_prop"] == expected["properties"]["custom-property"] + + +@pytest.mark.parametrize( + "field", + [ + {"expression": "event", "alias": "event_name"}, + {"expression": "team_id", "alias": "team"}, + {"expression": "timestamp", "alias": "time_the_stamp"}, + {"expression": "created_at", "alias": "creation_time"}, + ], +) +async def test_start_produce_batch_export_record_batches_with_single_field_and_alias(clickhouse_client, field): + """Test start_produce_batch_export_record_batches can return a single aliased field.""" + team_id = randint(1, 1000000) + data_interval_end = dt.datetime.fromisoformat("2023-04-25T14:31:00.000000+00:00") + data_interval_start = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=data_interval_start, + end_time=data_interval_end, + count=10, + count_outside_range=0, + count_other_team=0, + duplicate=False, + properties={"$browser": "Chrome", "$os": "Mac OS X"}, + ) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=data_interval_start.isoformat(), + interval_end=data_interval_end.isoformat(), + fields=[field], + extra_query_parameters={}, + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + all_expected = sorted(events, key=operator.itemgetter(field["expression"])) + all_record = sorted(records, key=operator.itemgetter(field["alias"])) + + for expected, record in zip(all_expected, all_record): + assert len(record) == 2 + # Always set for progress 
tracking + assert record.get("_inserted_at", None) is not None + + result = record[field["alias"]] + expected_value = expected[field["expression"]] # type: ignore + + if isinstance(result, dt.datetime): + # Event generation function returns datetimes as strings. + expected_value = dt.datetime.fromisoformat(expected_value).replace(tzinfo=dt.UTC) + + assert result == expected_value + + +async def test_start_produce_batch_export_record_batches_ignores_timestamp_predicates(clickhouse_client): + """Test the rows returned ignore timestamp predicates when configured.""" + team_id = randint(1, 1000000) + + inserted_at = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + data_interval_end = inserted_at + dt.timedelta(hours=1) + + # Insert some data with timestamps a couple of years before inserted_at + timestamp_start = inserted_at - dt.timedelta(hours=24 * 365 * 2) + timestamp_end = inserted_at - dt.timedelta(hours=24 * 365) + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=timestamp_start, + end_time=timestamp_end, + count=10, + count_outside_range=0, + count_other_team=0, + duplicate=True, + person_properties={"$browser": "Chrome", "$os": "Mac OS X"}, + inserted_at=inserted_at, + ) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=inserted_at.isoformat(), + interval_end=data_interval_end.isoformat(), + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + assert len(records) == 0 + + with override_settings(UNCONSTRAINED_TIMESTAMP_TEAM_IDS=[str(team_id)]): + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=inserted_at.isoformat(), + interval_end=data_interval_end.isoformat(), + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + assert_records_match_events(records, events) + + +async def test_start_produce_batch_export_record_batches_can_include_events(clickhouse_client): + """Test the rows returned can include events.""" + team_id = randint(1, 1000000) + data_interval_end = dt.datetime.fromisoformat("2023-04-25T14:31:00.000000+00:00") + data_interval_start = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=data_interval_start, + end_time=data_interval_end, + count=10000, + count_outside_range=0, + count_other_team=0, + duplicate=True, + person_properties={"$browser": "Chrome", "$os": "Mac OS X"}, + ) + + # Include the latter half of events. 
+ include_events = (event["event"] for event in events[5000:]) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=data_interval_start.isoformat(), + interval_end=data_interval_end.isoformat(), + include_events=include_events, + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + assert_records_match_events(records, events[5000:]) + + +async def test_start_produce_batch_export_record_batches_can_exclude_events(clickhouse_client): + """Test the rows returned can include events.""" + team_id = randint(1, 1000000) + data_interval_end = dt.datetime.fromisoformat("2023-04-25T14:31:00.000000+00:00") + data_interval_start = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=data_interval_start, + end_time=data_interval_end, + count=10000, + count_outside_range=0, + count_other_team=0, + duplicate=True, + person_properties={"$browser": "Chrome", "$os": "Mac OS X"}, + ) + + # Exclude the latter half of events. + exclude_events = (event["event"] for event in events[5000:]) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=data_interval_start.isoformat(), + interval_end=data_interval_end.isoformat(), + exclude_events=exclude_events, + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + assert_records_match_events(records, events[:5000]) + + +async def test_start_produce_batch_export_record_batches_handles_duplicates(clickhouse_client): + """Test the rows returned are de-duplicated.""" + team_id = randint(1, 1000000) + data_interval_end = dt.datetime.fromisoformat("2023-04-25T14:31:00.000000+00:00") + data_interval_start = dt.datetime.fromisoformat("2023-04-25T14:30:00.000000+00:00") + + (events, _, _) = await generate_test_events_in_clickhouse( + client=clickhouse_client, + team_id=team_id, + start_time=data_interval_start, + end_time=data_interval_end, + count=100, + count_outside_range=0, + count_other_team=0, + duplicate=True, + person_properties={"$browser": "Chrome", "$os": "Mac OS X"}, + ) + + queue, done_event, _ = start_produce_batch_export_record_batches( + client=clickhouse_client, + team_id=team_id, + is_backfill=False, + model_name="events", + interval_start=data_interval_start.isoformat(), + interval_end=data_interval_end.isoformat(), + ) + + records = [] + while not queue.empty() or not done_event.is_set(): + record_batch = await get_record_batch_from_queue(queue, done_event) + if record_batch is None: + break + + for record in record_batch.to_pylist(): + records.append(record) + + assert_records_match_events(records, events) + + +async def test_record_batch_queue_tracks_bytes(): + """Test `RecordBatchQueue` tracks bytes from `RecordBatch`.""" + records = [{"test": 1}, {"test": 2}, {"test": 3}] + record_batch = pa.RecordBatch.from_pylist(records) + + queue = RecordBatchQueue() + + await queue.put(record_batch) + 
assert record_batch.get_total_buffer_size() == queue.qsize() + + item = await queue.get() + + assert item == record_batch + assert queue.qsize() == 0 + + +async def test_record_batch_queue_raises_queue_full(): + """Test `QueueFull` is raised when we put too many bytes.""" + records = [{"test": 1}, {"test": 2}, {"test": 3}] + record_batch = pa.RecordBatch.from_pylist(records) + record_batch_size = record_batch.get_total_buffer_size() + + queue = RecordBatchQueue(max_size_bytes=record_batch_size) + + await queue.put(record_batch) + assert record_batch.get_total_buffer_size() == queue.qsize() + + with pytest.raises(asyncio.QueueFull): + queue.put_nowait(record_batch) + + item = await queue.get() + + assert item == record_batch + assert queue.qsize() == 0 + + +async def test_record_batch_queue_sets_schema(): + """Test `RecordBatchQueue` sets a schema from first `RecordBatch`.""" + records = [{"test": 1}, {"test": 2}, {"test": 3}] + record_batch = pa.RecordBatch.from_pylist(records) + + queue = RecordBatchQueue() + + await queue.put(record_batch) + + assert queue._schema_set.is_set() + + schema = await queue.get_schema() + assert schema == record_batch.schema diff --git a/posthog/temporal/tests/batch_exports/test_bigquery_batch_export_workflow.py b/posthog/temporal/tests/batch_exports/test_bigquery_batch_export_workflow.py index 0f184b79356a1..00228adcb8cff 100644 --- a/posthog/temporal/tests/batch_exports/test_bigquery_batch_export_workflow.py +++ b/posthog/temporal/tests/batch_exports/test_bigquery_batch_export_workflow.py @@ -105,7 +105,12 @@ async def assert_clickhouse_records_in_bigquery( inserted_bq_ingested_timestamp.append(v) continue - inserted_record[k] = json.loads(v) if k in json_columns and v is not None else v + if k in json_columns: + assert ( + isinstance(v, dict) or v is None + ), f"Expected '{k}' to be JSON, but it was not deserialized to dict" + + inserted_record[k] = v inserted_records.append(inserted_record) diff --git a/posthog/test/__snapshots__/test_feature_flag.ambr b/posthog/test/__snapshots__/test_feature_flag.ambr index 339a4714be80f..2c30fecdac689 100644 --- a/posthog/test/__snapshots__/test_feature_flag.ambr +++ b/posthog/test/__snapshots__/test_feature_flag.ambr @@ -149,6 +149,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -320,6 +321,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", @@ -656,6 +658,7 @@ "posthog_team"."session_recording_minimum_duration_milliseconds", "posthog_team"."session_recording_linked_flag", "posthog_team"."session_recording_network_payload_capture_config", + "posthog_team"."session_recording_url_trigger_config", "posthog_team"."session_replay_config", "posthog_team"."survey_config", "posthog_team"."capture_console_log_opt_in", diff --git a/posthog/utils.py b/posthog/utils.py index 7db447e77c82e..7535df0700638 100644 --- a/posthog/utils.py +++ b/posthog/utils.py @@ -175,8 +175,14 @@ def relative_date_parse_with_delta_mapping( 
*, always_truncate: bool = False, now: Optional[datetime.datetime] = None, + increase: bool = False, ) -> tuple[datetime.datetime, Optional[dict[str, int]], str | None]: - """Returns the parsed datetime, along with the period mapping - if the input was a relative datetime string.""" + """ + Returns the parsed datetime, along with the period mapping - if the input was a relative datetime string. + + :increase controls whether to add relative delta to the current time or subtract + Should later control this using +/- infront of the input regex + """ try: try: # This supports a few formats, but we primarily care about: @@ -245,9 +251,13 @@ def relative_date_parse_with_delta_mapping( delta_mapping["month"] = 1 delta_mapping["day"] = 1 elif match.group("position") == "End": - delta_mapping["month"] = 12 delta_mapping["day"] = 31 - parsed_dt -= relativedelta(**delta_mapping) # type: ignore + + if increase: + parsed_dt += relativedelta(**delta_mapping) # type: ignore + else: + parsed_dt -= relativedelta(**delta_mapping) # type: ignore + if always_truncate: # Truncate to the start of the hour for hour-precision datetimes, to the start of the day for larger intervals # TODO: Remove this from this function, this should not be the responsibility of it @@ -264,8 +274,11 @@ def relative_date_parse( *, always_truncate: bool = False, now: Optional[datetime.datetime] = None, + increase: bool = False, ) -> datetime.datetime: - return relative_date_parse_with_delta_mapping(input, timezone_info, always_truncate=always_truncate, now=now)[0] + return relative_date_parse_with_delta_mapping( + input, timezone_info, always_truncate=always_truncate, now=now, increase=increase + )[0] def get_js_url(request: HttpRequest) -> str: @@ -1068,6 +1081,20 @@ def filters_override_requested_by_client(request: Request) -> Optional[dict]: return None +def variables_override_requested_by_client(request: Request) -> Optional[dict[str, dict]]: + raw_variables = request.query_params.get("variables_override") + + if raw_variables is not None: + try: + return json.loads(raw_variables) + except Exception: + raise serializers.ValidationError( + {"variables_override": "Invalid JSON passed in variables_override parameter"} + ) + + return None + + def _request_has_key_set(key: str, request: Request, allowed_values: Optional[list[str]] = None) -> bool | str: query_param = request.query_params.get(key) data_value = request.data.get(key) diff --git a/posthog/warehouse/README.md b/posthog/warehouse/README.md index 724bcc677d825..877f15da8175c 100644 --- a/posthog/warehouse/README.md +++ b/posthog/warehouse/README.md @@ -13,3 +13,9 @@ Without this, you'll get the following error when connecting a SQL database to d ``` symbol not found in flat namespace '_bcp_batch' ``` + +If the issue persists, install from source without cache again + +``` +pip install --pre --no-binary :all: pymssql --no-cache +``` diff --git a/posthog/year_in_posthog/2023.html b/posthog/year_in_posthog/2023.html index 113ec1730c381..5604fb0c8fbcb 100644 --- a/posthog/year_in_posthog/2023.html +++ b/posthog/year_in_posthog/2023.html @@ -20,7 +20,7 @@