From 9df4e0193ffeb6d1cc323bdebb7e2bdfb2a375e2 Mon Sep 17 00:00:00 2001 From: Sampras Lopes Date: Wed, 29 Nov 2023 17:04:53 +0530 Subject: [PATCH] feat(analytics): Add Clickhouse based analytics (#2988) Co-authored-by: harsh_sharma_juspay Co-authored-by: Ivor Dsouza Co-authored-by: Chethan Rao <70657455+Chethan-rao@users.noreply.github.com> Co-authored-by: nain-F49FF806 <126972030+nain-F49FF806@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: hyperswitch-bot[bot] <148525504+hyperswitch-bot[bot]@users.noreply.github.com> Co-authored-by: akshay.s Co-authored-by: Gnanasundari24 <118818938+Gnanasundari24@users.noreply.github.com> --- Cargo.lock | 121 +- Dockerfile | 4 +- config/development.toml | 30 + config/docker_compose.toml | 18 +- crates/analytics/Cargo.toml | 37 + crates/analytics/docs/clickhouse/README.md | 45 + .../docs/clickhouse/cluster_setup/README.md | 347 +++ .../config/clickhouse_config.xml | 370 ++++ .../config/clickhouse_metrika.xml | 60 + .../cluster_setup/config/macros/macros-01.xml | 9 + .../cluster_setup/config/macros/macros-02.xml | 9 + .../cluster_setup/config/macros/macros-03.xml | 9 + .../cluster_setup/config/macros/macros-04.xml | 9 + .../cluster_setup/config/macros/macros-05.xml | 9 + .../cluster_setup/config/macros/macros-06.xml | 9 + .../clickhouse/cluster_setup/config/users.xml | 117 + .../cluster_setup/docker-compose.yml | 198 ++ .../clickhouse/cluster_setup/kafka-script.sh | 11 + .../cluster_setup/scripts/api_event_logs.sql | 237 ++ .../scripts/payment_attempts.sql | 217 ++ .../cluster_setup/scripts/payment_intents.sql | 165 ++ .../scripts/refund_analytics.sql | 173 ++ .../cluster_setup/scripts/sdk_events.sql | 156 ++ .../cluster_setup/scripts/seed_scripts.sql | 1 + .../docs/clickhouse/scripts/api_events_v2.sql | 134 ++ .../clickhouse/scripts/payment_attempts.sql | 156 ++ .../clickhouse/scripts/payment_intents.sql | 116 + .../docs/clickhouse/scripts/refunds.sql | 121 ++ crates/analytics/src/api_event.rs | 9 + crates/analytics/src/api_event/core.rs | 176 ++ crates/analytics/src/api_event/events.rs | 105 + crates/analytics/src/api_event/filters.rs | 53 + crates/analytics/src/api_event/metrics.rs | 110 + .../src/api_event/metrics/api_count.rs | 106 + .../src/api_event/metrics/latency.rs | 138 ++ .../api_event/metrics/status_code_count.rs | 103 + crates/analytics/src/api_event/types.rs | 33 + crates/analytics/src/clickhouse.rs | 458 ++++ crates/analytics/src/core.rs | 31 + .../src/analytics => analytics/src}/errors.rs | 0 crates/analytics/src/lambda_utils.rs | 36 + crates/analytics/src/lib.rs | 509 +++++ crates/analytics/src/main.rs | 3 + .../analytics => analytics/src}/metrics.rs | 0 .../src}/metrics/request.rs | 28 +- crates/analytics/src/payments.rs | 16 + .../src}/payments/accumulator.rs | 72 +- crates/analytics/src/payments/core.rs | 303 +++ crates/analytics/src/payments/distribution.rs | 92 + .../distribution/payment_error_message.rs | 176 ++ .../src}/payments/filters.rs | 10 +- .../src}/payments/metrics.rs | 41 +- .../src}/payments/metrics/avg_ticket_size.rs | 16 +- .../metrics/connector_success_rate.rs | 130 ++ .../src}/payments/metrics/payment_count.rs | 8 +- .../metrics/payment_processed_amount.rs | 10 +- .../payments/metrics/payment_success_count.rs | 10 +- .../src/payments/metrics/retries_count.rs | 122 ++ .../src}/payments/metrics/success_rate.rs | 8 +- .../src}/payments/types.rs | 11 +- .../src/analytics => analytics/src}/query.rs | 272 ++- .../analytics => 
analytics/src}/refunds.rs | 2 +- .../src}/refunds/accumulator.rs | 4 +- crates/analytics/src/refunds/core.rs | 203 ++ .../src}/refunds/filters.rs | 10 +- .../src}/refunds/metrics.rs | 13 +- .../src}/refunds/metrics/refund_count.rs | 9 +- .../metrics/refund_processed_amount.rs | 9 +- .../refunds/metrics/refund_success_count.rs | 9 +- .../refunds/metrics/refund_success_rate.rs | 7 +- .../src}/refunds/types.rs | 2 +- crates/analytics/src/sdk_events.rs | 14 + .../analytics/src/sdk_events/accumulator.rs | 98 + crates/analytics/src/sdk_events/core.rs | 201 ++ crates/analytics/src/sdk_events/events.rs | 80 + crates/analytics/src/sdk_events/filters.rs | 56 + crates/analytics/src/sdk_events/metrics.rs | 181 ++ .../metrics/average_payment_time.rs | 129 ++ .../sdk_events/metrics/payment_attempts.rs | 118 + .../metrics/payment_data_filled_count.rs | 118 + .../metrics/payment_method_selected_count.rs | 118 + .../metrics/payment_methods_call_count.rs | 126 ++ .../metrics/payment_success_count.rs | 118 + .../sdk_events/metrics/sdk_initiated_count.rs | 118 + .../sdk_events/metrics/sdk_rendered_count.rs | 118 + crates/analytics/src/sdk_events/types.rs | 50 + .../src/analytics => analytics/src}/sqlx.rs | 189 +- .../src/analytics => analytics/src}/types.rs | 26 +- .../src/analytics => analytics/src}/utils.rs | 18 + crates/api_models/src/analytics.rs | 154 +- crates/api_models/src/analytics/api_event.rs | 148 ++ crates/api_models/src/analytics/payments.rs | 52 +- crates/api_models/src/analytics/refunds.rs | 21 +- crates/api_models/src/analytics/sdk_events.rs | 215 ++ crates/api_models/src/events.rs | 36 +- crates/api_models/src/payments.rs | 2 + crates/data_models/Cargo.toml | 1 - crates/router/Cargo.toml | 4 +- crates/router/src/analytics.rs | 655 +++++- crates/router/src/analytics/core.rs | 96 - crates/router/src/analytics/payments.rs | 13 - crates/router/src/analytics/payments/core.rs | 129 -- crates/router/src/analytics/refunds/core.rs | 104 - crates/router/src/analytics/routes.rs | 164 -- crates/router/src/bin/scheduler.rs | 2 - crates/router/src/configs/kms.rs | 2 +- crates/router/src/configs/settings.rs | 25 +- crates/router/src/core/refunds.rs | 4 +- crates/router/src/core/webhooks.rs | 2 + crates/router/src/db.rs | 94 +- crates/router/src/db/kafka_store.rs | 1917 +++++++++++++++++ crates/router/src/events.rs | 64 +- crates/router/src/events/api_logs.rs | 6 + crates/router/src/events/event_logger.rs | 2 +- crates/router/src/events/kafka_handler.rs | 29 + crates/router/src/lib.rs | 9 +- crates/router/src/routes/app.rs | 49 +- crates/router/src/services.rs | 1 + crates/router/src/services/api.rs | 2 + crates/router/src/services/kafka.rs | 314 +++ crates/router/src/services/kafka/api_event.rs | 108 + .../src/services/kafka/outgoing_request.rs | 19 + .../src/services/kafka/payment_attempt.rs | 92 + .../src/services/kafka/payment_intent.rs | 71 + crates/router/src/services/kafka/refund.rs | 68 + .../src/types/storage/payment_attempt.rs | 4 - crates/router/tests/connectors/aci.rs | 4 + crates/router/tests/connectors/utils.rs | 9 + crates/router/tests/payments2.rs | 1 + crates/router/tests/utils.rs | 1 + crates/router_env/src/lib.rs | 13 +- crates/scheduler/Cargo.toml | 2 +- crates/storage_impl/src/config.rs | 38 +- crates/storage_impl/src/database/store.rs | 2 +- docker-compose.yml | 63 + 135 files changed, 12141 insertions(+), 897 deletions(-) create mode 100644 crates/analytics/Cargo.toml create mode 100644 crates/analytics/docs/clickhouse/README.md create mode 100644 
crates/analytics/docs/clickhouse/cluster_setup/README.md create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/config/users.xml create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml create mode 100755 crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql create mode 100644 crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql create mode 100644 crates/analytics/docs/clickhouse/scripts/api_events_v2.sql create mode 100644 crates/analytics/docs/clickhouse/scripts/payment_attempts.sql create mode 100644 crates/analytics/docs/clickhouse/scripts/payment_intents.sql create mode 100644 crates/analytics/docs/clickhouse/scripts/refunds.sql create mode 100644 crates/analytics/src/api_event.rs create mode 100644 crates/analytics/src/api_event/core.rs create mode 100644 crates/analytics/src/api_event/events.rs create mode 100644 crates/analytics/src/api_event/filters.rs create mode 100644 crates/analytics/src/api_event/metrics.rs create mode 100644 crates/analytics/src/api_event/metrics/api_count.rs create mode 100644 crates/analytics/src/api_event/metrics/latency.rs create mode 100644 crates/analytics/src/api_event/metrics/status_code_count.rs create mode 100644 crates/analytics/src/api_event/types.rs create mode 100644 crates/analytics/src/clickhouse.rs create mode 100644 crates/analytics/src/core.rs rename crates/{router/src/analytics => analytics/src}/errors.rs (100%) create mode 100644 crates/analytics/src/lambda_utils.rs create mode 100644 crates/analytics/src/lib.rs create mode 100644 crates/analytics/src/main.rs rename crates/{router/src/analytics => analytics/src}/metrics.rs (100%) rename crates/{router/src/analytics => analytics/src}/metrics/request.rs (51%) create mode 100644 crates/analytics/src/payments.rs rename crates/{router/src/analytics => analytics/src}/payments/accumulator.rs (62%) create mode 100644 crates/analytics/src/payments/core.rs create mode 100644 crates/analytics/src/payments/distribution.rs create mode 100644 crates/analytics/src/payments/distribution/payment_error_message.rs rename crates/{router/src/analytics => analytics/src}/payments/filters.rs (87%) rename crates/{router/src/analytics => analytics/src}/payments/metrics.rs (76%) rename crates/{router/src/analytics => 
analytics/src}/payments/metrics/avg_ticket_size.rs (90%) create mode 100644 crates/analytics/src/payments/metrics/connector_success_rate.rs rename crates/{router/src/analytics => analytics/src}/payments/metrics/payment_count.rs (94%) rename crates/{router/src/analytics => analytics/src}/payments/metrics/payment_processed_amount.rs (94%) rename crates/{router/src/analytics => analytics/src}/payments/metrics/payment_success_count.rs (94%) create mode 100644 crates/analytics/src/payments/metrics/retries_count.rs rename crates/{router/src/analytics => analytics/src}/payments/metrics/success_rate.rs (95%) rename crates/{router/src/analytics => analytics/src}/payments/types.rs (82%) rename crates/{router/src/analytics => analytics/src}/query.rs (65%) rename crates/{router/src/analytics => analytics/src}/refunds.rs (81%) rename crates/{router/src/analytics => analytics/src}/refunds/accumulator.rs (98%) create mode 100644 crates/analytics/src/refunds/core.rs rename crates/{router/src/analytics => analytics/src}/refunds/filters.rs (90%) rename crates/{router/src/analytics => analytics/src}/refunds/metrics.rs (91%) rename crates/{router/src/analytics => analytics/src}/refunds/metrics/refund_count.rs (94%) rename crates/{router/src/analytics => analytics/src}/refunds/metrics/refund_processed_amount.rs (95%) rename crates/{router/src/analytics => analytics/src}/refunds/metrics/refund_success_count.rs (95%) rename crates/{router/src/analytics => analytics/src}/refunds/metrics/refund_success_rate.rs (96%) rename crates/{router/src/analytics => analytics/src}/refunds/types.rs (98%) create mode 100644 crates/analytics/src/sdk_events.rs create mode 100644 crates/analytics/src/sdk_events/accumulator.rs create mode 100644 crates/analytics/src/sdk_events/core.rs create mode 100644 crates/analytics/src/sdk_events/events.rs create mode 100644 crates/analytics/src/sdk_events/filters.rs create mode 100644 crates/analytics/src/sdk_events/metrics.rs create mode 100644 crates/analytics/src/sdk_events/metrics/average_payment_time.rs create mode 100644 crates/analytics/src/sdk_events/metrics/payment_attempts.rs create mode 100644 crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs create mode 100644 crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs create mode 100644 crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs create mode 100644 crates/analytics/src/sdk_events/metrics/payment_success_count.rs create mode 100644 crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs create mode 100644 crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs create mode 100644 crates/analytics/src/sdk_events/types.rs rename crates/{router/src/analytics => analytics/src}/sqlx.rs (64%) rename crates/{router/src/analytics => analytics/src}/types.rs (83%) rename crates/{router/src/analytics => analytics/src}/utils.rs (52%) create mode 100644 crates/api_models/src/analytics/api_event.rs create mode 100644 crates/api_models/src/analytics/sdk_events.rs delete mode 100644 crates/router/src/analytics/core.rs delete mode 100644 crates/router/src/analytics/payments.rs delete mode 100644 crates/router/src/analytics/payments/core.rs delete mode 100644 crates/router/src/analytics/refunds/core.rs delete mode 100644 crates/router/src/analytics/routes.rs create mode 100644 crates/router/src/db/kafka_store.rs create mode 100644 crates/router/src/events/kafka_handler.rs create mode 100644 crates/router/src/services/kafka.rs create mode 100644 
crates/router/src/services/kafka/api_event.rs create mode 100644 crates/router/src/services/kafka/outgoing_request.rs create mode 100644 crates/router/src/services/kafka/payment_attempt.rs create mode 100644 crates/router/src/services/kafka/payment_intent.rs create mode 100644 crates/router/src/services/kafka/refund.rs diff --git a/Cargo.lock b/Cargo.lock index 96bdcff3f86e..417e6d85db6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,6 +332,36 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +[[package]] +name = "analytics" +version = "0.1.0" +dependencies = [ + "actix-web", + "api_models", + "async-trait", + "aws-config", + "aws-sdk-lambda", + "aws-smithy-types", + "bigdecimal", + "common_utils", + "diesel_models", + "error-stack", + "external_services", + "futures 0.3.28", + "masking", + "once_cell", + "reqwest", + "router_env", + "serde", + "serde_json", + "sqlx", + "storage_impl", + "strum 0.25.0", + "thiserror", + "time", + "tokio 1.32.0", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -729,6 +759,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-sdk-lambda" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3ad176ffaa3aafa532246eb6a9f18a7d68da19950704ecc95d33d9dc3c62a9b" +dependencies = [ + "aws-credential-types", + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes 1.5.0", + "http", + "regex", + "tokio-stream", + "tower", + "tracing", +] + [[package]] name = "aws-sdk-s3" version = "0.28.0" @@ -1148,6 +1203,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", + "serde", ] [[package]] @@ -1256,7 +1312,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3" dependencies = [ "once_cell", - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -3862,6 +3918,27 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "object" version = "0.32.1" @@ -4395,6 +4472,16 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit 0.19.10", +] + [[package]] name = "proc-macro-crate" version = "2.0.0" @@ -4688,6 +4775,36 @@ dependencies = [ "crossbeam-utils 0.8.16", ] +[[package]] +name = "rdkafka" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54f02a5a40220f8a2dfa47ddb38ba9064475a5807a69504b6f91711df2eea63" +dependencies = [ + "futures-channel", + "futures-util", + "libc", + "log", + "rdkafka-sys", + "serde", + 
"serde_derive", + "serde_json", + "slab", + "tokio 1.32.0", +] + +[[package]] +name = "rdkafka-sys" +version = "4.7.0+2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55e0d2f9ba6253f6ec72385e453294f8618e9e15c2c6aba2a5c01ccf9622d615" +dependencies = [ + "libc", + "libz-sys", + "num_enum", + "pkg-config", +] + [[package]] name = "redis-protocol" version = "4.1.0" @@ -4939,6 +5056,7 @@ dependencies = [ "actix-multipart", "actix-rt", "actix-web", + "analytics", "api_models", "argon2", "async-bb8-diesel", @@ -4988,6 +5106,7 @@ dependencies = [ "qrcode", "rand 0.8.5", "rand_chacha 0.3.1", + "rdkafka", "redis_interface", "regex", "reqwest", diff --git a/Dockerfile b/Dockerfile index 8eb321dd2afd..e9591e5e9f27 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:slim-bookworm as builder +FROM rust:bookworm as builder ARG EXTRA_FEATURES="" @@ -36,7 +36,7 @@ RUN cargo build --release --features release ${EXTRA_FEATURES} -FROM debian:bookworm-slim +FROM debian:bookworm # Placing config and binary executable in different directories ARG CONFIG_DIR=/local/config diff --git a/config/development.toml b/config/development.toml index f2620bd37135..fa5fddb0d60a 100644 --- a/config/development.toml +++ b/config/development.toml @@ -475,3 +475,33 @@ delay_between_retries_in_milliseconds = 500 [kv_config] ttl = 900 # 15 * 60 seconds + +[events] +source = "logs" + +[events.kafka] +brokers = ["localhost:9092"] +intent_analytics_topic = "hyperswitch-payment-intent-events" +attempt_analytics_topic = "hyperswitch-payment-attempt-events" +refund_analytics_topic = "hyperswitch-refund-events" +api_logs_topic = "hyperswitch-api-log-events" +connector_events_topic = "hyperswitch-connector-api-events" + +[analytics] +source = "sqlx" + +[analytics.clickhouse] +username = "default" +# password = "" +host = "http://localhost:8123" +database_name = "default" + +[analytics.sqlx] +username = "db_user" +password = "db_pass" +host = "localhost" +port = 5432 +dbname = "hyperswitch_db" +pool_size = 5 +connection_timeout = 10 +queue_strategy = "Fifo" \ No newline at end of file diff --git a/config/docker_compose.toml b/config/docker_compose.toml index 445e1e856846..4d50600e1bf8 100644 --- a/config/docker_compose.toml +++ b/config/docker_compose.toml @@ -333,16 +333,32 @@ supported_connectors = "braintree" redis_lock_expiry_seconds = 180 # 3 * 60 seconds delay_between_retries_in_milliseconds = 500 +[events.kafka] +brokers = ["localhost:9092"] +intent_analytics_topic = "hyperswitch-payment-intent-events" +attempt_analytics_topic = "hyperswitch-payment-attempt-events" +refund_analytics_topic = "hyperswitch-refund-events" +api_logs_topic = "hyperswitch-api-log-events" +connector_events_topic = "hyperswitch-connector-api-events" + [analytics] source = "sqlx" +[analytics.clickhouse] +username = "default" +# password = "" +host = "http://localhost:8123" +database_name = "default" + [analytics.sqlx] username = "db_user" password = "db_pass" -host = "pg" +host = "localhost" port = 5432 dbname = "hyperswitch_db" pool_size = 5 +connection_timeout = 10 +queue_strategy = "Fifo" [kv_config] ttl = 900 # 15 * 60 seconds diff --git a/crates/analytics/Cargo.toml b/crates/analytics/Cargo.toml new file mode 100644 index 000000000000..f49fe322ae3b --- /dev/null +++ b/crates/analytics/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "analytics" +version = "0.1.0" +description = "Analytics / Reports related functionality" +edition = "2021" + +# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html
+
+
+[dependencies]
+# First party crates
+api_models = { version = "0.1.0", path = "../api_models", features = ["errors"] }
+storage_impl = { version = "0.1.0", path = "../storage_impl", default-features = false }
+common_utils = { version = "0.1.0", path = "../common_utils" }
+external_services = { version = "0.1.0", path = "../external_services", default-features = false }
+masking = { version = "0.1.0", path = "../masking" }
+router_env = { version = "0.1.0", path = "../router_env", features = ["log_extra_implicit_fields", "log_custom_entries_to_extra"] }
+diesel_models = { version = "0.1.0", path = "../diesel_models", features = ["kv_store"] }
+
+# Third party dependencies
+actix-web = "4.3.1"
+async-trait = "0.1.68"
+aws-config = { version = "0.55.3" }
+aws-sdk-lambda = { version = "0.28.0" }
+aws-smithy-types = { version = "0.55.3" }
+bigdecimal = { version = "0.3.1", features = ["serde"] }
+error-stack = "0.3.1"
+futures = "0.3.28"
+once_cell = "1.18.0"
+reqwest = { version = "0.11.18", features = ["serde_json"] }
+serde = { version = "1.0.163", features = ["derive", "rc"] }
+serde_json = "1.0.96"
+sqlx = { version = "0.6.3", features = ["postgres", "runtime-actix", "runtime-actix-native-tls", "time", "bigdecimal"] }
+strum = { version = "0.25.0", features = ["derive"] }
+thiserror = "1.0.43"
+time = { version = "0.3.21", features = ["serde", "serde-well-known", "std"] }
+tokio = { version = "1.28.2", features = ["macros", "rt-multi-thread"] }
diff --git a/crates/analytics/docs/clickhouse/README.md b/crates/analytics/docs/clickhouse/README.md
new file mode 100644
index 000000000000..2fd48a30c29f
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/README.md
@@ -0,0 +1,45 @@
+#### Starting the containers
+
+In our use case we rely on Kafka for ingesting events,
+hence we can use Docker Compose to start all the components:
+
+```
+docker compose up -d clickhouse-server kafka-ui
+```
+
+> kafka-ui is a visual tool for inspecting Kafka, available at localhost:8090
+
+#### Setting up ClickHouse
+
+Once ClickHouse is up & running, you need to create the required tables for it.
+
+You can either visit the URL (http://localhost:8123/play) on which the clickhouse-server is running to get a query playground,
+or bash into the ClickHouse container & execute commands manually:
+```
+# On your local terminal
+docker compose exec clickhouse-server bash
+
+# Inside the clickhouse-server container shell
+clickhouse-client --user default
+
+# Inside the clickhouse-client shell
+SHOW TABLES;
+CREATE TABLE ......
+```
+
+The table creation scripts are provided [here](./scripts).
+
+#### Running/Debugging your application
+Once set up, you can run your application either via Docker Compose or normally via cargo run.
+
+Remember to enable Kafka events via the development.toml/docker_compose.toml files.
+
+Inspect the [kafka-ui](http://localhost:8090) to check that messages are being inserted into the queue.
+
+If the messages/topics are available, you can run select queries on your ClickHouse tables to ensure data is being populated.
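+
+For example, a minimal sanity check from the clickhouse-client shell might look like the following (the `payment_attempts` table name here is an assumption; use whichever table the script you ran from [scripts](./scripts) actually created):
+
+```sql
+-- Confirm that rows are landing in the table at all.
+SELECT count(*) FROM payment_attempts;
+
+-- Spot-check the latest events to confirm fresh data is flowing in.
+SELECT payment_id, status, created_at
+FROM payment_attempts
+ORDER BY created_at DESC
+LIMIT 5;
+```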
+
+If the data is not being populated in ClickHouse, you can check the error logs in the ClickHouse server via:
+```
+# Inside the clickhouse-server container shell
+tail -f /var/log/clickhouse-server/clickhouse-server.err.log
+```
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/README.md b/crates/analytics/docs/clickhouse/cluster_setup/README.md
new file mode 100644
index 000000000000..cd5f2dfeb023
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/README.md
@@ -0,0 +1,347 @@
+# Tutorial for setting up a ClickHouse server
+
+
+## Single server with Docker
+
+
+- Run the server
+
+```
+docker run -d --name clickhouse-server -p 9000:9000 --ulimit nofile=262144:262144 yandex/clickhouse-server
+
+```
+
+- Run the client
+
+```
+docker run -it --rm --link clickhouse-server:clickhouse-server yandex/clickhouse-client --host clickhouse-server
+```
+
+Now you can check whether the setup succeeded.
+
+
+## Setting up a cluster
+
+
+In this part we will set up:
+
+- 1 cluster, with 3 shards
+- 2 replica servers per shard
+- ReplicatedMergeTree & Distributed tables to back our data
+
+
+### Cluster
+
+Let's look at our docker-compose.yml first.
+
+```
+version: '3'
+
+services:
+  clickhouse-zookeeper:
+    image: zookeeper
+    ports:
+      - "2181:2181"
+      - "2182:2182"
+    container_name: clickhouse-zookeeper
+    hostname: clickhouse-zookeeper
+
+  clickhouse-01:
+    image: yandex/clickhouse-server
+    hostname: clickhouse-01
+    container_name: clickhouse-01
+    ports:
+      - 9001:9000
+    volumes:
+      - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+      - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+      - ./config/macros/macros-01.xml:/etc/clickhouse-server/config.d/macros.xml
+      # - ./data/server-01:/var/lib/clickhouse
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
+    depends_on:
+      - "clickhouse-zookeeper"
+
+  clickhouse-02:
+    image: yandex/clickhouse-server
+    hostname: clickhouse-02
+    container_name: clickhouse-02
+    ports:
+      - 9002:9000
+    volumes:
+      - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+      - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+      - ./config/macros/macros-02.xml:/etc/clickhouse-server/config.d/macros.xml
+      # - ./data/server-02:/var/lib/clickhouse
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
+    depends_on:
+      - "clickhouse-zookeeper"
+
+  clickhouse-03:
+    image: yandex/clickhouse-server
+    hostname: clickhouse-03
+    container_name: clickhouse-03
+    ports:
+      - 9003:9000
+    volumes:
+      - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+      - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+      - ./config/macros/macros-03.xml:/etc/clickhouse-server/config.d/macros.xml
+      # - ./data/server-03:/var/lib/clickhouse
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
+    depends_on:
+      - "clickhouse-zookeeper"
+
+  clickhouse-04:
+    image: yandex/clickhouse-server
+    hostname: clickhouse-04
+    container_name: clickhouse-04
+    ports:
+      - 9004:9000
+    volumes:
+      - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+      - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+      - ./config/macros/macros-04.xml:/etc/clickhouse-server/config.d/macros.xml
+      # - ./data/server-04:/var/lib/clickhouse
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
+    depends_on:
+      - "clickhouse-zookeeper"
+
+  clickhouse-05:
+    image: yandex/clickhouse-server
+    hostname: clickhouse-05
+    container_name: clickhouse-05
+    ports:
+      - 9005:9000
+    volumes:
+      - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+      - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+      - ./config/macros/macros-05.xml:/etc/clickhouse-server/config.d/macros.xml
+      # - ./data/server-05:/var/lib/clickhouse
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
+    depends_on:
+      - "clickhouse-zookeeper"
+
+  clickhouse-06:
+    image: yandex/clickhouse-server
+    hostname: clickhouse-06
+    container_name: clickhouse-06
+    ports:
+      - 9006:9000
+    volumes:
+      - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+      - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+      - ./config/macros/macros-06.xml:/etc/clickhouse-server/config.d/macros.xml
+      # - ./data/server-06:/var/lib/clickhouse
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
+    depends_on:
+      - "clickhouse-zookeeper"
+networks:
+  default:
+    external:
+      name: clickhouse-net
+```
+
+
+We have 6 ClickHouse server containers and one ZooKeeper container.
+
+
+**To enable replication, ZooKeeper is required. ClickHouse will take care of data consistency on all replicas and run the restore procedure after failure automatically. It's recommended to deploy the ZooKeeper cluster on separate servers.**
+
+**ZooKeeper is not a strict requirement: in some simple cases you can duplicate the data by writing it into all the replicas from your application code. This approach is not recommended; in this case ClickHouse is not able to guarantee data consistency on all replicas, and it remains the responsibility of your application.**
+
+
+Let's look at the config files.
+
+`./config/clickhouse_config.xml` is the default config file in Docker; we copy it out and add this line:
+
+```
+<include_from>/etc/clickhouse-server/metrika.xml</include_from>
+```
+
+
+Now let's look at `clickhouse_metrika.xml`:
+
+```
+<yandex>
+    <clickhouse_remote_servers>
+        <cluster_1>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-01</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-06</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-02</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-03</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-04</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-05</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster_1>
+    </clickhouse_remote_servers>
+    <zookeeper-servers>
+        <node index="1">
+            <host>clickhouse-zookeeper</host>
+            <port>2181</port>
+        </node>
+    </zookeeper-servers>
+    <networks>
+        <ip>::/0</ip>
+    </networks>
+    <clickhouse_compression>
+        <case>
+            <min_part_size>10000000000</min_part_size>
+            <min_part_size_ratio>0.01</min_part_size_ratio>
+            <method>lz4</method>
+        </case>
+    </clickhouse_compression>
+</yandex>
+```
+
+and macros.xml; each instance has its own macros settings, like server 1:
+
+```
+<yandex>
+    <macros>
+        <replica>clickhouse-01</replica>
+        <layer>01</layer>
+        <shard>01</shard>
+    </macros>
+</yandex>
+```
+
+
+**Make sure your macros settings are consistent with the remote-server settings in metrika.xml.**
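+
+A quick way to verify this later, once the servers are up, is to query the built-in `system.macros` table on each node; the substituted values should match the shard/replica layout declared in metrika.xml:
+
+```sql
+-- Run via clickhouse-client on every node.
+SELECT macro, substitution FROM system.macros;
+```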
+
+So now you can start the servers.
+
+```
+docker network create clickhouse-net
+docker-compose up -d
+```
+
+Connect to a server and check whether the cluster settings are fine:
+
+```
+docker run -it --rm --network="clickhouse-net" --link clickhouse-01:clickhouse-server yandex/clickhouse-client --host clickhouse-server
+```
+
+```sql
+clickhouse-01 :) select * from system.clusters;
+
+SELECT *
+FROM system.clusters
+
+┌─cluster─────────────────────┬─shard_num─┬─shard_weight─┬─replica_num─┬─host_name─────┬─host_address─┬─port─┬─is_local─┬─user────┬─default_database─┐
+│ cluster_1                   │         1 │            1 │           1 │ clickhouse-01 │ 172.21.0.4   │ 9000 │        1 │ default │                  │
+│ cluster_1                   │         1 │            1 │           2 │ clickhouse-06 │ 172.21.0.5   │ 9000 │        1 │ default │                  │
+│ cluster_1                   │         2 │            1 │           1 │ clickhouse-02 │ 172.21.0.8   │ 9000 │        0 │ default │                  │
+│ cluster_1                   │         2 │            1 │           2 │ clickhouse-03 │ 172.21.0.6   │ 9000 │        0 │ default │                  │
+│ cluster_1                   │         3 │            1 │           1 │ clickhouse-04 │ 172.21.0.7   │ 9000 │        0 │ default │                  │
+│ cluster_1                   │         3 │            1 │           2 │ clickhouse-05 │ 172.21.0.3   │ 9000 │        0 │ default │                  │
+│ test_shard_localhost        │         1 │            1 │           1 │ localhost     │ 127.0.0.1    │ 9000 │        1 │ default │                  │
+│ test_shard_localhost_secure │         1 │            1 │           1 │ localhost     │ 127.0.0.1    │ 9440 │        0 │ default │                  │
+└─────────────────────────────┴───────────┴──────────────┴─────────────┴───────────────┴──────────────┴──────┴──────────┴─────────┴──────────────────┘
+```
+
+If you see this, the cluster settings are working (though this alone does not prove that the connections between replicas are fine).
+
+
+### Replica Table
+
+So now we have a cluster with replica settings. For ClickHouse, we need to create a ReplicatedMergeTree table as the local table on every server:
+
+```sql
+CREATE TABLE ttt (id Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/ttt', '{replica}') PARTITION BY id ORDER BY id
+```
+
+and create a Distributed table that connects to the local tables:
+
+```sql
+CREATE TABLE ttt_all as ttt ENGINE = Distributed(cluster_1, default, ttt, rand());
+```
+
+
+### Insert and test
+
+Generate some data and test:
+
+
+```
+# docker exec into the client of server 1 and run:
+for ((idx=1;idx<=100;++idx)); do clickhouse-client --host clickhouse-server --query "Insert into default.ttt_all values ($idx)"; done;
+```
+
+For the Distributed table:
+
+```
+select count(*) from ttt_all;
+```
+
+For the local table:
+ +``` +select count(*) from ttt; +``` + + +## Authentication + +Please see config/users.xml + + +- Conn +```bash +docker run -it --rm --network="clickhouse-net" --link clickhouse-01:clickhouse-server yandex/clickhouse-client --host clickhouse-server -u user1 --password 123456 +``` + +## Source + +- https://clickhouse.yandex/docs/en/operations/table_engines/replication/#creating-replicated-tables diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml new file mode 100644 index 000000000000..94c854dc273a --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml @@ -0,0 +1,370 @@ + + + + + error + 1000M + 1 + 10 + + + + 8123 + 9000 + + + + + + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + + + + + + + 9009 + + + + + + + + + + + + + + + + + + + + 4096 + 3 + + + 100 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + /var/lib/clickhouse/user_files/ + + + users.xml + + + default + + + + + + default + + + + + + + + + + + + + + localhost + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + + /etc/clickhouse-server/metrika.xml + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + 7500 +
+ + + + + + + + + + + + + + + + *_dictionary.xml + + + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + +
+ diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml new file mode 100644 index 000000000000..b58ffc34bc29 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml @@ -0,0 +1,60 @@ + + + + + 1 + true + + clickhouse-01 + 9000 + + + clickhouse-06 + 9000 + + + + 1 + true + + clickhouse-02 + 9000 + + + clickhouse-03 + 9000 + + + + 1 + true + + + clickhouse-04 + 9000 + + + clickhouse-05 + 9000 + + + + + + + clickhouse-zookeeper + 2181 + + + + ::/0 + + + + 10000000000 + 0.01 + lz4 + + + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml new file mode 100644 index 000000000000..75df1c5916e8 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml @@ -0,0 +1,9 @@ + + + clickhouse-01 + 01 + 01 + data + cluster_1 + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml new file mode 100644 index 000000000000..67e4a545b30c --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml @@ -0,0 +1,9 @@ + + + clickhouse-02 + 02 + 01 + data + cluster_1 + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml new file mode 100644 index 000000000000..e9278191b80f --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml @@ -0,0 +1,9 @@ + + + clickhouse-03 + 02 + 01 + data + cluster_1 + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml new file mode 100644 index 000000000000..033c0ad1152e --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml @@ -0,0 +1,9 @@ + + + clickhouse-04 + 03 + 01 + data + cluster_1 + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml new file mode 100644 index 000000000000..c63314c5acea --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml @@ -0,0 +1,9 @@ + + + clickhouse-05 + 03 + 01 + data + cluster_1 + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml new file mode 100644 index 000000000000..4b01bda9948c --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml @@ -0,0 +1,9 @@ + + + clickhouse-06 + 01 + 01 + data + cluster_1 + + diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/users.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/users.xml new file mode 100644 index 000000000000..e1b8de78e37a --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/config/users.xml @@ -0,0 +1,117 @@ + + + + + + + + 10000000000 + + + 0 + + + random + + + + + 1 + + + + + + + 123456 + + ::/0 + + default + default + + + + + + + + + ::/0 + + + + default + + + default + + + + + + + ::1 + 127.0.0.1 + + readonly + default + + + + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + + + diff --git 
a/crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml b/crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml new file mode 100644 index 000000000000..96d7618b47e6 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml @@ -0,0 +1,198 @@ +version: '3' + +networks: + ckh_net: + +services: + clickhouse-zookeeper: + image: zookeeper + ports: + - "2181:2181" + - "2182:2182" + container_name: clickhouse-zookeeper + hostname: clickhouse-zookeeper + networks: + - ckh_net + + clickhouse-01: + image: clickhouse/clickhouse-server + hostname: clickhouse-01 + container_name: clickhouse-01 + networks: + - ckh_net + ports: + - 9001:9000 + - 8124:8123 + volumes: + - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml + - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml + - ./config/macros/macros-01.xml:/etc/clickhouse-server/config.d/macros.xml + - ./config/users.xml:/etc/clickhouse-server/users.xml + # - ./data/server-01:/var/lib/clickhouse + ulimits: + nofile: + soft: 262144 + hard: 262144 + depends_on: + - "clickhouse-zookeeper" + + clickhouse-02: + image: clickhouse/clickhouse-server + hostname: clickhouse-02 + container_name: clickhouse-02 + networks: + - ckh_net + ports: + - 9002:9000 + - 8125:8123 + volumes: + - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml + - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml + - ./config/macros/macros-02.xml:/etc/clickhouse-server/config.d/macros.xml + - ./config/users.xml:/etc/clickhouse-server/users.xml + # - ./data/server-02:/var/lib/clickhouse + ulimits: + nofile: + soft: 262144 + hard: 262144 + depends_on: + - "clickhouse-zookeeper" + + clickhouse-03: + image: clickhouse/clickhouse-server + hostname: clickhouse-03 + container_name: clickhouse-03 + networks: + - ckh_net + ports: + - 9003:9000 + - 8126:8123 + volumes: + - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml + - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml + - ./config/macros/macros-03.xml:/etc/clickhouse-server/config.d/macros.xml + - ./config/users.xml:/etc/clickhouse-server/users.xml + # - ./data/server-03:/var/lib/clickhouse + ulimits: + nofile: + soft: 262144 + hard: 262144 + depends_on: + - "clickhouse-zookeeper" + + clickhouse-04: + image: clickhouse/clickhouse-server + hostname: clickhouse-04 + container_name: clickhouse-04 + networks: + - ckh_net + ports: + - 9004:9000 + - 8127:8123 + volumes: + - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml + - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml + - ./config/macros/macros-04.xml:/etc/clickhouse-server/config.d/macros.xml + - ./config/users.xml:/etc/clickhouse-server/users.xml + # - ./data/server-04:/var/lib/clickhouse + ulimits: + nofile: + soft: 262144 + hard: 262144 + depends_on: + - "clickhouse-zookeeper" + + clickhouse-05: + image: clickhouse/clickhouse-server + hostname: clickhouse-05 + container_name: clickhouse-05 + networks: + - ckh_net + ports: + - 9005:9000 + - 8128:8123 + volumes: + - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml + - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml + - ./config/macros/macros-05.xml:/etc/clickhouse-server/config.d/macros.xml + - ./config/users.xml:/etc/clickhouse-server/users.xml + # - ./data/server-05:/var/lib/clickhouse + ulimits: + nofile: + soft: 262144 + hard: 262144 + depends_on: + - "clickhouse-zookeeper" + + clickhouse-06: + image: 
clickhouse/clickhouse-server + hostname: clickhouse-06 + container_name: clickhouse-06 + networks: + - ckh_net + ports: + - 9006:9000 + - 8129:8123 + volumes: + - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml + - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml + - ./config/macros/macros-06.xml:/etc/clickhouse-server/config.d/macros.xml + - ./config/users.xml:/etc/clickhouse-server/users.xml + # - ./data/server-06:/var/lib/clickhouse + ulimits: + nofile: + soft: 262144 + hard: 262144 + depends_on: + - "clickhouse-zookeeper" + + kafka0: + image: confluentinc/cp-kafka:7.0.5 + hostname: kafka0 + container_name: kafka0 + ports: + - 9092:9092 + - 9093 + - 9997 + - 29092 + environment: + KAFKA_BROKER_ID: 1 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092 + KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_PROCESS_ROLES: 'broker,controller' + KAFKA_NODE_ID: 1 + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093' + KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092' + KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' + KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' + JMX_PORT: 9997 + KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997 + volumes: + - ./kafka-script.sh:/tmp/update_run.sh + command: "bash -c 'if [ ! 
-f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+    networks:
+      ckh_net:
+        aliases:
+          - hyper-c1-kafka-brokers.kafka-cluster.svc.cluster.local
+
+
+  # Kafka UI for debugging kafka queues
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8090:8080
+    depends_on:
+      - kafka0
+    networks:
+      - ckh_net
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+      KAFKA_CLUSTERS_0_JMXPORT: 9997
+
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh b/crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh
new file mode 100755
index 000000000000..023c832b4e1b
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# This script is required to run the Kafka cluster (without ZooKeeper)
+
+# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
+sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
+
+# Docker workaround: Ignore cub zk-ready
+sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
+
+# KRaft required step: Format the storage directory with a new cluster ID
+echo "kafka-storage format --ignore-formatted -t $(kafka-storage random-uuid) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql
new file mode 100644
index 000000000000..0fe194a0e676
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql
@@ -0,0 +1,237 @@
+CREATE TABLE hyperswitch.api_events_queue on cluster '{cluster}' (
+    `merchant_id` String,
+    `payment_id` Nullable(String),
+    `refund_id` Nullable(String),
+    `payment_method_id` Nullable(String),
+    `payment_method` Nullable(String),
+    `payment_method_type` Nullable(String),
+    `customer_id` Nullable(String),
+    `user_id` Nullable(String),
+    `request_id` String,
+    `flow_type` LowCardinality(String),
+    `api_name` LowCardinality(String),
+    `request` String,
+    `response` String,
+    `status_code` UInt32,
+    `url_path` LowCardinality(Nullable(String)),
+    `event_type` LowCardinality(Nullable(String)),
+    `created_at` DateTime CODEC(T64, LZ4),
+    `latency` Nullable(UInt128),
+    `user_agent` Nullable(String),
+    `ip_addr` Nullable(String)
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'hyper-c1-kafka-brokers.kafka-cluster.svc.cluster.local:9092',
+kafka_topic_list = 'hyperswitch-api-log-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+
+CREATE TABLE hyperswitch.api_events_clustered on cluster '{cluster}' (
+    `merchant_id` String,
+    `payment_id` Nullable(String),
+    `refund_id` Nullable(String),
+    `payment_method_id` Nullable(String),
+    `payment_method` Nullable(String),
+    `payment_method_type` Nullable(String),
+    `customer_id` Nullable(String),
+    `user_id` Nullable(String),
+    `request_id` Nullable(String),
+    `flow_type` LowCardinality(String),
+    `api_name` LowCardinality(String),
+    `request` String,
+    `response` String,
+    `status_code` UInt32,
+    `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+    `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+    `latency` Nullable(UInt128),
+    `user_agent` Nullable(String),
+    `ip_addr` Nullable(String),
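+    -- Data-skipping indexes: bloom filters on the columns most commonly
+    -- used as analytics filters, letting ClickHouse prune granules on reads.
+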
INDEX flowIndex flow_type TYPE bloom_filter GRANULARITY 1, + INDEX apiIndex api_name TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex status_code TYPE bloom_filter GRANULARITY 1 +) ENGINE = ReplicatedMergeTree( + '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/api_events_clustered', + '{replica}' +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, flow_type, status_code, api_name) +TTL created_at + toIntervalMonth(6) +; + + +CREATE TABLE hyperswitch.api_events_dist on cluster '{cluster}' ( + `merchant_id` String, + `payment_id` Nullable(String), + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `customer_id` Nullable(String), + `user_id` Nullable(String), + `request_id` Nullable(String), + `flow_type` LowCardinality(String), + `api_name` LowCardinality(String), + `request` String, + `response` String, + `status_code` UInt32, + `url_path` LowCardinality(Nullable(String)), + `event_type` LowCardinality(Nullable(String)), + `inserted_at` DateTime64(3), + `created_at` DateTime64(3), + `latency` Nullable(UInt128), + `user_agent` Nullable(String), + `ip_addr` Nullable(String) +) ENGINE = Distributed('{cluster}', 'hyperswitch', 'api_events_clustered', rand()); + +CREATE MATERIALIZED VIEW hyperswitch.api_events_mv on cluster '{cluster}' TO hyperswitch.api_events_dist ( + `merchant_id` String, + `payment_id` Nullable(String), + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `customer_id` Nullable(String), + `user_id` Nullable(String), + `request_id` Nullable(String), + `flow_type` LowCardinality(String), + `api_name` LowCardinality(String), + `request` String, + `response` String, + `status_code` UInt32, + `url_path` LowCardinality(Nullable(String)), + `event_type` LowCardinality(Nullable(String)), + `inserted_at` DateTime64(3), + `created_at` DateTime64(3), + `latency` Nullable(UInt128), + `user_agent` Nullable(String), + `ip_addr` Nullable(String) +) AS +SELECT + merchant_id, + payment_id, + refund_id, + payment_method_id, + payment_method, + payment_method_type, + customer_id, + user_id, + request_id, + flow_type, + api_name, + request, + response, + status_code, + url_path, + event_type, + now() as inserted_at, + created_at, + latency, + user_agent, + ip_addr +FROM + hyperswitch.api_events_queue +WHERE length(_error) = 0; + + +CREATE MATERIALIZED VIEW hyperswitch.api_events_parse_errors on cluster '{cluster}' +( + `topic` String, + `partition` Int64, + `offset` Int64, + `raw` String, + `error` String +) +ENGINE = MergeTree +ORDER BY (topic, partition, offset) +SETTINGS index_granularity = 8192 AS +SELECT + _topic AS topic, + _partition AS partition, + _offset AS offset, + _raw_message AS raw, + _error AS error +FROM hyperswitch.api_events_queue +WHERE length(_error) > 0 +; + + +ALTER TABLE hyperswitch.api_events_clustered on cluster '{cluster}' ADD COLUMN `url_path` LowCardinality(Nullable(String)); +ALTER TABLE hyperswitch.api_events_clustered on cluster '{cluster}' ADD COLUMN `event_type` LowCardinality(Nullable(String)); + + +CREATE TABLE hyperswitch.api_audit_log ON CLUSTER '{cluster}' ( + `merchant_id` LowCardinality(String), + `payment_id` String, + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `user_id` Nullable(String), + 
`request_id` Nullable(String), + `flow_type` LowCardinality(String), + `api_name` LowCardinality(String), + `request` String, + `response` String, + `status_code` UInt32, + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `latency` Nullable(UInt128), + `user_agent` Nullable(String), + `ip_addr` Nullable(String), + `url_path` LowCardinality(Nullable(String)), + `event_type` LowCardinality(Nullable(String)), + `customer_id` LowCardinality(Nullable(String)) +) ENGINE = ReplicatedMergeTree( '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/api_audit_log', '{replica}' ) PARTITION BY merchant_id +ORDER BY (merchant_id, payment_id) +TTL created_at + toIntervalMonth(18) +SETTINGS index_granularity = 8192 + + +CREATE MATERIALIZED VIEW hyperswitch.api_audit_log_mv ON CLUSTER `{cluster}` TO hyperswitch.api_audit_log( + `merchant_id` LowCardinality(String), + `payment_id` String, + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `customer_id` Nullable(String), + `user_id` Nullable(String), + `request_id` Nullable(String), + `flow_type` LowCardinality(String), + `api_name` LowCardinality(String), + `request` String, + `response` String, + `status_code` UInt32, + `url_path` LowCardinality(Nullable(String)), + `event_type` LowCardinality(Nullable(String)), + `inserted_at` DateTime64(3), + `created_at` DateTime64(3), + `latency` Nullable(UInt128), + `user_agent` Nullable(String), + `ip_addr` Nullable(String) +) AS +SELECT + merchant_id, + multiIf(payment_id IS NULL, '', payment_id) AS payment_id, + refund_id, + payment_method_id, + payment_method, + payment_method_type, + customer_id, + user_id, + request_id, + flow_type, + api_name, + request, + response, + status_code, + url_path, + api_event_type AS event_type, + now() AS inserted_at, + created_at, + latency, + user_agent, + ip_addr +FROM hyperswitch.api_events_queue +WHERE length(_error) = 0 \ No newline at end of file diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql new file mode 100644 index 000000000000..3a6281ae9050 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql @@ -0,0 +1,217 @@ +CREATE TABLE hyperswitch.payment_attempt_queue on cluster '{cluster}' ( + `payment_id` String, + `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + `save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` LowCardinality(Nullable(String)), + `capture_on` Nullable(DateTime) CODEC(T64, LZ4), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime 
CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `modified_at` DateTime CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-payment-attempt-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + + +CREATE TABLE hyperswitch.payment_attempt_dist on cluster '{cluster}' ( + `payment_id` String, + `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + `save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` Nullable(String), + `capture_on` Nullable(DateTime) CODEC(T64, LZ4), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Distributed('{cluster}', 'hyperswitch', 'payment_attempt_clustered', cityHash64(attempt_id)); + + + +CREATE MATERIALIZED VIEW hyperswitch.payment_attempt_mv on cluster '{cluster}' TO hyperswitch.payment_attempt_dist ( + `payment_id` String, + `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + `save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` Nullable(String), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime64(3), + `capture_on` Nullable(DateTime64(3)), + `last_synced` Nullable(DateTime64(3)), + `modified_at` DateTime64(3), + `inserted_at` DateTime64(3), + `sign_flag` Int8 +) AS +SELECT + payment_id, + merchant_id, + attempt_id, + status, + amount, + currency, + connector, + save_to_locker, + error_message, + offer_amount, + surcharge_amount, + tax_amount, + payment_method_id, + payment_method, + payment_method_type, + connector_transaction_id, + capture_method, + confirm, + authentication_type, + cancellation_reason, + amount_to_capture, + mandate_id, + browser_info, + error_code, + connector_metadata, + 
payment_experience, + created_at, + capture_on, + last_synced, + modified_at, + now() as inserted_at, + sign_flag +FROM + hyperswitch.payment_attempt_queue +WHERE length(_error) = 0; + + +CREATE TABLE hyperswitch.payment_attempt_clustered on cluster '{cluster}' ( + `payment_id` String, + `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + `save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` Nullable(String), + `capture_on` Nullable(DateTime) CODEC(T64, LZ4), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8, + INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1, + INDEX paymentMethodIndex payment_method TYPE bloom_filter GRANULARITY 1, + INDEX authenticationTypeIndex authentication_type TYPE bloom_filter GRANULARITY 1, + INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex status TYPE bloom_filter GRANULARITY 1 +) ENGINE = ReplicatedCollapsingMergeTree( + '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/payment_attempt_clustered', + '{replica}', + sign_flag +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, attempt_id) +TTL created_at + toIntervalMonth(6) +; + +CREATE MATERIALIZED VIEW hyperswitch.payment_attempt_parse_errors on cluster '{cluster}' +( + `topic` String, + `partition` Int64, + `offset` Int64, + `raw` String, + `error` String +) +ENGINE = MergeTree +ORDER BY (topic, partition, offset) +SETTINGS index_granularity = 8192 AS +SELECT + _topic AS topic, + _partition AS partition, + _offset AS offset, + _raw_message AS raw, + _error AS error +FROM hyperswitch.payment_attempt_queue +WHERE length(_error) > 0 +; \ No newline at end of file diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql new file mode 100644 index 000000000000..eb2d83140e92 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql @@ -0,0 +1,165 @@ +CREATE TABLE hyperswitch.payment_intents_queue on cluster '{cluster}' ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + `statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` 
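+-- NOTE (editorial): the *_parse_errors views act as dead-letter tables: they
+-- persist the raw Kafka payload and the parse error for every message the
+-- main view rejected, keyed by (topic, partition, offset) so failures can be
+-- located and replayed. A hedged example of inspecting recent failures:
+--
+--   SELECT offset, error, raw
+--   FROM hyperswitch.payment_attempt_parse_errors
+--   ORDER BY offset DESC
+--   LIMIT 10;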
LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` String, + `business_label` String, + `modified_at` DateTime, + `created_at` DateTime, + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-payment-intent-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + +CREATE TABLE hyperswitch.payment_intents_dist on cluster '{cluster}' ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + `statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` LowCardinality(String), + `business_label` String, + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Distributed('{cluster}', 'hyperswitch', 'payment_intents_clustered', cityHash64(payment_id)); + +CREATE TABLE hyperswitch.payment_intents_clustered on cluster '{cluster}' ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + `statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` LowCardinality(String), + `business_label` String, + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8, + INDEX connectorIndex connector_id TYPE bloom_filter GRANULARITY 1, + INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex status TYPE bloom_filter GRANULARITY 1 +) ENGINE = ReplicatedCollapsingMergeTree( + '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/payment_intents_clustered', + '{replica}', + sign_flag +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, payment_id) +TTL created_at + toIntervalMonth(6) +; + +CREATE MATERIALIZED VIEW hyperswitch.payment_intent_mv on cluster '{cluster}' TO hyperswitch.payment_intents_dist ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + 
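+-- NOTE (editorial): the *_dist tables are Distributed-engine facades over the
+-- Replicated* tables: writes are routed to one shard by the hash expression
+-- (cityHash64(payment_id) here) and reads fan out across all shards. Hashing
+-- on the entity id keeps every row for a given payment on one shard, which is
+-- what lets its +1/-1 rows collapse locally, e.g. (id illustrative):
+--
+--   SELECT payment_id, sum(sign_flag) AS live
+--   FROM hyperswitch.payment_intents_dist
+--   WHERE payment_id = 'pay_example_123'
+--   GROUP BY payment_id;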
`statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` LowCardinality(String), + `business_label` String, + `modified_at` DateTime64(3), + `created_at` DateTime64(3), + `last_synced` Nullable(DateTime64(3)), + `inserted_at` DateTime64(3), + `sign_flag` Int8 +) AS +SELECT + payment_id, + merchant_id, + status, + amount, + currency, + amount_captured, + customer_id, + description, + return_url, + connector_id, + statement_descriptor_name, + statement_descriptor_suffix, + setup_future_usage, + off_session, + client_secret, + active_attempt_id, + business_country, + business_label, + modified_at, + created_at, + last_synced, + now() as inserted_at, + sign_flag +FROM hyperswitch.payment_intents_queue +WHERE length(_error) = 0; + +CREATE MATERIALIZED VIEW hyperswitch.payment_intent_parse_errors on cluster '{cluster}' +( + `topic` String, + `partition` Int64, + `offset` Int64, + `raw` String, + `error` String +) +ENGINE = MergeTree +ORDER BY (topic, partition, offset) +SETTINGS index_granularity = 8192 AS +SELECT + _topic AS topic, + _partition AS partition, + _offset AS offset, + _raw_message AS raw, + _error AS error +FROM hyperswitch.payment_intents_queue +WHERE length(_error) > 0 +; diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql new file mode 100644 index 000000000000..bf5f6e0e2405 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql @@ -0,0 +1,173 @@ +CREATE TABLE hyperswitch.refund_queue on cluster '{cluster}' ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` Nullable(String), + `created_at` DateTime, + `modified_at` DateTime, + `sign_flag` Int8 +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-refund-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + +CREATE TABLE hyperswitch.refund_dist on cluster '{cluster}' ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` 
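+-- NOTE (editorial): the clustered tables all partition by
+-- toStartOfDay(created_at) and carry TTL created_at + toIntervalMonth(6), so
+-- rows expire after six months by dropping whole day partitions rather than
+-- row-by-row deletes. Retention can be verified per partition with (sketch):
+--
+--   SELECT partition, sum(rows) AS rows
+--   FROM system.parts
+--   WHERE table = 'refund_clustered' AND active
+--   GROUP BY partition
+--   ORDER BY partition;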
Nullable(String), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Distributed('{cluster}', 'hyperswitch', 'refund_clustered', cityHash64(refund_id)); + + + +CREATE TABLE hyperswitch.refund_clustered on cluster '{cluster}' ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` Nullable(String), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8, + INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1, + INDEX refundTypeIndex refund_type TYPE bloom_filter GRANULARITY 1, + INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex refund_status TYPE bloom_filter GRANULARITY 1 +) ENGINE = ReplicatedCollapsingMergeTree( + '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/refund_clustered', + '{replica}', + sign_flag +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, refund_id) +TTL created_at + toIntervalMonth(6) +; + +CREATE MATERIALIZED VIEW hyperswitch.kafka_parse_refund on cluster '{cluster}' TO hyperswitch.refund_dist ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` Nullable(String), + `created_at` DateTime64(3), + `modified_at` DateTime64(3), + `inserted_at` DateTime64(3), + `sign_flag` Int8 +) AS +SELECT + internal_reference_id, + refund_id, + payment_id, + merchant_id, + connector_transaction_id, + connector, + connector_refund_id, + external_reference_id, + refund_type, + total_amount, + currency, + refund_amount, + refund_status, + sent_to_gateway, + refund_error_message, + refund_arn, + attempt_id, + description, + refund_reason, + refund_error_code, + created_at, + modified_at, + now() as inserted_at, + sign_flag +FROM hyperswitch.refund_queue +WHERE length(_error) = 0; + +CREATE MATERIALIZED VIEW hyperswitch.refund_parse_errors on cluster '{cluster}' +( + `topic` String, + `partition` Int64, + `offset` Int64, + `raw` String, + `error` String +) +ENGINE = MergeTree +ORDER BY (topic, partition, offset) +SETTINGS index_granularity = 8192 AS +SELECT + _topic AS topic, + _partition AS partition, 
+ _offset AS offset, + _raw_message AS raw, + _error AS error +FROM hyperswitch.refund_queue +WHERE length(_error) > 0 +; \ No newline at end of file diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql new file mode 100644 index 000000000000..37766392bc70 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql @@ -0,0 +1,156 @@ +CREATE TABLE hyperswitch.sdk_events_queue on cluster '{cluster}' ( + `payment_id` Nullable(String), + `merchant_id` String, + `remote_ip` Nullable(String), + `log_type` LowCardinality(Nullable(String)), + `event_name` LowCardinality(Nullable(String)), + `first_event` LowCardinality(Nullable(String)), + `latency` Nullable(UInt32), + `timestamp` String, + `browser_name` LowCardinality(Nullable(String)), + `browser_version` Nullable(String), + `platform` LowCardinality(Nullable(String)), + `source` LowCardinality(Nullable(String)), + `category` LowCardinality(Nullable(String)), + `version` LowCardinality(Nullable(String)), + `value` Nullable(String), + `component` LowCardinality(Nullable(String)), + `payment_method` LowCardinality(Nullable(String)), + `payment_experience` LowCardinality(Nullable(String)) +) ENGINE = Kafka SETTINGS + kafka_broker_list = 'hyper-c1-kafka-brokers.kafka-cluster.svc.cluster.local:9092', + kafka_topic_list = 'hyper-sdk-logs', + kafka_group_name = 'hyper-c1', + kafka_format = 'JSONEachRow', + kafka_handle_error_mode = 'stream'; + +CREATE TABLE hyperswitch.sdk_events_clustered on cluster '{cluster}' ( + `payment_id` Nullable(String), + `merchant_id` String, + `remote_ip` Nullable(String), + `log_type` LowCardinality(Nullable(String)), + `event_name` LowCardinality(Nullable(String)), + `first_event` Bool DEFAULT 1, + `browser_name` LowCardinality(Nullable(String)), + `browser_version` Nullable(String), + `platform` LowCardinality(Nullable(String)), + `source` LowCardinality(Nullable(String)), + `category` LowCardinality(Nullable(String)), + `version` LowCardinality(Nullable(String)), + `value` Nullable(String), + `component` LowCardinality(Nullable(String)), + `payment_method` LowCardinality(Nullable(String)), + `payment_experience` LowCardinality(Nullable(String)) DEFAULT '', + `created_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4), + `inserted_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4), + `latency` Nullable(UInt32) DEFAULT 0, + INDEX paymentMethodIndex payment_method TYPE bloom_filter GRANULARITY 1, + INDEX eventIndex event_name TYPE bloom_filter GRANULARITY 1, + INDEX platformIndex platform TYPE bloom_filter GRANULARITY 1, + INDEX logTypeIndex log_type TYPE bloom_filter GRANULARITY 1, + INDEX categoryIndex category TYPE bloom_filter GRANULARITY 1, + INDEX sourceIndex source TYPE bloom_filter GRANULARITY 1, + INDEX componentIndex component TYPE bloom_filter GRANULARITY 1, + INDEX firstEventIndex first_event TYPE bloom_filter GRANULARITY 1 +) ENGINE = ReplicatedMergeTree( + '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/sdk_events_clustered', '{replica}' +) +PARTITION BY + toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id) +TTL + toDateTime(created_at) + toIntervalMonth(6) +SETTINGS + index_granularity = 8192 +; + +CREATE TABLE hyperswitch.sdk_events_dist on cluster '{cluster}' ( + `payment_id` Nullable(String), + `merchant_id` String, + `remote_ip` Nullable(String), + `log_type` LowCardinality(Nullable(String)), + `event_name` LowCardinality(Nullable(String)), + 
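+-- NOTE (editorial): SDK events are append-only, so unlike the payment and
+-- refund tables there is no sign_flag and sdk_events_clustered uses a plain
+-- ReplicatedMergeTree; its Distributed table shards by rand(), trading data
+-- locality for even write load. The materialized view below also normalises
+-- the types coming off Kafka, e.g. multiIf(first_event = 'true', 1, 0) into a
+-- Bool and toDateTime64(timestamp, 3) to parse the string timestamp.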
`first_event` Bool DEFAULT 1, + `browser_name` LowCardinality(Nullable(String)), + `browser_version` Nullable(String), + `platform` LowCardinality(Nullable(String)), + `source` LowCardinality(Nullable(String)), + `category` LowCardinality(Nullable(String)), + `version` LowCardinality(Nullable(String)), + `value` Nullable(String), + `component` LowCardinality(Nullable(String)), + `payment_method` LowCardinality(Nullable(String)), + `payment_experience` LowCardinality(Nullable(String)) DEFAULT '', + `created_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4), + `inserted_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4), + `latency` Nullable(UInt32) DEFAULT 0 +) ENGINE = Distributed( + '{cluster}', 'hyperswitch', 'sdk_events_clustered', rand() +); + +CREATE MATERIALIZED VIEW hyperswitch.sdk_events_mv on cluster '{cluster}' TO hyperswitch.sdk_events_dist ( + `payment_id` Nullable(String), + `merchant_id` String, + `remote_ip` Nullable(String), + `log_type` LowCardinality(Nullable(String)), + `event_name` LowCardinality(Nullable(String)), + `first_event` Bool, + `latency` Nullable(UInt32), + `browser_name` LowCardinality(Nullable(String)), + `browser_version` Nullable(String), + `platform` LowCardinality(Nullable(String)), + `source` LowCardinality(Nullable(String)), + `category` LowCardinality(Nullable(String)), + `version` LowCardinality(Nullable(String)), + `value` Nullable(String), + `component` LowCardinality(Nullable(String)), + `payment_method` LowCardinality(Nullable(String)), + `payment_experience` LowCardinality(Nullable(String)), + `created_at` DateTime64(3) +) AS +SELECT + payment_id, + merchant_id, + remote_ip, + log_type, + event_name, + multiIf(first_event = 'true', 1, 0) AS first_event, + latency, + browser_name, + browser_version, + platform, + source, + category, + version, + value, + component, + payment_method, + payment_experience, + toDateTime64(timestamp, 3) AS created_at +FROM + hyperswitch.sdk_events_queue +WHERE length(_error) = 0 +; + +CREATE MATERIALIZED VIEW hyperswitch.sdk_parse_errors on cluster '{cluster}' ( + `topic` String, + `partition` Int64, + `offset` Int64, + `raw` String, + `error` String +) ENGINE = MergeTree + ORDER BY (topic, partition, offset) +SETTINGS + index_granularity = 8192 AS +SELECT + _topic AS topic, + _partition AS partition, + _offset AS offset, + _raw_message AS raw, + _error AS error +FROM + hyperswitch.sdk_events_queue +WHERE + length(_error) > 0 +; diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql new file mode 100644 index 000000000000..202b94ac6040 --- /dev/null +++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql @@ -0,0 +1 @@ +create database hyperswitch on cluster '{cluster}'; \ No newline at end of file diff --git a/crates/analytics/docs/clickhouse/scripts/api_events_v2.sql b/crates/analytics/docs/clickhouse/scripts/api_events_v2.sql new file mode 100644 index 000000000000..b41a75fe67e5 --- /dev/null +++ b/crates/analytics/docs/clickhouse/scripts/api_events_v2.sql @@ -0,0 +1,134 @@ +CREATE TABLE api_events_v2_queue ( + `merchant_id` String, + `payment_id` Nullable(String), + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `customer_id` Nullable(String), + `user_id` Nullable(String), + `connector` Nullable(String), + `request_id` String, + `flow_type` LowCardinality(String), + `api_flow` 
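+-- NOTE (editorial): the scripts under docs/clickhouse/scripts/ are
+-- single-node mirrors of the cluster_setup/ ones: plain
+-- MergeTree/CollapsingMergeTree replace the Replicated* engines, there is no
+-- ON CLUSTER clause, and the *_dist tables are ordinary local tables rather
+-- than Distributed facades (the name is presumably kept so queries stay
+-- identical across both deployments).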
LowCardinality(String), + `api_auth_type` LowCardinality(String), + `request` String, + `response` Nullable(String), + `authentication_data` Nullable(String), + `status_code` UInt32, + `created_at` DateTime CODEC(T64, LZ4), + `latency` UInt128, + `user_agent` String, + `ip_addr` String, +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-api-log-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + + +CREATE TABLE api_events_v2_dist ( + `merchant_id` String, + `payment_id` Nullable(String), + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `customer_id` Nullable(String), + `user_id` Nullable(String), + `connector` Nullable(String), + `request_id` String, + `flow_type` LowCardinality(String), + `api_flow` LowCardinality(String), + `api_auth_type` LowCardinality(String), + `request` String, + `response` Nullable(String), + `authentication_data` Nullable(String), + `status_code` UInt32, + `created_at` DateTime CODEC(T64, LZ4), + `inserted_at` DateTime CODEC(T64, LZ4), + `latency` UInt128, + `user_agent` String, + `ip_addr` String, + INDEX flowIndex flow_type TYPE bloom_filter GRANULARITY 1, + INDEX apiIndex api_flow TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex status_code TYPE bloom_filter GRANULARITY 1 +) ENGINE = MergeTree +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, flow_type, status_code, api_flow) +TTL created_at + toIntervalMonth(6) +; + +CREATE MATERIALIZED VIEW api_events_v2_mv TO api_events_v2_dist ( + `merchant_id` String, + `payment_id` Nullable(String), + `refund_id` Nullable(String), + `payment_method_id` Nullable(String), + `payment_method` Nullable(String), + `payment_method_type` Nullable(String), + `customer_id` Nullable(String), + `user_id` Nullable(String), + `connector` Nullable(String), + `request_id` String, + `flow_type` LowCardinality(String), + `api_flow` LowCardinality(String), + `api_auth_type` LowCardinality(String), + `request` String, + `response` Nullable(String), + `authentication_data` Nullable(String), + `status_code` UInt32, + `created_at` DateTime CODEC(T64, LZ4), + `inserted_at` DateTime CODEC(T64, LZ4), + `latency` UInt128, + `user_agent` String, + `ip_addr` String +) AS +SELECT + merchant_id, + payment_id, + refund_id, + payment_method_id, + payment_method, + payment_method_type, + customer_id, + user_id, + connector, + request_id, + flow_type, + api_flow, + api_auth_type, + request, + response, + authentication_data, + status_code, + created_at, + now() as inserted_at, + latency, + user_agent, + ip_addr +FROM + api_events_v2_queue +where length(_error) = 0; + + +CREATE MATERIALIZED VIEW api_events_parse_errors +( + `topic` String, + `partition` Int64, + `offset` Int64, + `raw` String, + `error` String +) +ENGINE = MergeTree +ORDER BY (topic, partition, offset) +SETTINGS index_granularity = 8192 AS +SELECT + _topic AS topic, + _partition AS partition, + _offset AS offset, + _raw_message AS raw, + _error AS error +FROM api_events_v2_queue +WHERE length(_error) > 0 +; diff --git a/crates/analytics/docs/clickhouse/scripts/payment_attempts.sql b/crates/analytics/docs/clickhouse/scripts/payment_attempts.sql new file mode 100644 index 000000000000..276e311e57a9 --- /dev/null +++ b/crates/analytics/docs/clickhouse/scripts/payment_attempts.sql @@ -0,0 +1,156 @@ +CREATE TABLE payment_attempts_queue ( + `payment_id` String, 
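+-- NOTE (editorial): the INDEX ... TYPE bloom_filter GRANULARITY 1 entries
+-- below are data-skipping indexes on the common filter columns (connector,
+-- status, currency, ...): they let ClickHouse skip granules that cannot
+-- contain a matching value for equality/IN predicates, e.g. (values
+-- illustrative):
+--
+--   SELECT count()
+--   FROM payment_attempt_dist
+--   WHERE status = 'charged' AND connector = 'stripe';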
+ `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + `save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` LowCardinality(Nullable(String)), + `capture_on` Nullable(DateTime) CODEC(T64, LZ4), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `modified_at` DateTime CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-payment-attempt-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + +CREATE TABLE payment_attempt_dist ( + `payment_id` String, + `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + `save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` Nullable(String), + `capture_on` Nullable(DateTime) CODEC(T64, LZ4), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8, + INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1, + INDEX paymentMethodIndex payment_method TYPE bloom_filter GRANULARITY 1, + INDEX authenticationTypeIndex authentication_type TYPE bloom_filter GRANULARITY 1, + INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex status TYPE bloom_filter GRANULARITY 1 +) ENGINE = CollapsingMergeTree( + sign_flag +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, attempt_id) +TTL created_at + toIntervalMonth(6) +; + + +CREATE MATERIALIZED VIEW kafka_parse_pa TO payment_attempt_dist ( + `payment_id` String, + `merchant_id` String, + `attempt_id` String, + `status` LowCardinality(String), + `amount` Nullable(UInt32), + `currency` LowCardinality(Nullable(String)), + `connector` LowCardinality(Nullable(String)), + 
`save_to_locker` Nullable(Bool), + `error_message` Nullable(String), + `offer_amount` Nullable(UInt32), + `surcharge_amount` Nullable(UInt32), + `tax_amount` Nullable(UInt32), + `payment_method_id` Nullable(String), + `payment_method` LowCardinality(Nullable(String)), + `payment_method_type` LowCardinality(Nullable(String)), + `connector_transaction_id` Nullable(String), + `capture_method` Nullable(String), + `confirm` Bool, + `authentication_type` LowCardinality(Nullable(String)), + `cancellation_reason` Nullable(String), + `amount_to_capture` Nullable(UInt32), + `mandate_id` Nullable(String), + `browser_info` Nullable(String), + `error_code` Nullable(String), + `connector_metadata` Nullable(String), + `payment_experience` Nullable(String), + `created_at` DateTime64(3), + `capture_on` Nullable(DateTime64(3)), + `last_synced` Nullable(DateTime64(3)), + `modified_at` DateTime64(3), + `inserted_at` DateTime64(3), + `sign_flag` Int8 +) AS +SELECT + payment_id, + merchant_id, + attempt_id, + status, + amount, + currency, + connector, + save_to_locker, + error_message, + offer_amount, + surcharge_amount, + tax_amount, + payment_method_id, + payment_method, + payment_method_type, + connector_transaction_id, + capture_method, + confirm, + authentication_type, + cancellation_reason, + amount_to_capture, + mandate_id, + browser_info, + error_code, + connector_metadata, + payment_experience, + created_at, + capture_on, + last_synced, + modified_at, + now() as inserted_at, + sign_flag +FROM + payment_attempts_queue; + diff --git a/crates/analytics/docs/clickhouse/scripts/payment_intents.sql b/crates/analytics/docs/clickhouse/scripts/payment_intents.sql new file mode 100644 index 000000000000..8cd487f364b4 --- /dev/null +++ b/crates/analytics/docs/clickhouse/scripts/payment_intents.sql @@ -0,0 +1,116 @@ +CREATE TABLE payment_intents_queue ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + `statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` String, + `business_label` String, + `modified_at` DateTime CODEC(T64, LZ4), + `created_at` DateTime CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-payment-intent-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + + +CREATE TABLE payment_intents_dist ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + `statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` 
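+-- NOTE (editorial): unlike the cluster variants, the single-node views
+-- (kafka_parse_pa above, kafka_parse_payment_intent and kafka_parse_refund
+-- below) select from their queues without a WHERE length(_error) = 0 guard
+-- and define no *_parse_errors companion, so with
+-- kafka_handle_error_mode = 'stream' a malformed message surfaces as a row of
+-- default values; adding the same guard as the cluster scripts would likely
+-- be safer.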
LowCardinality(String), + `business_label` String, + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `last_synced` Nullable(DateTime) CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8, + INDEX connectorIndex connector_id TYPE bloom_filter GRANULARITY 1, + INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex status TYPE bloom_filter GRANULARITY 1 +) ENGINE = CollapsingMergeTree( + sign_flag +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, payment_id) +TTL created_at + toIntervalMonth(6) +; + +CREATE MATERIALIZED VIEW kafka_parse_payment_intent TO payment_intents_dist ( + `payment_id` String, + `merchant_id` String, + `status` LowCardinality(String), + `amount` UInt32, + `currency` LowCardinality(Nullable(String)), + `amount_captured` Nullable(UInt32), + `customer_id` Nullable(String), + `description` Nullable(String), + `return_url` Nullable(String), + `connector_id` LowCardinality(Nullable(String)), + `statement_descriptor_name` Nullable(String), + `statement_descriptor_suffix` Nullable(String), + `setup_future_usage` LowCardinality(Nullable(String)), + `off_session` Nullable(Bool), + `client_secret` Nullable(String), + `active_attempt_id` String, + `business_country` LowCardinality(String), + `business_label` String, + `modified_at` DateTime64(3), + `created_at` DateTime64(3), + `last_synced` Nullable(DateTime64(3)), + `inserted_at` DateTime64(3), + `sign_flag` Int8 +) AS +SELECT + payment_id, + merchant_id, + status, + amount, + currency, + amount_captured, + customer_id, + description, + return_url, + connector_id, + statement_descriptor_name, + statement_descriptor_suffix, + setup_future_usage, + off_session, + client_secret, + active_attempt_id, + business_country, + business_label, + modified_at, + created_at, + last_synced, + now() as inserted_at, + sign_flag +FROM payment_intents_queue; diff --git a/crates/analytics/docs/clickhouse/scripts/refunds.sql b/crates/analytics/docs/clickhouse/scripts/refunds.sql new file mode 100644 index 000000000000..a131270c1326 --- /dev/null +++ b/crates/analytics/docs/clickhouse/scripts/refunds.sql @@ -0,0 +1,121 @@ +CREATE TABLE refund_queue ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` Nullable(String), + `created_at` DateTime CODEC(T64, LZ4), + `modified_at` DateTime CODEC(T64, LZ4), + `sign_flag` Int8 +) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092', +kafka_topic_list = 'hyperswitch-refund-events', +kafka_group_name = 'hyper-c1', +kafka_format = 'JSONEachRow', +kafka_handle_error_mode = 'stream'; + + +CREATE TABLE refund_dist ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` 
Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` Nullable(String), + `created_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4), + `sign_flag` Int8, + INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1, + INDEX refundTypeIndex refund_type TYPE bloom_filter GRANULARITY 1, + INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1, + INDEX statusIndex refund_status TYPE bloom_filter GRANULARITY 1 +) ENGINE = CollapsingMergeTree( + sign_flag +) +PARTITION BY toStartOfDay(created_at) +ORDER BY + (created_at, merchant_id, refund_id) +TTL created_at + toIntervalMonth(6) +; + +CREATE MATERIALIZED VIEW kafka_parse_refund TO refund_dist ( + `internal_reference_id` String, + `refund_id` String, + `payment_id` String, + `merchant_id` String, + `connector_transaction_id` String, + `connector` LowCardinality(Nullable(String)), + `connector_refund_id` Nullable(String), + `external_reference_id` Nullable(String), + `refund_type` LowCardinality(String), + `total_amount` Nullable(UInt32), + `currency` LowCardinality(String), + `refund_amount` Nullable(UInt32), + `refund_status` LowCardinality(String), + `sent_to_gateway` Bool, + `refund_error_message` Nullable(String), + `refund_arn` Nullable(String), + `attempt_id` String, + `description` Nullable(String), + `refund_reason` Nullable(String), + `refund_error_code` Nullable(String), + `created_at` DateTime64(3), + `modified_at` DateTime64(3), + `inserted_at` DateTime64(3), + `sign_flag` Int8 +) AS +SELECT + internal_reference_id, + refund_id, + payment_id, + merchant_id, + connector_transaction_id, + connector, + connector_refund_id, + external_reference_id, + refund_type, + total_amount, + currency, + refund_amount, + refund_status, + sent_to_gateway, + refund_error_message, + refund_arn, + attempt_id, + description, + refund_reason, + refund_error_code, + created_at, + modified_at, + now() as inserted_at, + sign_flag +FROM refund_queue; diff --git a/crates/analytics/src/api_event.rs b/crates/analytics/src/api_event.rs new file mode 100644 index 000000000000..113344d47254 --- /dev/null +++ b/crates/analytics/src/api_event.rs @@ -0,0 +1,9 @@ +mod core; +pub mod events; +pub mod filters; +pub mod metrics; +pub mod types; + +pub trait APIEventAnalytics: events::ApiLogsFilterAnalytics {} + +pub use self::core::{api_events_core, get_api_event_metrics, get_filters}; diff --git a/crates/analytics/src/api_event/core.rs b/crates/analytics/src/api_event/core.rs new file mode 100644 index 000000000000..b368d6374f75 --- /dev/null +++ b/crates/analytics/src/api_event/core.rs @@ -0,0 +1,176 @@ +use std::collections::HashMap; + +use api_models::analytics::{ + api_event::{ + ApiEventMetricsBucketIdentifier, ApiEventMetricsBucketValue, ApiLogsRequest, + ApiMetricsBucketResponse, + }, + AnalyticsMetadata, ApiEventFiltersResponse, GetApiEventFiltersRequest, + GetApiEventMetricRequest, MetricsResponse, +}; +use error_stack::{IntoReport, ResultExt}; +use router_env::{ + instrument, logger, + tracing::{self, Instrument}, +}; + +use super::{ + 
events::{get_api_event, ApiLogsResult},
+    metrics::ApiEventMetricRow,
+};
+use crate::{
+    errors::{AnalyticsError, AnalyticsResult},
+    metrics,
+    types::FiltersError,
+    AnalyticsProvider,
+};
+
+#[instrument(skip_all)]
+pub async fn api_events_core(
+    pool: &AnalyticsProvider,
+    req: ApiLogsRequest,
+    merchant_id: String,
+) -> AnalyticsResult<Vec<ApiLogsResult>> {
+    let data = match pool {
+        AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented)
+            .into_report()
+            .attach_printable("SQL Analytics is not implemented for API Events"),
+        AnalyticsProvider::Clickhouse(pool) => get_api_event(&merchant_id, req, pool).await,
+        AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
+        | AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
+            get_api_event(&merchant_id, req, ckh_pool).await
+        }
+    }
+    .change_context(AnalyticsError::UnknownError)?;
+    Ok(data)
+}
+
+pub async fn get_filters(
+    pool: &AnalyticsProvider,
+    req: GetApiEventFiltersRequest,
+    merchant_id: String,
+) -> AnalyticsResult<ApiEventFiltersResponse> {
+    use api_models::analytics::{api_event::ApiEventDimensions, ApiEventFilterValue};
+
+    use super::filters::get_api_event_filter_for_dimension;
+    use crate::api_event::filters::ApiEventFilter;
+
+    let mut res = ApiEventFiltersResponse::default();
+    for dim in req.group_by_names {
+        let values = match pool {
+            AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented)
+                .into_report()
+                .attach_printable("SQL Analytics is not implemented for API Events"),
+            AnalyticsProvider::Clickhouse(ckh_pool)
+            | AnalyticsProvider::CombinedSqlx(_, ckh_pool)
+            | AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
+                get_api_event_filter_for_dimension(dim, &merchant_id, &req.time_range, ckh_pool)
+                    .await
+            }
+        }
+        .change_context(AnalyticsError::UnknownError)?
+        .into_iter()
+        .filter_map(|fil: ApiEventFilter| match dim {
+            ApiEventDimensions::StatusCode => fil.status_code.map(|i| i.to_string()),
+            ApiEventDimensions::FlowType => fil.flow_type,
+            ApiEventDimensions::ApiFlow => fil.api_flow,
+        })
+        .collect::<Vec<String>>();
+        res.query_data.push(ApiEventFilterValue {
+            dimension: dim,
+            values,
+        })
+    }
+
+    Ok(res)
+}
+
+#[instrument(skip_all)]
+pub async fn get_api_event_metrics(
+    pool: &AnalyticsProvider,
+    merchant_id: &str,
+    req: GetApiEventMetricRequest,
+) -> AnalyticsResult<MetricsResponse<ApiMetricsBucketResponse>> {
+    let mut metrics_accumulator: HashMap<ApiEventMetricsBucketIdentifier, ApiEventMetricRow> =
+        HashMap::new();
+
+    let mut set = tokio::task::JoinSet::new();
+    for metric_type in req.metrics.iter().cloned() {
+        let req = req.clone();
+        let pool = pool.clone();
+        let task_span = tracing::debug_span!(
+            "analytics_api_metrics_query",
+            api_event_metric = metric_type.as_ref()
+        );
+
+        // TODO: lifetime issues with joinset,
+        // can be optimized away if joinset lifetime requirements are relaxed
+        let merchant_id_scoped = merchant_id.to_owned();
+        set.spawn(
+            async move {
+                let data = pool
+                    .get_api_event_metrics(
+                        &metric_type,
+                        &req.group_by_names.clone(),
+                        &merchant_id_scoped,
+                        &req.filters,
+                        &req.time_series.map(|t| t.granularity),
+                        &req.time_range,
+                    )
+                    .await
+                    .change_context(AnalyticsError::UnknownError);
+                (metric_type, data)
+            }
+            .instrument(task_span),
+        );
+    }
+
+    while let Some((metric, data)) = set
+        .join_next()
+        .await
+        .transpose()
+        .into_report()
+        .change_context(AnalyticsError::UnknownError)?
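+// NOTE (editorial): get_api_event_metrics fans out one ClickHouse query per
+// requested metric on a tokio JoinSet, then the loop below folds the rows
+// into metrics_accumulator keyed by time bucket; Option::or keeps the first
+// Some, so each bucket collects whichever of api_count / status_code_count /
+// latency the individual metric queries produced. A minimal sketch of the
+// same fork-join shape (run_metric and merge are illustrative names):
+//
+//   let mut set = tokio::task::JoinSet::new();
+//   for m in metrics {
+//       set.spawn(async move { (m, run_metric(m).await) });
+//   }
+//   while let Some(joined) = set.join_next().await {
+//       let (m, rows) = joined?;
+//       merge(m, rows?);
+//   }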
+    {
+        let data = data?;
+        let attributes = &[
+            metrics::request::add_attributes("metric_type", metric.to_string()),
+            metrics::request::add_attributes("source", pool.to_string()),
+        ];
+
+        let value = u64::try_from(data.len());
+        if let Ok(val) = value {
+            metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes);
+            logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
+        }
+        for (id, value) in data {
+            metrics_accumulator
+                .entry(id)
+                .and_modify(|data| {
+                    data.api_count = data.api_count.or(value.api_count);
+                    data.status_code_count = data.status_code_count.or(value.status_code_count);
+                    data.latency = data.latency.or(value.latency);
+                })
+                .or_insert(value);
+        }
+    }
+
+    let query_data: Vec<ApiMetricsBucketResponse> = metrics_accumulator
+        .into_iter()
+        .map(|(id, val)| ApiMetricsBucketResponse {
+            values: ApiEventMetricsBucketValue {
+                latency: val.latency,
+                api_count: val.api_count,
+                status_code_count: val.status_code_count,
+            },
+            dimensions: id,
+        })
+        .collect();
+
+    Ok(MetricsResponse {
+        query_data,
+        meta_data: [AnalyticsMetadata {
+            current_time_range: req.time_range,
+        }],
+    })
+}
diff --git a/crates/analytics/src/api_event/events.rs b/crates/analytics/src/api_event/events.rs
new file mode 100644
index 000000000000..73b3fb9cbad2
--- /dev/null
+++ b/crates/analytics/src/api_event/events.rs
@@ -0,0 +1,105 @@
+use api_models::analytics::{
+    api_event::{ApiLogsRequest, QueryType},
+    Granularity,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use router_env::Flow;
+use time::PrimitiveDateTime;
+
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
+};
+pub trait ApiLogsFilterAnalytics: LoadRow<ApiLogsResult> {}
+
+pub async fn get_api_event<T>(
+    merchant_id: &String,
+    query_param: ApiLogsRequest,
+    pool: &T,
+) -> FiltersResult<Vec<ApiLogsResult>>
+where
+    T: AnalyticsDataSource + ApiLogsFilterAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+    query_builder.add_select_column("*").switch()?;
+
+    query_builder
+        .add_filter_clause("merchant_id", merchant_id)
+        .switch()?;
+    match query_param.query_param {
+        QueryType::Payment { payment_id } => query_builder
+            .add_filter_clause("payment_id", payment_id)
+            .switch()?,
+        QueryType::Refund {
+            payment_id,
+            refund_id,
+        } => {
+            query_builder
+                .add_filter_clause("payment_id", payment_id)
+                .switch()?;
+            query_builder
+                .add_filter_clause("refund_id", refund_id)
+                .switch()?;
+        }
+    }
+    if let Some(list_api_name) = query_param.api_name_filter {
+        query_builder
+            .add_filter_in_range_clause("api_flow", &list_api_name)
+            .switch()?;
+    } else {
+        query_builder
+            .add_filter_in_range_clause(
+                "api_flow",
+                &[
+                    Flow::PaymentsCancel,
+                    Flow::PaymentsCapture,
+                    Flow::PaymentsConfirm,
+                    Flow::PaymentsCreate,
+                    Flow::PaymentsStart,
+                    Flow::PaymentsUpdate,
+                    Flow::RefundsCreate,
+                    Flow::IncomingWebhookReceive,
+                ],
+            )
+            .switch()?;
+    }
+    //TODO!: update the execute_query function to return reports instead of plain errors...
+    query_builder
+        .execute_query::<ApiLogsResult, _>(pool)
+        .await
+        .change_context(FiltersError::QueryBuildingError)?
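+// NOTE (editorial): execute_query returns a nested Result: the outer layer
+// carries query *construction* failures and the inner layer carries
+// *execution* failures, which is why a single `?` sits between the two
+// change_context calls here, i.e. roughly (types illustrative):
+//
+//   let nested: Result<Result<Vec<Row>, ExecErr>, BuildErr> = build_and_run();
+//   nested.change_context(FiltersError::QueryBuildingError)?   // outer: build
+//         .change_context(FiltersError::QueryExecutionFailure) // inner: run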
+        .change_context(FiltersError::QueryExecutionFailure)
+}
+#[derive(Debug, serde::Serialize, serde::Deserialize)]
+pub struct ApiLogsResult {
+    pub merchant_id: String,
+    pub payment_id: Option<String>,
+    pub refund_id: Option<String>,
+    pub payment_method_id: Option<String>,
+    pub payment_method: Option<String>,
+    pub payment_method_type: Option<String>,
+    pub customer_id: Option<String>,
+    pub user_id: Option<String>,
+    pub connector: Option<String>,
+    pub request_id: Option<String>,
+    pub flow_type: String,
+    pub api_flow: String,
+    pub api_auth_type: Option<String>,
+    pub request: String,
+    pub response: Option<String>,
+    pub error: Option<serde_json::Value>,
+    pub authentication_data: Option<String>,
+    pub status_code: u16,
+    pub latency: Option<u128>,
+    pub user_agent: Option<String>,
+    pub hs_latency: Option<u128>,
+    pub ip_addr: Option<String>,
+    #[serde(with = "common_utils::custom_serde::iso8601")]
+    pub created_at: PrimitiveDateTime,
+}
diff --git a/crates/analytics/src/api_event/filters.rs b/crates/analytics/src/api_event/filters.rs
new file mode 100644
index 000000000000..87414ebad4ba
--- /dev/null
+++ b/crates/analytics/src/api_event/filters.rs
@@ -0,0 +1,53 @@
+use api_models::analytics::{api_event::ApiEventDimensions, Granularity, TimeRange};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
+};
+
+pub trait ApiEventFilterAnalytics: LoadRow<ApiEventFilter> {}
+
+pub async fn get_api_event_filter_for_dimension<T>(
+    dimension: ApiEventDimensions,
+    merchant_id: &String,
+    time_range: &TimeRange,
+    pool: &T,
+) -> FiltersResult<Vec<ApiEventFilter>>
+where
+    T: AnalyticsDataSource + ApiEventFilterAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+    query_builder.add_select_column(dimension).switch()?;
+    time_range
+        .set_filter_clause(&mut query_builder)
+        .attach_printable("Error filtering time range")
+        .switch()?;
+
+    query_builder
+        .add_filter_clause("merchant_id", merchant_id)
+        .switch()?;
+
+    query_builder.set_distinct();
+
+    query_builder
+        .execute_query::<ApiEventFilter, _>(pool)
+        .await
+        .change_context(FiltersError::QueryBuildingError)?
+        .change_context(FiltersError::QueryExecutionFailure)
+}
+
+#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
+pub struct ApiEventFilter {
+    pub status_code: Option<u64>,
+    pub flow_type: Option<String>,
+    pub api_flow: Option<String>,
+}
diff --git a/crates/analytics/src/api_event/metrics.rs b/crates/analytics/src/api_event/metrics.rs
new file mode 100644
index 000000000000..16f2d7a2f5ab
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics.rs
@@ -0,0 +1,110 @@
+use api_models::analytics::{
+    api_event::{
+        ApiEventDimensions, ApiEventFilters, ApiEventMetrics, ApiEventMetricsBucketIdentifier,
+    },
+    Granularity, TimeRange,
+};
+use time::PrimitiveDateTime;
+
+use crate::{
+    query::{Aggregate, GroupByClause, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult},
+};
+
+mod api_count;
+pub mod latency;
+mod status_code_count;
+use api_count::ApiCount;
+use latency::MaxLatency;
+use status_code_count::StatusCodeCount;
+
+use self::latency::LatencyAvg;
+
+#[derive(Debug, PartialEq, Eq, serde::Deserialize)]
+pub struct ApiEventMetricRow {
+    pub latency: Option<u64>,
+    pub api_count: Option<u64>,
+    pub status_code_count: Option<u64>,
+    #[serde(with = "common_utils::custom_serde::iso8601::option")]
+    pub start_bucket: Option<PrimitiveDateTime>,
+    #[serde(with = "common_utils::custom_serde::iso8601::option")]
+    pub end_bucket: Option<PrimitiveDateTime>,
+}
+
+pub trait ApiEventMetricAnalytics: LoadRow<ApiEventMetricRow> + LoadRow<LatencyAvg> {}
+
+#[async_trait::async_trait]
+pub trait ApiEventMetric<T>
+where
+    T: AnalyticsDataSource + ApiEventMetricAnalytics,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[ApiEventDimensions],
+        merchant_id: &str,
+        filters: &ApiEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>>;
+}
+
+#[async_trait::async_trait]
+impl<T> ApiEventMetric<T> for ApiEventMetrics
+where
+    T: AnalyticsDataSource + ApiEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[ApiEventDimensions],
+        merchant_id: &str,
+        filters: &ApiEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+        match self {
+            Self::Latency => {
+                MaxLatency
+                    .load_metrics(
+                        dimensions,
+                        merchant_id,
+                        filters,
+                        granularity,
+                        time_range,
+                        pool,
+                    )
+                    .await
+            }
+            Self::ApiCount => {
+                ApiCount
+                    .load_metrics(
+                        dimensions,
+                        merchant_id,
+                        filters,
+                        granularity,
+                        time_range,
+                        pool,
+                    )
+                    .await
+            }
+            Self::StatusCodeCount => {
+                StatusCodeCount
+                    .load_metrics(
+                        dimensions,
+                        merchant_id,
+                        filters,
+                        granularity,
+                        time_range,
+                        pool,
+                    )
+                    .await
+            }
+        }
+    }
+}
diff --git a/crates/analytics/src/api_event/metrics/api_count.rs b/crates/analytics/src/api_event/metrics/api_count.rs
new file mode 100644
index 000000000000..7f5f291aa53e
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics/api_count.rs
@@ -0,0 +1,106 @@
+use api_models::analytics::{
+    api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::ApiEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct ApiCount;
+
+#[async_trait::async_trait]
+impl<T> super::ApiEventMetric<T> for ApiCount
+where
+    T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        _dimensions: &[ApiEventDimensions],
+        merchant_id: &str,
+        filters: &ApiEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("api_count"),
+            })
+            .switch()?;
+        if !filters.flow_type.is_empty() {
+            query_builder
+                .add_filter_in_range_clause(ApiEventDimensions::FlowType, &filters.flow_type)
+                .attach_printable("Error adding flow_type filter")
+                .switch()?;
+        }
+        query_builder
+            .add_select_column(Aggregate::Min {
+                field: "created_at",
+                alias: Some("start_bucket"),
+            })
+            .switch()?;
+        query_builder
+            .add_select_column(Aggregate::Max {
+                field: "created_at",
+                alias: Some("end_bucket"),
+            })
+            .switch()?;
+        if let Some(granularity) = granularity.as_ref() {
+            granularity
+                .set_group_by_clause(&mut query_builder)
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .add_filter_clause("merchant_id", merchant_id)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        query_builder
+            .execute_query::<ApiEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    ApiEventMetricsBucketIdentifier::new(TimeRange {
+                        start_time: match (granularity, i.start_bucket) {
+                            (Some(g), Some(st)) => g.clip_to_start(st)?,
+                            _ => time_range.start_time,
+                        },
+                        end_time: granularity.as_ref().map_or_else(
+                            || Ok(time_range.end_time),
+                            |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
+                        )?,
+                    }),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
    }
+}
diff --git a/crates/analytics/src/api_event/metrics/latency.rs b/crates/analytics/src/api_event/metrics/latency.rs
new file mode 100644
index 000000000000..379b39fbeb9e
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics/latency.rs
@@ -0,0 +1,138 @@
+use api_models::analytics::{
+    api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::ApiEventMetricRow;
+use crate::{
+    query::{
+        Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
+        Window,
+    },
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct MaxLatency;
+
+#[async_trait::async_trait]
+impl<T> super::ApiEventMetric<T> for MaxLatency
+where
+    T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        _dimensions: &[ApiEventDimensions],
+        merchant_id: &str,
+        filters: &ApiEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+        query_builder
+            .add_select_column(Aggregate::Sum {
+                field: "latency",
+                alias: Some("latency_sum"),
+            })
+            .switch()?;
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: Some("latency"),
+                alias: Some("latency_count"),
+            })
+            .switch()?;
+
+        query_builder
+            .add_select_column(Aggregate::Min {
+                field: "created_at",
+                alias: Some("start_bucket"),
+            })
+            .switch()?;
+        query_builder
+            .add_select_column(Aggregate::Max {
+                field: "created_at",
+                alias: Some("end_bucket"),
+            })
+            .switch()?;
+        if let Some(granularity) = granularity.as_ref() {
+            granularity
+                .set_group_by_clause(&mut query_builder)
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", merchant_id)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        query_builder
+            .add_custom_filter_clause("request", "10.63.134.6", FilterTypes::NotLike)
+            .attach_printable("Error filtering out locker IP")
+            .switch()?;
+
+        query_builder
+            .execute_query::<LatencyAvg, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    ApiEventMetricsBucketIdentifier::new(TimeRange {
+                        start_time: match (granularity, i.start_bucket) {
+                            (Some(g), Some(st)) => g.clip_to_start(st)?,
+                            _ => time_range.start_time,
+                        },
+                        end_time: granularity.as_ref().map_or_else(
+                            || Ok(time_range.end_time),
+                            |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
+                        )?,
+                    }),
+                    ApiEventMetricRow {
+                        latency: if i.latency_count != 0 {
+                            Some(i.latency_sum.unwrap_or(0) / i.latency_count)
+                        } else {
+                            None
+                        },
+                        api_count: None,
+                        status_code_count: None,
+                        start_bucket: i.start_bucket,
+                        end_bucket: i.end_bucket,
+                    },
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, serde::Deserialize)]
+pub struct LatencyAvg {
+    latency_sum: Option<u64>,
+    latency_count: u64,
+    #[serde(with = "common_utils::custom_serde::iso8601::option")]
+    pub start_bucket: Option<PrimitiveDateTime>,
+    #[serde(with = "common_utils::custom_serde::iso8601::option")]
+    pub end_bucket: Option<PrimitiveDateTime>,
+}
diff --git a/crates/analytics/src/api_event/metrics/status_code_count.rs b/crates/analytics/src/api_event/metrics/status_code_count.rs
new file mode 100644
index 000000000000..5c652fd8e0c9
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics/status_code_count.rs
@@ -0,0 +1,103 @@
+use api_models::analytics::{
+    api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::ApiEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct StatusCodeCount;
+
+#[async_trait::async_trait]
+impl<T> super::ApiEventMetric<T> for StatusCodeCount
+where
+    T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        _dimensions:
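+// NOTE (editorial): despite its name, MaxLatency above computes an *average*:
+// it selects sum(latency) and count(latency) per bucket (the LatencyAvg row)
+// and divides after the fact, i.e. latency = latency_sum / latency_count when
+// the count is non-zero. The NotLike filter on "request" excludes rows whose
+// payload contains the hard-coded address 10.63.134.6, presumably so card
+// locker round-trips do not skew the averages.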
&[ApiEventDimensions], + merchant_id: &str, + filters: &ApiEventFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult> { + let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::ApiEvents); + + query_builder + .add_select_column(Aggregate::Count { + field: Some("status_code"), + alias: Some("status_code_count"), + }) + .switch()?; + + filters.set_filter_clause(&mut query_builder).switch()?; + + query_builder + .add_filter_clause("merchant_id", merchant_id) + .switch()?; + + time_range + .set_filter_clause(&mut query_builder) + .attach_printable("Error filtering time range") + .switch()?; + + query_builder + .add_select_column(Aggregate::Min { + field: "created_at", + alias: Some("start_bucket"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Max { + field: "created_at", + alias: Some("end_bucket"), + }) + .switch()?; + if let Some(granularity) = granularity.as_ref() { + granularity + .set_group_by_clause(&mut query_builder) + .attach_printable("Error adding granularity") + .switch()?; + } + + query_builder + .execute_query::(pool) + .await + .change_context(MetricsError::QueryBuildingError)? + .change_context(MetricsError::QueryExecutionFailure)? + .into_iter() + .map(|i| { + Ok(( + ApiEventMetricsBucketIdentifier::new(TimeRange { + start_time: match (granularity, i.start_bucket) { + (Some(g), Some(st)) => g.clip_to_start(st)?, + _ => time_range.start_time, + }, + end_time: granularity.as_ref().map_or_else( + || Ok(time_range.end_time), + |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), + )?, + }), + i, + )) + }) + .collect::, + crate::query::PostProcessingError, + >>() + .change_context(MetricsError::PostProcessingFailure) + } +} diff --git a/crates/analytics/src/api_event/types.rs b/crates/analytics/src/api_event/types.rs new file mode 100644 index 000000000000..72205fc72abf --- /dev/null +++ b/crates/analytics/src/api_event/types.rs @@ -0,0 +1,33 @@ +use api_models::analytics::api_event::{ApiEventDimensions, ApiEventFilters}; +use error_stack::ResultExt; + +use crate::{ + query::{QueryBuilder, QueryFilter, QueryResult, ToSql}, + types::{AnalyticsCollection, AnalyticsDataSource}, +}; + +impl QueryFilter for ApiEventFilters +where + T: AnalyticsDataSource, + AnalyticsCollection: ToSql, +{ + fn set_filter_clause(&self, builder: &mut QueryBuilder) -> QueryResult<()> { + if !self.status_code.is_empty() { + builder + .add_filter_in_range_clause(ApiEventDimensions::StatusCode, &self.status_code) + .attach_printable("Error adding status_code filter")?; + } + if !self.flow_type.is_empty() { + builder + .add_filter_in_range_clause(ApiEventDimensions::FlowType, &self.flow_type) + .attach_printable("Error adding flow_type filter")?; + } + if !self.api_flow.is_empty() { + builder + .add_filter_in_range_clause(ApiEventDimensions::ApiFlow, &self.api_flow) + .attach_printable("Error adding api_name filter")?; + } + + Ok(()) + } +} diff --git a/crates/analytics/src/clickhouse.rs b/crates/analytics/src/clickhouse.rs new file mode 100644 index 000000000000..964486c93649 --- /dev/null +++ b/crates/analytics/src/clickhouse.rs @@ -0,0 +1,458 @@ +use std::sync::Arc; + +use actix_web::http::StatusCode; +use common_utils::errors::ParsingError; +use error_stack::{IntoReport, Report, ResultExt}; +use router_env::logger; +use time::PrimitiveDateTime; + +use super::{ + payments::{ + distribution::PaymentDistributionRow, filters::FilterRow, metrics::PaymentMetricRow, + }, + query::{Aggregate, ToSql, Window}, + 
refunds::{filters::RefundFilterRow, metrics::RefundMetricRow}, + sdk_events::{filters::SdkEventFilter, metrics::SdkEventMetricRow}, + types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, QueryExecutionError}, +}; +use crate::{ + api_event::{ + events::ApiLogsResult, + filters::ApiEventFilter, + metrics::{latency::LatencyAvg, ApiEventMetricRow}, + }, + sdk_events::events::SdkEventsResult, + types::TableEngine, +}; + +pub type ClickhouseResult = error_stack::Result; + +#[derive(Clone, Debug)] +pub struct ClickhouseClient { + pub config: Arc, +} + +#[derive(Clone, Debug, serde::Deserialize)] +pub struct ClickhouseConfig { + username: String, + password: Option, + host: String, + database_name: String, +} + +impl Default for ClickhouseConfig { + fn default() -> Self { + Self { + username: "default".to_string(), + password: None, + host: "http://localhost:8123".to_string(), + database_name: "default".to_string(), + } + } +} + +impl ClickhouseClient { + async fn execute_query(&self, query: &str) -> ClickhouseResult> { + logger::debug!("Executing query: {query}"); + let client = reqwest::Client::new(); + let params = CkhQuery { + date_time_output_format: String::from("iso"), + output_format_json_quote_64bit_integers: 0, + database: self.config.database_name.clone(), + }; + let response = client + .post(&self.config.host) + .query(¶ms) + .basic_auth(self.config.username.clone(), self.config.password.clone()) + .body(format!("{query}\nFORMAT JSON")) + .send() + .await + .into_report() + .change_context(ClickhouseError::ConnectionError)?; + + logger::debug!(clickhouse_response=?response, query=?query, "Clickhouse response"); + if response.status() != StatusCode::OK { + response.text().await.map_or_else( + |er| { + Err(ClickhouseError::ResponseError) + .into_report() + .attach_printable_lazy(|| format!("Error: {er:?}")) + }, + |t| Err(ClickhouseError::ResponseNotOK(t)).into_report(), + ) + } else { + Ok(response + .json::>() + .await + .into_report() + .change_context(ClickhouseError::ResponseError)? + .data) + } + } +} + +#[async_trait::async_trait] +impl AnalyticsDataSource for ClickhouseClient { + type Row = serde_json::Value; + + async fn load_results( + &self, + query: &str, + ) -> common_utils::errors::CustomResult, QueryExecutionError> + where + Self: LoadRow, + { + self.execute_query(query) + .await + .change_context(QueryExecutionError::DatabaseError)? 
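            // Rows come back as raw `serde_json::Value`s extracted from the
            // `FORMAT JSON` response body; each one is converted into the
            // caller's typed row through the `LoadRow` impl below. For example
            // (illustrative shape, not a literal fixture), a payments row like
            // `{"currency": "USD", "count": 42, ...}` deserializes into a
            // `PaymentMetricRow` via the `TryInto` implementations further down.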
+ .into_iter() + .map(Self::load_row) + .collect::, _>>() + .change_context(QueryExecutionError::RowExtractionFailure) + } + + fn get_table_engine(table: AnalyticsCollection) -> TableEngine { + match table { + AnalyticsCollection::Payment + | AnalyticsCollection::Refund + | AnalyticsCollection::PaymentIntent => { + TableEngine::CollapsingMergeTree { sign: "sign_flag" } + } + AnalyticsCollection::SdkEvents => TableEngine::BasicTree, + AnalyticsCollection::ApiEvents => TableEngine::BasicTree, + } + } +} + +impl LoadRow for ClickhouseClient +where + Self::Row: TryInto>, +{ + fn load_row(row: Self::Row) -> common_utils::errors::CustomResult { + row.try_into() + .change_context(QueryExecutionError::RowExtractionFailure) + } +} + +impl super::payments::filters::PaymentFilterAnalytics for ClickhouseClient {} +impl super::payments::metrics::PaymentMetricAnalytics for ClickhouseClient {} +impl super::payments::distribution::PaymentDistributionAnalytics for ClickhouseClient {} +impl super::refunds::metrics::RefundMetricAnalytics for ClickhouseClient {} +impl super::refunds::filters::RefundFilterAnalytics for ClickhouseClient {} +impl super::sdk_events::filters::SdkEventFilterAnalytics for ClickhouseClient {} +impl super::sdk_events::metrics::SdkEventMetricAnalytics for ClickhouseClient {} +impl super::sdk_events::events::SdkEventsFilterAnalytics for ClickhouseClient {} +impl super::api_event::events::ApiLogsFilterAnalytics for ClickhouseClient {} +impl super::api_event::filters::ApiEventFilterAnalytics for ClickhouseClient {} +impl super::api_event::metrics::ApiEventMetricAnalytics for ClickhouseClient {} + +#[derive(Debug, serde::Serialize)] +struct CkhQuery { + date_time_output_format: String, + output_format_json_quote_64bit_integers: u8, + database: String, +} + +#[derive(Debug, serde::Deserialize)] +struct CkhOutput { + data: Vec, +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse ApiLogsResult in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse SdkEventsResult in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse PaymentMetricRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse PaymentDistributionRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse FilterRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse RefundMetricRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) 
-> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse RefundFilterRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse ApiEventMetricRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse LatencyAvg in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse SdkEventMetricRow in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse SdkEventFilter in clickhouse results", + )) + } +} + +impl TryInto for serde_json::Value { + type Error = Report; + + fn try_into(self) -> Result { + serde_json::from_value(self) + .into_report() + .change_context(ParsingError::StructParseFailure( + "Failed to parse ApiEventFilter in clickhouse results", + )) + } +} + +impl ToSql for PrimitiveDateTime { + fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result { + let format = + time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second]") + .into_report() + .change_context(ParsingError::DateTimeParsingError) + .attach_printable("Failed to parse format description")?; + self.format(&format) + .into_report() + .change_context(ParsingError::EncodeError( + "failed to encode to clickhouse date-time format", + )) + .attach_printable("Failed to format date time") + } +} + +impl ToSql for AnalyticsCollection { + fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result { + match self { + Self::Payment => Ok("payment_attempt_dist".to_string()), + Self::Refund => Ok("refund_dist".to_string()), + Self::SdkEvents => Ok("sdk_events_dist".to_string()), + Self::ApiEvents => Ok("api_audit_log".to_string()), + Self::PaymentIntent => Ok("payment_intents_dist".to_string()), + } + } +} + +impl ToSql for Aggregate +where + T: ToSql, +{ + fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result { + Ok(match self { + Self::Count { field: _, alias } => { + let query = match table_engine { + TableEngine::CollapsingMergeTree { sign } => format!("sum({sign})"), + TableEngine::BasicTree => "count(*)".to_string(), + }; + format!( + "{query}{}", + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + Self::Sum { field, alias } => { + let query = match table_engine { + TableEngine::CollapsingMergeTree { sign } => format!( + "sum({sign} * {})", + field + .to_sql(table_engine) + .attach_printable("Failed to sum aggregate")? + ), + TableEngine::BasicTree => format!( + "sum({})", + field + .to_sql(table_engine) + .attach_printable("Failed to sum aggregate")? 
+ ), + }; + format!( + "{query}{}", + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + Self::Min { field, alias } => { + format!( + "min({}){}", + field + .to_sql(table_engine) + .attach_printable("Failed to min aggregate")?, + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + Self::Max { field, alias } => { + format!( + "max({}){}", + field + .to_sql(table_engine) + .attach_printable("Failed to max aggregate")?, + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + }) + } +} + +impl ToSql for Window +where + T: ToSql, +{ + fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result { + Ok(match self { + Self::Sum { + field, + partition_by, + order_by, + alias, + } => { + format!( + "sum({}) over ({}{}){}", + field + .to_sql(table_engine) + .attach_printable("Failed to sum window")?, + partition_by.as_ref().map_or_else( + || "".to_owned(), + |partition_by| format!("partition by {}", partition_by.to_owned()) + ), + order_by.as_ref().map_or_else( + || "".to_owned(), + |(order_column, order)| format!( + " order by {} {}", + order_column.to_owned(), + order.to_string() + ) + ), + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + Self::RowNumber { + field: _, + partition_by, + order_by, + alias, + } => { + format!( + "row_number() over ({}{}){}", + partition_by.as_ref().map_or_else( + || "".to_owned(), + |partition_by| format!("partition by {}", partition_by.to_owned()) + ), + order_by.as_ref().map_or_else( + || "".to_owned(), + |(order_column, order)| format!( + " order by {} {}", + order_column.to_owned(), + order.to_string() + ) + ), + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + }) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum ClickhouseError { + #[error("Clickhouse connection error")] + ConnectionError, + #[error("Clickhouse NON-200 response content: '{0}'")] + ResponseNotOK(String), + #[error("Clickhouse response error")] + ResponseError, +} diff --git a/crates/analytics/src/core.rs b/crates/analytics/src/core.rs new file mode 100644 index 000000000000..354e1e2f1766 --- /dev/null +++ b/crates/analytics/src/core.rs @@ -0,0 +1,31 @@ +use api_models::analytics::GetInfoResponse; + +use crate::{types::AnalyticsDomain, utils}; + +pub async fn get_domain_info( + domain: AnalyticsDomain, +) -> crate::errors::AnalyticsResult { + let info = match domain { + AnalyticsDomain::Payments => GetInfoResponse { + metrics: utils::get_payment_metrics_info(), + download_dimensions: None, + dimensions: utils::get_payment_dimensions(), + }, + AnalyticsDomain::Refunds => GetInfoResponse { + metrics: utils::get_refund_metrics_info(), + download_dimensions: None, + dimensions: utils::get_refund_dimensions(), + }, + AnalyticsDomain::SdkEvents => GetInfoResponse { + metrics: utils::get_sdk_event_metrics_info(), + download_dimensions: None, + dimensions: utils::get_sdk_event_dimensions(), + }, + AnalyticsDomain::ApiEvents => GetInfoResponse { + metrics: utils::get_api_event_metrics_info(), + download_dimensions: None, + dimensions: utils::get_api_event_dimensions(), + }, + }; + Ok(info) +} diff --git a/crates/router/src/analytics/errors.rs b/crates/analytics/src/errors.rs similarity index 100% rename from crates/router/src/analytics/errors.rs rename to crates/analytics/src/errors.rs diff --git a/crates/analytics/src/lambda_utils.rs b/crates/analytics/src/lambda_utils.rs new file mode 100644 index 000000000000..f9446a402b4e --- /dev/null +++ 
b/crates/analytics/src/lambda_utils.rs @@ -0,0 +1,36 @@ +use aws_config::{self, meta::region::RegionProviderChain}; +use aws_sdk_lambda::{config::Region, types::InvocationType::Event, Client}; +use aws_smithy_types::Blob; +use common_utils::errors::CustomResult; +use error_stack::{IntoReport, ResultExt}; + +use crate::errors::AnalyticsError; + +async fn get_aws_client(region: String) -> Client { + let region_provider = RegionProviderChain::first_try(Region::new(region)); + let sdk_config = aws_config::from_env().region(region_provider).load().await; + Client::new(&sdk_config) +} + +pub async fn invoke_lambda( + function_name: &str, + region: &str, + json_bytes: &[u8], +) -> CustomResult<(), AnalyticsError> { + get_aws_client(region.to_string()) + .await + .invoke() + .function_name(function_name) + .invocation_type(Event) + .payload(Blob::new(json_bytes.to_owned())) + .send() + .await + .into_report() + .map_err(|er| { + let er_rep = format!("{er:?}"); + er.attach_printable(er_rep) + }) + .change_context(AnalyticsError::UnknownError) + .attach_printable("Lambda invocation failed")?; + Ok(()) +} diff --git a/crates/analytics/src/lib.rs b/crates/analytics/src/lib.rs new file mode 100644 index 000000000000..24da77f84f2b --- /dev/null +++ b/crates/analytics/src/lib.rs @@ -0,0 +1,509 @@ +mod clickhouse; +pub mod core; +pub mod errors; +pub mod metrics; +pub mod payments; +mod query; +pub mod refunds; + +pub mod api_event; +pub mod sdk_events; +mod sqlx; +mod types; +use api_event::metrics::{ApiEventMetric, ApiEventMetricRow}; +pub use types::AnalyticsDomain; +pub mod lambda_utils; +pub mod utils; + +use std::sync::Arc; + +use api_models::analytics::{ + api_event::{ + ApiEventDimensions, ApiEventFilters, ApiEventMetrics, ApiEventMetricsBucketIdentifier, + }, + payments::{PaymentDimensions, PaymentFilters, PaymentMetrics, PaymentMetricsBucketIdentifier}, + refunds::{RefundDimensions, RefundFilters, RefundMetrics, RefundMetricsBucketIdentifier}, + sdk_events::{ + SdkEventDimensions, SdkEventFilters, SdkEventMetrics, SdkEventMetricsBucketIdentifier, + }, + Distribution, Granularity, TimeRange, +}; +use clickhouse::ClickhouseClient; +pub use clickhouse::ClickhouseConfig; +use error_stack::IntoReport; +use router_env::{ + logger, + tracing::{self, instrument}, +}; +use storage_impl::config::Database; + +use self::{ + payments::{ + distribution::{PaymentDistribution, PaymentDistributionRow}, + metrics::{PaymentMetric, PaymentMetricRow}, + }, + refunds::metrics::{RefundMetric, RefundMetricRow}, + sdk_events::metrics::{SdkEventMetric, SdkEventMetricRow}, + sqlx::SqlxClient, + types::MetricsError, +}; + +#[derive(Clone, Debug)] +pub enum AnalyticsProvider { + Sqlx(SqlxClient), + Clickhouse(ClickhouseClient), + CombinedCkh(SqlxClient, ClickhouseClient), + CombinedSqlx(SqlxClient, ClickhouseClient), +} + +impl Default for AnalyticsProvider { + fn default() -> Self { + Self::Sqlx(SqlxClient::default()) + } +} + +impl ToString for AnalyticsProvider { + fn to_string(&self) -> String { + String::from(match self { + Self::Clickhouse(_) => "Clickhouse", + Self::Sqlx(_) => "Sqlx", + Self::CombinedCkh(_, _) => "CombinedCkh", + Self::CombinedSqlx(_, _) => "CombinedSqlx", + }) + } +} + +impl AnalyticsProvider { + #[instrument(skip_all)] + pub async fn get_payment_metrics( + &self, + metric: &PaymentMetrics, + dimensions: &[PaymentDimensions], + merchant_id: &str, + filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + ) -> types::MetricsResult> { + // Metrics to get the fetch time for each 
payment metric + metrics::request::record_operation_time( + async { + match self { + Self::Sqlx(pool) => { + metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::Clickhouse(pool) => { + metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::CombinedCkh(sqlx_pool, ckh_pool) => { + let (ckh_result, sqlx_result) = tokio::join!(metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + ckh_pool, + ), + metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + sqlx_pool, + )); + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics metrics") + }, + _ => {} + + }; + + ckh_result + } + Self::CombinedSqlx(sqlx_pool, ckh_pool) => { + let (ckh_result, sqlx_result) = tokio::join!(metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + ckh_pool, + ), + metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + sqlx_pool, + )); + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics metrics") + }, + _ => {} + + }; + + sqlx_result + } + } + }, + &metrics::METRIC_FETCH_TIME, + metric, + self, + ) + .await + } + + pub async fn get_payment_distribution( + &self, + distribution: &Distribution, + dimensions: &[PaymentDimensions], + merchant_id: &str, + filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + ) -> types::MetricsResult> { + // Metrics to get the fetch time for each payment metric + metrics::request::record_operation_time( + async { + match self { + Self::Sqlx(pool) => { + distribution.distribution_for + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::Clickhouse(pool) => { + distribution.distribution_for + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::CombinedCkh(sqlx_pool, ckh_pool) => { + let (ckh_result, sqlx_result) = tokio::join!(distribution.distribution_for + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + ckh_pool, + ), + distribution.distribution_for + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + sqlx_pool, + )); + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics distribution") + }, + _ => {} + + }; + + ckh_result + } + Self::CombinedSqlx(sqlx_pool, ckh_pool) => { + let (ckh_result, sqlx_result) = tokio::join!(distribution.distribution_for + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + ckh_pool, + ), + distribution.distribution_for + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + sqlx_pool, + 
)); + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics distribution") + }, + _ => {} + + }; + + sqlx_result + } + } + }, + &metrics::METRIC_FETCH_TIME, + &distribution.distribution_for, + self, + ) + .await + } + + pub async fn get_refund_metrics( + &self, + metric: &RefundMetrics, + dimensions: &[RefundDimensions], + merchant_id: &str, + filters: &RefundFilters, + granularity: &Option, + time_range: &TimeRange, + ) -> types::MetricsResult> { + // Metrics to get the fetch time for each refund metric + metrics::request::record_operation_time( + async { + match self { + Self::Sqlx(pool) => { + metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::Clickhouse(pool) => { + metric + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::CombinedCkh(sqlx_pool, ckh_pool) => { + let (ckh_result, sqlx_result) = tokio::join!( + metric.load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + ckh_pool, + ), + metric.load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + sqlx_pool, + ) + ); + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics metrics") + } + _ => {} + }; + ckh_result + } + Self::CombinedSqlx(sqlx_pool, ckh_pool) => { + let (ckh_result, sqlx_result) = tokio::join!( + metric.load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + ckh_pool, + ), + metric.load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + sqlx_pool, + ) + ); + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics metrics") + } + _ => {} + }; + sqlx_result + } + } + }, + &metrics::METRIC_FETCH_TIME, + metric, + self, + ) + .await + } + + pub async fn get_sdk_event_metrics( + &self, + metric: &SdkEventMetrics, + dimensions: &[SdkEventDimensions], + pub_key: &str, + filters: &SdkEventFilters, + granularity: &Option, + time_range: &TimeRange, + ) -> types::MetricsResult> { + match self { + Self::Sqlx(_pool) => Err(MetricsError::NotImplemented).into_report(), + Self::Clickhouse(pool) => { + metric + .load_metrics(dimensions, pub_key, filters, granularity, time_range, pool) + .await + } + Self::CombinedCkh(_sqlx_pool, ckh_pool) | Self::CombinedSqlx(_sqlx_pool, ckh_pool) => { + metric + .load_metrics( + dimensions, + pub_key, + filters, + granularity, + // Since SDK events are ckh only use ckh here + time_range, + ckh_pool, + ) + .await + } + } + } + + pub async fn get_api_event_metrics( + &self, + metric: &ApiEventMetrics, + dimensions: &[ApiEventDimensions], + pub_key: &str, + filters: &ApiEventFilters, + granularity: &Option, + time_range: &TimeRange, + ) -> types::MetricsResult> { + match self { + Self::Sqlx(_pool) => Err(MetricsError::NotImplemented).into_report(), + Self::Clickhouse(ckh_pool) + | Self::CombinedCkh(_, ckh_pool) + | Self::CombinedSqlx(_, ckh_pool) => { + // Since API events are ckh only use ckh here + metric + 
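                // Unlike payments/refunds there is no sqlx-vs-ckh result
                // comparison here: API events live only in Clickhouse, so a
                // pure Sqlx provider returns `MetricsError::NotImplemented`
                // in the arm above.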
.load_metrics( + dimensions, + pub_key, + filters, + granularity, + time_range, + ckh_pool, + ) + .await + } + } + } + + pub async fn from_conf(config: &AnalyticsConfig) -> Self { + match config { + AnalyticsConfig::Sqlx { sqlx } => Self::Sqlx(SqlxClient::from_conf(sqlx).await), + AnalyticsConfig::Clickhouse { clickhouse } => Self::Clickhouse(ClickhouseClient { + config: Arc::new(clickhouse.clone()), + }), + AnalyticsConfig::CombinedCkh { sqlx, clickhouse } => Self::CombinedCkh( + SqlxClient::from_conf(sqlx).await, + ClickhouseClient { + config: Arc::new(clickhouse.clone()), + }, + ), + AnalyticsConfig::CombinedSqlx { sqlx, clickhouse } => Self::CombinedSqlx( + SqlxClient::from_conf(sqlx).await, + ClickhouseClient { + config: Arc::new(clickhouse.clone()), + }, + ), + } + } +} + +#[derive(Clone, Debug, serde::Deserialize)] +#[serde(tag = "source")] +#[serde(rename_all = "lowercase")] +pub enum AnalyticsConfig { + Sqlx { + sqlx: Database, + }, + Clickhouse { + clickhouse: ClickhouseConfig, + }, + CombinedCkh { + sqlx: Database, + clickhouse: ClickhouseConfig, + }, + CombinedSqlx { + sqlx: Database, + clickhouse: ClickhouseConfig, + }, +} + +impl Default for AnalyticsConfig { + fn default() -> Self { + Self::Sqlx { + sqlx: Database::default(), + } + } +} + +#[derive(Clone, Debug, serde::Deserialize, Default, serde::Serialize)] +pub struct ReportConfig { + pub payment_function: String, + pub refund_function: String, + pub dispute_function: String, + pub region: String, +} diff --git a/crates/analytics/src/main.rs b/crates/analytics/src/main.rs new file mode 100644 index 000000000000..5bf256ea9783 --- /dev/null +++ b/crates/analytics/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + println!("Hello world"); +} diff --git a/crates/router/src/analytics/metrics.rs b/crates/analytics/src/metrics.rs similarity index 100% rename from crates/router/src/analytics/metrics.rs rename to crates/analytics/src/metrics.rs diff --git a/crates/router/src/analytics/metrics/request.rs b/crates/analytics/src/metrics/request.rs similarity index 51% rename from crates/router/src/analytics/metrics/request.rs rename to crates/analytics/src/metrics/request.rs index b7c202f2db25..3d1a78808f34 100644 --- a/crates/router/src/analytics/metrics/request.rs +++ b/crates/analytics/src/metrics/request.rs @@ -6,24 +6,20 @@ pub fn add_attributes>( } #[inline] -pub async fn record_operation_time( +pub async fn record_operation_time( future: F, metric: &once_cell::sync::Lazy>, - metric_name: &api_models::analytics::payments::PaymentMetrics, - source: &crate::analytics::AnalyticsProvider, + metric_name: &T, + source: &crate::AnalyticsProvider, ) -> R where F: futures::Future, + T: ToString, { let (result, time) = time_future(future).await; let attributes = &[ add_attributes("metric_name", metric_name.to_string()), - add_attributes( - "source", - match source { - crate::analytics::AnalyticsProvider::Sqlx(_) => "Sqlx", - }, - ), + add_attributes("source", source.to_string()), ]; let value = time.as_secs_f64(); metric.record(&super::CONTEXT, value, attributes); @@ -44,17 +40,3 @@ where let time_spent = start.elapsed(); (result, time_spent) } - -#[macro_export] -macro_rules! 
histogram_metric { - ($name:ident, $meter:ident) => { - pub(crate) static $name: once_cell::sync::Lazy< - $crate::opentelemetry::metrics::Histogram, - > = once_cell::sync::Lazy::new(|| $meter.u64_histogram(stringify!($name)).init()); - }; - ($name:ident, $meter:ident, $description:literal) => { - pub(crate) static $name: once_cell::sync::Lazy< - $crate::opentelemetry::metrics::Histogram, - > = once_cell::sync::Lazy::new(|| $meter.u64_histogram($description).init()); - }; -} diff --git a/crates/analytics/src/payments.rs b/crates/analytics/src/payments.rs new file mode 100644 index 000000000000..984647172c5b --- /dev/null +++ b/crates/analytics/src/payments.rs @@ -0,0 +1,16 @@ +pub mod accumulator; +mod core; +pub mod distribution; +pub mod filters; +pub mod metrics; +pub mod types; +pub use accumulator::{ + PaymentDistributionAccumulator, PaymentMetricAccumulator, PaymentMetricsAccumulator, +}; + +pub trait PaymentAnalytics: + metrics::PaymentMetricAnalytics + filters::PaymentFilterAnalytics +{ +} + +pub use self::core::{get_filters, get_metrics}; diff --git a/crates/router/src/analytics/payments/accumulator.rs b/crates/analytics/src/payments/accumulator.rs similarity index 62% rename from crates/router/src/analytics/payments/accumulator.rs rename to crates/analytics/src/payments/accumulator.rs index 5eebd0974693..c340f2888f8b 100644 --- a/crates/router/src/analytics/payments/accumulator.rs +++ b/crates/analytics/src/payments/accumulator.rs @@ -1,8 +1,9 @@ -use api_models::analytics::payments::PaymentMetricsBucketValue; -use common_enums::enums as storage_enums; +use api_models::analytics::payments::{ErrorResult, PaymentMetricsBucketValue}; +use bigdecimal::ToPrimitive; +use diesel_models::enums as storage_enums; use router_env::logger; -use super::metrics::PaymentMetricRow; +use super::{distribution::PaymentDistributionRow, metrics::PaymentMetricRow}; #[derive(Debug, Default)] pub struct PaymentMetricsAccumulator { @@ -11,6 +12,22 @@ pub struct PaymentMetricsAccumulator { pub payment_success: CountAccumulator, pub processed_amount: SumAccumulator, pub avg_ticket_size: AverageAccumulator, + pub payment_error_message: ErrorDistributionAccumulator, + pub retries_count: CountAccumulator, + pub retries_amount_processed: SumAccumulator, + pub connector_success_rate: SuccessRateAccumulator, +} + +#[derive(Debug, Default)] +pub struct ErrorDistributionRow { + pub count: i64, + pub total: i64, + pub error_message: String, +} + +#[derive(Debug, Default)] +pub struct ErrorDistributionAccumulator { + pub error_vec: Vec, } #[derive(Debug, Default)] @@ -45,6 +62,51 @@ pub trait PaymentMetricAccumulator { fn collect(self) -> Self::MetricOutput; } +pub trait PaymentDistributionAccumulator { + type DistributionOutput; + + fn add_distribution_bucket(&mut self, distribution: &PaymentDistributionRow); + + fn collect(self) -> Self::DistributionOutput; +} + +impl PaymentDistributionAccumulator for ErrorDistributionAccumulator { + type DistributionOutput = Option>; + + fn add_distribution_bucket(&mut self, distribution: &PaymentDistributionRow) { + self.error_vec.push(ErrorDistributionRow { + count: distribution.count.unwrap_or_default(), + total: distribution + .total + .clone() + .map(|i| i.to_i64().unwrap_or_default()) + .unwrap_or_default(), + error_message: distribution.error_message.clone().unwrap_or("".to_string()), + }) + } + + fn collect(mut self) -> Self::DistributionOutput { + if self.error_vec.is_empty() { + None + } else { + self.error_vec.sort_by(|a, b| b.count.cmp(&a.count)); + let mut res: Vec = 
Vec::new(); + for val in self.error_vec.into_iter() { + let perc = f64::from(u32::try_from(val.count).ok()?) * 100.0 + / f64::from(u32::try_from(val.total).ok()?); + + res.push(ErrorResult { + reason: val.error_message, + count: val.count, + percentage: (perc * 100.0).round() / 100.0, + }) + } + + Some(res) + } + } +} + impl PaymentMetricAccumulator for SuccessRateAccumulator { type MetricOutput = Option; @@ -145,6 +207,10 @@ impl PaymentMetricsAccumulator { payment_success_count: self.payment_success.collect(), payment_processed_amount: self.processed_amount.collect(), avg_ticket_size: self.avg_ticket_size.collect(), + payment_error_message: self.payment_error_message.collect(), + retries_count: self.retries_count.collect(), + retries_amount_processed: self.retries_amount_processed.collect(), + connector_success_rate: self.connector_success_rate.collect(), } } } diff --git a/crates/analytics/src/payments/core.rs b/crates/analytics/src/payments/core.rs new file mode 100644 index 000000000000..138e88789327 --- /dev/null +++ b/crates/analytics/src/payments/core.rs @@ -0,0 +1,303 @@ +#![allow(dead_code)] +use std::collections::HashMap; + +use api_models::analytics::{ + payments::{ + MetricsBucketResponse, PaymentDimensions, PaymentDistributions, PaymentMetrics, + PaymentMetricsBucketIdentifier, + }, + AnalyticsMetadata, FilterValue, GetPaymentFiltersRequest, GetPaymentMetricRequest, + MetricsResponse, PaymentFiltersResponse, +}; +use common_utils::errors::CustomResult; +use error_stack::{IntoReport, ResultExt}; +use router_env::{ + instrument, logger, + tracing::{self, Instrument}, +}; + +use super::{ + distribution::PaymentDistributionRow, + filters::{get_payment_filter_for_dimension, FilterRow}, + metrics::PaymentMetricRow, + PaymentMetricsAccumulator, +}; +use crate::{ + errors::{AnalyticsError, AnalyticsResult}, + metrics, + payments::{PaymentDistributionAccumulator, PaymentMetricAccumulator}, + AnalyticsProvider, +}; + +#[derive(Debug)] +pub enum TaskType { + MetricTask( + PaymentMetrics, + CustomResult, AnalyticsError>, + ), + DistributionTask( + PaymentDistributions, + CustomResult, AnalyticsError>, + ), +} + +#[instrument(skip_all)] +pub async fn get_metrics( + pool: &AnalyticsProvider, + merchant_id: &str, + req: GetPaymentMetricRequest, +) -> AnalyticsResult> { + let mut metrics_accumulator: HashMap< + PaymentMetricsBucketIdentifier, + PaymentMetricsAccumulator, + > = HashMap::new(); + + let mut set = tokio::task::JoinSet::new(); + for metric_type in req.metrics.iter().cloned() { + let req = req.clone(); + let pool = pool.clone(); + let task_span = tracing::debug_span!( + "analytics_payments_metrics_query", + payment_metric = metric_type.as_ref() + ); + + // TODO: lifetime issues with joinset, + // can be optimized away if joinset lifetime requirements are relaxed + let merchant_id_scoped = merchant_id.to_owned(); + set.spawn( + async move { + let data = pool + .get_payment_metrics( + &metric_type, + &req.group_by_names.clone(), + &merchant_id_scoped, + &req.filters, + &req.time_series.map(|t| t.granularity), + &req.time_range, + ) + .await + .change_context(AnalyticsError::UnknownError); + TaskType::MetricTask(metric_type, data) + } + .instrument(task_span), + ); + } + + if let Some(distribution) = req.clone().distribution { + let req = req.clone(); + let pool = pool.clone(); + let task_span = tracing::debug_span!( + "analytics_payments_distribution_query", + payment_distribution = distribution.distribution_for.as_ref() + ); + + let merchant_id_scoped = merchant_id.to_owned(); + 
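        // The optional error-message distribution is spawned as one more task
        // on the same JoinSet; its buckets are folded into the same
        // `metrics_accumulator` map as the metric tasks when results are
        // drained below (tasks complete in arbitrary order, not request order).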
set.spawn( + async move { + let data = pool + .get_payment_distribution( + &distribution, + &req.group_by_names.clone(), + &merchant_id_scoped, + &req.filters, + &req.time_series.map(|t| t.granularity), + &req.time_range, + ) + .await + .change_context(AnalyticsError::UnknownError); + TaskType::DistributionTask(distribution.distribution_for, data) + } + .instrument(task_span), + ); + } + + while let Some(task_type) = set + .join_next() + .await + .transpose() + .into_report() + .change_context(AnalyticsError::UnknownError)? + { + match task_type { + TaskType::MetricTask(metric, data) => { + let data = data?; + let attributes = &[ + metrics::request::add_attributes("metric_type", metric.to_string()), + metrics::request::add_attributes("source", pool.to_string()), + ]; + + let value = u64::try_from(data.len()); + if let Ok(val) = value { + metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes); + logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val); + } + + for (id, value) in data { + logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}"); + let metrics_builder = metrics_accumulator.entry(id).or_default(); + match metric { + PaymentMetrics::PaymentSuccessRate => metrics_builder + .payment_success_rate + .add_metrics_bucket(&value), + PaymentMetrics::PaymentCount => { + metrics_builder.payment_count.add_metrics_bucket(&value) + } + PaymentMetrics::PaymentSuccessCount => { + metrics_builder.payment_success.add_metrics_bucket(&value) + } + PaymentMetrics::PaymentProcessedAmount => { + metrics_builder.processed_amount.add_metrics_bucket(&value) + } + PaymentMetrics::AvgTicketSize => { + metrics_builder.avg_ticket_size.add_metrics_bucket(&value) + } + PaymentMetrics::RetriesCount => { + metrics_builder.retries_count.add_metrics_bucket(&value); + metrics_builder + .retries_amount_processed + .add_metrics_bucket(&value) + } + PaymentMetrics::ConnectorSuccessRate => { + metrics_builder + .connector_success_rate + .add_metrics_bucket(&value); + } + } + } + + logger::debug!( + "Analytics Accumulated Results: metric: {}, results: {:#?}", + metric, + metrics_accumulator + ); + } + TaskType::DistributionTask(distribution, data) => { + let data = data?; + let attributes = &[ + metrics::request::add_attributes("distribution_type", distribution.to_string()), + metrics::request::add_attributes("source", pool.to_string()), + ]; + + let value = u64::try_from(data.len()); + if let Ok(val) = value { + metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes); + logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val); + } + + for (id, value) in data { + logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for distribution {distribution}"); + let metrics_accumulator = metrics_accumulator.entry(id).or_default(); + match distribution { + PaymentDistributions::PaymentErrorMessage => metrics_accumulator + .payment_error_message + .add_distribution_bucket(&value), + } + } + + logger::debug!( + "Analytics Accumulated Results: distribution: {}, results: {:#?}", + distribution, + metrics_accumulator + ); + } + } + } + + let query_data: Vec = metrics_accumulator + .into_iter() + .map(|(id, val)| MetricsBucketResponse { + values: val.collect(), + dimensions: id, + }) + .collect(); + + Ok(MetricsResponse { + query_data, + meta_data: [AnalyticsMetadata { + current_time_range: req.time_range, + }], + }) +} + +pub async fn get_filters( + pool: &AnalyticsProvider, + req: GetPaymentFiltersRequest, + merchant_id: &String, +) -> 
AnalyticsResult { + let mut res = PaymentFiltersResponse::default(); + + for dim in req.group_by_names { + let values = match pool { + AnalyticsProvider::Sqlx(pool) => { + get_payment_filter_for_dimension(dim, merchant_id, &req.time_range, pool) + .await + } + AnalyticsProvider::Clickhouse(pool) => { + get_payment_filter_for_dimension(dim, merchant_id, &req.time_range, pool) + .await + } + AnalyticsProvider::CombinedCkh(sqlx_poll, ckh_pool) => { + let ckh_result = get_payment_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + ckh_pool, + ) + .await; + let sqlx_result = get_payment_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + sqlx_poll, + ) + .await; + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics filters") + }, + _ => {} + }; + ckh_result + } + AnalyticsProvider::CombinedSqlx(sqlx_poll, ckh_pool) => { + let ckh_result = get_payment_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + ckh_pool, + ) + .await; + let sqlx_result = get_payment_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + sqlx_poll, + ) + .await; + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics filters") + }, + _ => {} + }; + sqlx_result + } + } + .change_context(AnalyticsError::UnknownError)? + .into_iter() + .filter_map(|fil: FilterRow| match dim { + PaymentDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()), + PaymentDimensions::PaymentStatus => fil.status.map(|i| i.as_ref().to_string()), + PaymentDimensions::Connector => fil.connector, + PaymentDimensions::AuthType => fil.authentication_type.map(|i| i.as_ref().to_string()), + PaymentDimensions::PaymentMethod => fil.payment_method, + PaymentDimensions::PaymentMethodType => fil.payment_method_type, + }) + .collect::>(); + res.query_data.push(FilterValue { + dimension: dim, + values, + }) + } + Ok(res) +} diff --git a/crates/analytics/src/payments/distribution.rs b/crates/analytics/src/payments/distribution.rs new file mode 100644 index 000000000000..cf18c26310a7 --- /dev/null +++ b/crates/analytics/src/payments/distribution.rs @@ -0,0 +1,92 @@ +use api_models::analytics::{ + payments::{ + PaymentDimensions, PaymentDistributions, PaymentFilters, PaymentMetricsBucketIdentifier, + }, + Distribution, Granularity, TimeRange, +}; +use diesel_models::enums as storage_enums; +use time::PrimitiveDateTime; + +use crate::{ + query::{Aggregate, GroupByClause, ToSql, Window}, + types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult}, +}; + +mod payment_error_message; + +use payment_error_message::PaymentErrorMessage; + +#[derive(Debug, PartialEq, Eq, serde::Deserialize)] +pub struct PaymentDistributionRow { + pub currency: Option>, + pub status: Option>, + pub connector: Option, + pub authentication_type: Option>, + pub payment_method: Option, + pub payment_method_type: Option, + pub total: Option, + pub count: Option, + pub error_message: Option, + #[serde(with = "common_utils::custom_serde::iso8601::option")] + pub start_bucket: Option, + #[serde(with = "common_utils::custom_serde::iso8601::option")] + pub end_bucket: Option, +} + +pub trait PaymentDistributionAnalytics: LoadRow {} + 
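+/// Loads one distribution for a bucket of payment dimensions. Implemented by
+/// the individual query types (e.g. `PaymentErrorMessage`) and by the
+/// `PaymentDistributions` enum, which simply dispatches to the matching
+/// query type in the impl below.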
+#[async_trait::async_trait] +pub trait PaymentDistribution +where + T: AnalyticsDataSource + PaymentDistributionAnalytics, +{ + #[allow(clippy::too_many_arguments)] + async fn load_distribution( + &self, + distribution: &Distribution, + dimensions: &[PaymentDimensions], + merchant_id: &str, + filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult>; +} + +#[async_trait::async_trait] +impl PaymentDistribution for PaymentDistributions +where + T: AnalyticsDataSource + PaymentDistributionAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + async fn load_distribution( + &self, + distribution: &Distribution, + dimensions: &[PaymentDimensions], + merchant_id: &str, + filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult> { + match self { + Self::PaymentErrorMessage => { + PaymentErrorMessage + .load_distribution( + distribution, + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + } + } +} diff --git a/crates/analytics/src/payments/distribution/payment_error_message.rs b/crates/analytics/src/payments/distribution/payment_error_message.rs new file mode 100644 index 000000000000..c70fc09aeac4 --- /dev/null +++ b/crates/analytics/src/payments/distribution/payment_error_message.rs @@ -0,0 +1,176 @@ +use api_models::analytics::{ + payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, + Distribution, Granularity, TimeRange, +}; +use common_utils::errors::ReportSwitchExt; +use diesel_models::enums as storage_enums; +use error_stack::ResultExt; +use time::PrimitiveDateTime; + +use super::{PaymentDistribution, PaymentDistributionRow}; +use crate::{ + query::{ + Aggregate, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, + }, + types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, +}; + +#[derive(Default)] +pub(super) struct PaymentErrorMessage; + +#[async_trait::async_trait] +impl PaymentDistribution for PaymentErrorMessage +where + T: AnalyticsDataSource + super::PaymentDistributionAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + async fn load_distribution( + &self, + distribution: &Distribution, + dimensions: &[PaymentDimensions], + merchant_id: &str, + filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult> { + let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::Payment); + + for dim in dimensions.iter() { + query_builder.add_select_column(dim).switch()?; + } + + query_builder + .add_select_column(&distribution.distribution_for) + .switch()?; + + query_builder + .add_select_column(Aggregate::Count { + field: None, + alias: Some("count"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Min { + field: "created_at", + alias: Some("start_bucket"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Max { + field: "created_at", + alias: Some("end_bucket"), + }) + .switch()?; + + filters.set_filter_clause(&mut query_builder).switch()?; + + query_builder + .add_filter_clause("merchant_id", merchant_id) + .switch()?; + + time_range + .set_filter_clause(&mut query_builder) + .attach_printable("Error filtering time range") + .switch()?; + + 
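+        // Group by every requested dimension and additionally by the
+        // distribution target (the error message), so that each distinct
+        // error string gets its own count within every bucket.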
for dim in dimensions.iter() { + query_builder + .add_group_by_clause(dim) + .attach_printable("Error grouping by dimensions") + .switch()?; + } + + query_builder + .add_group_by_clause(&distribution.distribution_for) + .attach_printable("Error grouping by distribution_for") + .switch()?; + + if let Some(granularity) = granularity.as_ref() { + granularity + .set_group_by_clause(&mut query_builder) + .attach_printable("Error adding granularity") + .switch()?; + } + + query_builder + .add_filter_clause( + PaymentDimensions::PaymentStatus, + storage_enums::AttemptStatus::Failure, + ) + .switch()?; + + for dim in dimensions.iter() { + query_builder.add_outer_select_column(dim).switch()?; + } + + query_builder + .add_outer_select_column(&distribution.distribution_for) + .switch()?; + query_builder.add_outer_select_column("count").switch()?; + query_builder + .add_outer_select_column("start_bucket") + .switch()?; + query_builder + .add_outer_select_column("end_bucket") + .switch()?; + let sql_dimensions = query_builder.transform_to_sql_values(dimensions).switch()?; + + query_builder + .add_outer_select_column(Window::Sum { + field: "count", + partition_by: Some(sql_dimensions), + order_by: None, + alias: Some("total"), + }) + .switch()?; + + query_builder + .add_top_n_clause( + dimensions, + distribution.distribution_cardinality.into(), + "count", + Order::Descending, + ) + .switch()?; + + query_builder + .execute_query::(pool) + .await + .change_context(MetricsError::QueryBuildingError)? + .change_context(MetricsError::QueryExecutionFailure)? + .into_iter() + .map(|i| { + Ok(( + PaymentMetricsBucketIdentifier::new( + i.currency.as_ref().map(|i| i.0), + i.status.as_ref().map(|i| i.0), + i.connector.clone(), + i.authentication_type.as_ref().map(|i| i.0), + i.payment_method.clone(), + i.payment_method_type.clone(), + TimeRange { + start_time: match (granularity, i.start_bucket) { + (Some(g), Some(st)) => g.clip_to_start(st)?, + _ => time_range.start_time, + }, + end_time: granularity.as_ref().map_or_else( + || Ok(time_range.end_time), + |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), + )?, + }, + ), + i, + )) + }) + .collect::, + crate::query::PostProcessingError, + >>() + .change_context(MetricsError::PostProcessingFailure) + } +} diff --git a/crates/router/src/analytics/payments/filters.rs b/crates/analytics/src/payments/filters.rs similarity index 87% rename from crates/router/src/analytics/payments/filters.rs rename to crates/analytics/src/payments/filters.rs index f009aaa76329..6c165f78a8e4 100644 --- a/crates/router/src/analytics/payments/filters.rs +++ b/crates/analytics/src/payments/filters.rs @@ -1,11 +1,11 @@ use api_models::analytics::{payments::PaymentDimensions, Granularity, TimeRange}; -use common_enums::enums::{AttemptStatus, AuthenticationType, Currency}; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums::{AttemptStatus, AuthenticationType, Currency}; use error_stack::ResultExt; use time::PrimitiveDateTime; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{ AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult, LoadRow, @@ -26,6 +26,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::Payment); @@ -48,11 +49,12 @@ where 
.change_context(FiltersError::QueryExecutionFailure) } -#[derive(Debug, serde::Serialize, Eq, PartialEq)] +#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct FilterRow { pub currency: Option>, pub status: Option>, pub connector: Option, pub authentication_type: Option>, pub payment_method: Option, + pub payment_method_type: Option, } diff --git a/crates/router/src/analytics/payments/metrics.rs b/crates/analytics/src/payments/metrics.rs similarity index 76% rename from crates/router/src/analytics/payments/metrics.rs rename to crates/analytics/src/payments/metrics.rs index f492e5bd4df9..6fe6b6260d48 100644 --- a/crates/router/src/analytics/payments/metrics.rs +++ b/crates/analytics/src/payments/metrics.rs @@ -2,36 +2,44 @@ use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetrics, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; -use common_enums::enums as storage_enums; +use diesel_models::enums as storage_enums; use time::PrimitiveDateTime; -use crate::analytics::{ - query::{Aggregate, GroupByClause, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult}, }; mod avg_ticket_size; +mod connector_success_rate; mod payment_count; mod payment_processed_amount; mod payment_success_count; +mod retries_count; mod success_rate; use avg_ticket_size::AvgTicketSize; +use connector_success_rate::ConnectorSuccessRate; use payment_count::PaymentCount; use payment_processed_amount::PaymentProcessedAmount; use payment_success_count::PaymentSuccessCount; use success_rate::PaymentSuccessRate; -#[derive(Debug, PartialEq, Eq)] +use self::retries_count::RetriesCount; + +#[derive(Debug, PartialEq, Eq, serde::Deserialize)] pub struct PaymentMetricRow { pub currency: Option>, pub status: Option>, pub connector: Option, pub authentication_type: Option>, pub payment_method: Option, + pub payment_method_type: Option, pub total: Option, pub count: Option, + #[serde(with = "common_utils::custom_serde::iso8601::option")] pub start_bucket: Option, + #[serde(with = "common_utils::custom_serde::iso8601::option")] pub end_bucket: Option, } @@ -61,6 +69,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -132,6 +141,30 @@ where ) .await } + Self::RetriesCount => { + RetriesCount + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::ConnectorSuccessRate => { + ConnectorSuccessRate + .load_metrics( + dimensions, + merchant_id, + filters, + granularity, + time_range, + pool, + ) + .await + } } } } diff --git a/crates/router/src/analytics/payments/metrics/avg_ticket_size.rs b/crates/analytics/src/payments/metrics/avg_ticket_size.rs similarity index 90% rename from crates/router/src/analytics/payments/metrics/avg_ticket_size.rs rename to crates/analytics/src/payments/metrics/avg_ticket_size.rs index 2230d870e68a..9475d5288a64 100644 --- a/crates/router/src/analytics/payments/metrics/avg_ticket_size.rs +++ b/crates/analytics/src/payments/metrics/avg_ticket_size.rs @@ -3,12 +3,13 @@ use api_models::analytics::{ Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::{PaymentMetric, PaymentMetricRow}; -use crate::analytics::{ - query::{Aggregate, 
GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -23,6 +24,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -89,6 +91,13 @@ where .switch()?; } + query_builder + .add_filter_clause( + PaymentDimensions::PaymentStatus, + storage_enums::AttemptStatus::Charged, + ) + .switch()?; + query_builder .execute_query::(pool) .await @@ -103,6 +112,7 @@ where i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), + i.payment_method_type.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, @@ -119,7 +129,7 @@ where }) .collect::, - crate::analytics::query::PostProcessingError, + crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } diff --git a/crates/analytics/src/payments/metrics/connector_success_rate.rs b/crates/analytics/src/payments/metrics/connector_success_rate.rs new file mode 100644 index 000000000000..0c4d19b2e0ba --- /dev/null +++ b/crates/analytics/src/payments/metrics/connector_success_rate.rs @@ -0,0 +1,130 @@ +use api_models::analytics::{ + payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, + Granularity, TimeRange, +}; +use common_utils::errors::ReportSwitchExt; +use error_stack::ResultExt; +use time::PrimitiveDateTime; + +use super::PaymentMetricRow; +use crate::{ + query::{ + Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, + Window, + }, + types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, +}; + +#[derive(Default)] +pub(super) struct ConnectorSuccessRate; + +#[async_trait::async_trait] +impl super::PaymentMetric for ConnectorSuccessRate +where + T: AnalyticsDataSource + super::PaymentMetricAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + async fn load_metrics( + &self, + dimensions: &[PaymentDimensions], + merchant_id: &str, + filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult> { + let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::Payment); + let mut dimensions = dimensions.to_vec(); + + dimensions.push(PaymentDimensions::PaymentStatus); + + for dim in dimensions.iter() { + query_builder.add_select_column(dim).switch()?; + } + + query_builder + .add_select_column(Aggregate::Count { + field: None, + alias: Some("count"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Min { + field: "created_at", + alias: Some("start_bucket"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Max { + field: "created_at", + alias: Some("end_bucket"), + }) + .switch()?; + + filters.set_filter_clause(&mut query_builder).switch()?; + + query_builder + .add_filter_clause("merchant_id", merchant_id) + .switch()?; + query_builder + .add_custom_filter_clause(PaymentDimensions::Connector, "NULL", FilterTypes::IsNotNull) + .switch()?; + time_range + .set_filter_clause(&mut query_builder) + .attach_printable("Error filtering time range") + .switch()?; + + for dim in dimensions.iter() { + query_builder + 
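                // `PaymentStatus` was pushed into `dimensions` above, so the
                // group-by also splits buckets by attempt status; downstream,
                // the success-rate accumulator can derive a per-connector rate
                // from the charged vs. total counts.
                query_builder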
.add_group_by_clause(dim) + .attach_printable("Error grouping by dimensions") + .switch()?; + } + + if let Some(granularity) = granularity.as_ref() { + granularity + .set_group_by_clause(&mut query_builder) + .attach_printable("Error adding granularity") + .switch()?; + } + + query_builder + .execute_query::(pool) + .await + .change_context(MetricsError::QueryBuildingError)? + .change_context(MetricsError::QueryExecutionFailure)? + .into_iter() + .map(|i| { + Ok(( + PaymentMetricsBucketIdentifier::new( + i.currency.as_ref().map(|i| i.0), + None, + i.connector.clone(), + i.authentication_type.as_ref().map(|i| i.0), + i.payment_method.clone(), + i.payment_method_type.clone(), + TimeRange { + start_time: match (granularity, i.start_bucket) { + (Some(g), Some(st)) => g.clip_to_start(st)?, + _ => time_range.start_time, + }, + end_time: granularity.as_ref().map_or_else( + || Ok(time_range.end_time), + |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), + )?, + }, + ), + i, + )) + }) + .collect::, + crate::query::PostProcessingError, + >>() + .change_context(MetricsError::PostProcessingFailure) + } +} diff --git a/crates/router/src/analytics/payments/metrics/payment_count.rs b/crates/analytics/src/payments/metrics/payment_count.rs similarity index 94% rename from crates/router/src/analytics/payments/metrics/payment_count.rs rename to crates/analytics/src/payments/metrics/payment_count.rs index 661cec3dac36..34e71f3da6fb 100644 --- a/crates/router/src/analytics/payments/metrics/payment_count.rs +++ b/crates/analytics/src/payments/metrics/payment_count.rs @@ -7,8 +7,8 @@ use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -23,6 +23,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -97,6 +98,7 @@ where i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), + i.payment_method_type.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, @@ -111,7 +113,7 @@ where i, )) }) - .collect::, crate::analytics::query::PostProcessingError>>() + .collect::, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } } diff --git a/crates/router/src/analytics/payments/metrics/payment_processed_amount.rs b/crates/analytics/src/payments/metrics/payment_processed_amount.rs similarity index 94% rename from crates/router/src/analytics/payments/metrics/payment_processed_amount.rs rename to crates/analytics/src/payments/metrics/payment_processed_amount.rs index 2ec0c6f18f9c..f2dbf97e0db9 100644 --- a/crates/router/src/analytics/payments/metrics/payment_processed_amount.rs +++ b/crates/analytics/src/payments/metrics/payment_processed_amount.rs @@ -2,14 +2,14 @@ use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; -use common_enums::enums as storage_enums; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; -use crate::analytics::{ - 
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -24,6 +24,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -105,6 +106,7 @@ where i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), + i.payment_method_type.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, @@ -121,7 +123,7 @@ where }) .collect::, - crate::analytics::query::PostProcessingError, + crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } diff --git a/crates/router/src/analytics/payments/metrics/payment_success_count.rs b/crates/analytics/src/payments/metrics/payment_success_count.rs similarity index 94% rename from crates/router/src/analytics/payments/metrics/payment_success_count.rs rename to crates/analytics/src/payments/metrics/payment_success_count.rs index 8245fe7aeb88..a6fb8ed2239d 100644 --- a/crates/router/src/analytics/payments/metrics/payment_success_count.rs +++ b/crates/analytics/src/payments/metrics/payment_success_count.rs @@ -2,14 +2,14 @@ use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; -use common_enums::enums as storage_enums; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -24,6 +24,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -104,6 +105,7 @@ where i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), + i.payment_method_type.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, @@ -120,7 +122,7 @@ where }) .collect::, - crate::analytics::query::PostProcessingError, + crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } diff --git a/crates/analytics/src/payments/metrics/retries_count.rs b/crates/analytics/src/payments/metrics/retries_count.rs new file mode 100644 index 000000000000..91952adb569a --- /dev/null +++ b/crates/analytics/src/payments/metrics/retries_count.rs @@ -0,0 +1,122 @@ +use api_models::analytics::{ + payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, + Granularity, TimeRange, +}; +use common_utils::errors::ReportSwitchExt; +use error_stack::ResultExt; +use time::PrimitiveDateTime; + +use super::PaymentMetricRow; +use crate::{ + query::{ + Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, + Window, + }, + types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, +}; + +#[derive(Default)] +pub(super) struct RetriesCount; + +#[async_trait::async_trait] +impl 
super::PaymentMetric for RetriesCount +where + T: AnalyticsDataSource + super::PaymentMetricAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + async fn load_metrics( + &self, + _dimensions: &[PaymentDimensions], + merchant_id: &str, + _filters: &PaymentFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult> { + let mut query_builder: QueryBuilder = + QueryBuilder::new(AnalyticsCollection::PaymentIntent); + query_builder + .add_select_column(Aggregate::Count { + field: None, + alias: Some("count"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Sum { + field: "amount", + alias: Some("total"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Min { + field: "created_at", + alias: Some("start_bucket"), + }) + .switch()?; + query_builder + .add_select_column(Aggregate::Max { + field: "created_at", + alias: Some("end_bucket"), + }) + .switch()?; + query_builder + .add_filter_clause("merchant_id", merchant_id) + .switch()?; + query_builder + .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt) + .switch()?; + query_builder + .add_custom_filter_clause("status", "succeeded", FilterTypes::Equal) + .switch()?; + time_range + .set_filter_clause(&mut query_builder) + .attach_printable("Error filtering time range") + .switch()?; + + if let Some(granularity) = granularity.as_ref() { + granularity + .set_group_by_clause(&mut query_builder) + .attach_printable("Error adding granularity") + .switch()?; + } + + query_builder + .execute_query::(pool) + .await + .change_context(MetricsError::QueryBuildingError)? + .change_context(MetricsError::QueryExecutionFailure)? + .into_iter() + .map(|i| { + Ok(( + PaymentMetricsBucketIdentifier::new( + i.currency.as_ref().map(|i| i.0), + None, + i.connector.clone(), + i.authentication_type.as_ref().map(|i| i.0), + i.payment_method.clone(), + i.payment_method_type.clone(), + TimeRange { + start_time: match (granularity, i.start_bucket) { + (Some(g), Some(st)) => g.clip_to_start(st)?, + _ => time_range.start_time, + }, + end_time: granularity.as_ref().map_or_else( + || Ok(time_range.end_time), + |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), + )?, + }, + ), + i, + )) + }) + .collect::, + crate::query::PostProcessingError, + >>() + .change_context(MetricsError::PostProcessingFailure) + } +} diff --git a/crates/router/src/analytics/payments/metrics/success_rate.rs b/crates/analytics/src/payments/metrics/success_rate.rs similarity index 95% rename from crates/router/src/analytics/payments/metrics/success_rate.rs rename to crates/analytics/src/payments/metrics/success_rate.rs index c63956d4b157..9e688240ddbf 100644 --- a/crates/router/src/analytics/payments/metrics/success_rate.rs +++ b/crates/analytics/src/payments/metrics/success_rate.rs @@ -7,8 +7,8 @@ use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -23,6 +23,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -100,6 +101,7 @@ where i.connector.clone(), 
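// Note (illustrative): PaymentSuccessRate does not bucket by status, so every
// status for a given currency/connector/payment-method combination folds into one
// bucket. If a bucket ends up with counts {charged: 80, other: 20}, the downstream
// accumulator reports a success rate of 80 / (80 + 20) = 0.8 for it.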
i.authentication_type.as_ref().map(|i| i.0),
                         i.payment_method.clone(),
+                        i.payment_method_type.clone(),
                         TimeRange {
                             start_time: match (granularity, i.start_bucket) {
                                 (Some(g), Some(st)) => g.clip_to_start(st)?,
@@ -116,7 +118,7 @@ where
             })
             .collect::<error_stack::Result<
                 Vec<_>,
-                crate::analytics::query::PostProcessingError,
+                crate::query::PostProcessingError,
             >>()
             .change_context(MetricsError::PostProcessingFailure)
     }
diff --git a/crates/router/src/analytics/payments/types.rs b/crates/analytics/src/payments/types.rs
similarity index 82%
rename from crates/router/src/analytics/payments/types.rs
rename to crates/analytics/src/payments/types.rs
index fdfbedef383d..d5d8eca13e58 100644
--- a/crates/router/src/analytics/payments/types.rs
+++ b/crates/analytics/src/payments/types.rs
@@ -1,7 +1,7 @@
 use api_models::analytics::payments::{PaymentDimensions, PaymentFilters};
 use error_stack::ResultExt;
 
-use crate::analytics::{
+use crate::{
     query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
     types::{AnalyticsCollection, AnalyticsDataSource},
 };
@@ -41,6 +41,15 @@ where
                 .add_filter_in_range_clause(PaymentDimensions::PaymentMethod, &self.payment_method)
                 .attach_printable("Error adding payment method filter")?;
         }
+
+        if !self.payment_method_type.is_empty() {
+            builder
+                .add_filter_in_range_clause(
+                    PaymentDimensions::PaymentMethodType,
+                    &self.payment_method_type,
+                )
+                .attach_printable("Error adding payment method type filter")?;
+        }
         Ok(())
     }
 }
diff --git a/crates/router/src/analytics/query.rs b/crates/analytics/src/query.rs
similarity index 65%
rename from crates/router/src/analytics/query.rs
rename to crates/analytics/src/query.rs
index b1f621d8153d..b924987f004c 100644
--- a/crates/router/src/analytics/query.rs
+++ b/crates/analytics/src/query.rs
@@ -1,26 +1,26 @@
-#![allow(dead_code)]
 use std::marker::PhantomData;
 
 use api_models::{
     analytics::{
         self as analytics_api,
-        payments::PaymentDimensions,
+        api_event::ApiEventDimensions,
+        payments::{PaymentDimensions, PaymentDistributions},
         refunds::{RefundDimensions, RefundType},
+        sdk_events::{SdkEventDimensions, SdkEventNames},
         Granularity,
     },
-    enums::Connector,
+    enums::{
+        AttemptStatus, AuthenticationType, Connector, Currency, PaymentMethod, PaymentMethodType,
+    },
     refunds::RefundStatus,
 };
-use common_enums::{
-    enums as storage_enums,
-    enums::{AttemptStatus, AuthenticationType, Currency, PaymentMethod},
-};
 use common_utils::errors::{CustomResult, ParsingError};
+use diesel_models::enums as storage_enums;
 use error_stack::{IntoReport, ResultExt};
-use router_env::logger;
+use router_env::{logger, Flow};
 
-use super::types::{AnalyticsCollection, AnalyticsDataSource, LoadRow};
-use crate::analytics::types::QueryExecutionError;
+use super::types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, TableEngine};
+use crate::types::QueryExecutionError;
 
 pub type QueryResult<T> = error_stack::Result<T, QueryBuildingError>;
 
 pub trait QueryFilter<T>
 where
@@ -89,12 +89,12 @@ impl GroupByClause for Granularity {
         let granularity_divisor = self.get_bucket_size();
 
         builder
-            .add_group_by_clause(format!("DATE_TRUNC('{trunc_scale}', modified_at)"))
+            .add_group_by_clause(format!("DATE_TRUNC('{trunc_scale}', created_at)"))
             .attach_printable("Error adding time prune group by")?;
         if let Some(scale) = granularity_bucket_scale {
             builder
                 .add_group_by_clause(format!(
-                    "FLOOR(DATE_PART('{scale}', modified_at)/{granularity_divisor})"
+                    "FLOOR(DATE_PART('{scale}', created_at)/{granularity_divisor})"
                 ))
                 .attach_printable("Error adding time binning group by")?;
         }
         Ok(())
     }
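// Worked example (assumed mapping): if Granularity::FiveMin yields
// trunc_scale = 'hour', granularity_bucket_scale = Some('minute') and a divisor
// of 5, the two clauses above become
//   GROUP BY DATE_TRUNC('hour', created_at),
//            FLOOR(DATE_PART('minute', created_at)/5)
// i.e. rows are bucketed into five-minute windows within each hour.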
} +impl GroupByClause for Granularity { + fn set_group_by_clause( + &self, + builder: &mut QueryBuilder, + ) -> QueryResult<()> { + let interval = match self { + Self::OneMin => "toStartOfMinute(created_at)", + Self::FiveMin => "toStartOfFiveMinutes(created_at)", + Self::FifteenMin => "toStartOfFifteenMinutes(created_at)", + Self::ThirtyMin => "toStartOfInterval(created_at, INTERVAL 30 minute)", + Self::OneHour => "toStartOfHour(created_at)", + Self::OneDay => "toStartOfDay(created_at)", + }; + + builder + .add_group_by_clause(interval) + .attach_printable("Error adding interval group by") + } +} + #[derive(strum::Display)] #[strum(serialize_all = "lowercase")] pub enum TimeGranularityLevel { @@ -229,6 +249,76 @@ pub enum Aggregate { }, } +// Window functions in query +// --- +// Description - +// field: to_sql type value used as expr in aggregation +// partition_by: partition by fields in window +// order_by: order by fields and order (Ascending / Descending) in window +// alias: alias of window expr in query +// --- +// Usage - +// Window::Sum { +// field: "count", +// partition_by: Some(query_builder.transform_to_sql_values(&dimensions).switch()?), +// order_by: Some(("value", Descending)), +// alias: Some("total"), +// } +#[derive(Debug)] +pub enum Window { + Sum { + field: R, + partition_by: Option, + order_by: Option<(String, Order)>, + alias: Option<&'static str>, + }, + RowNumber { + field: R, + partition_by: Option, + order_by: Option<(String, Order)>, + alias: Option<&'static str>, + }, +} + +#[derive(Debug, Clone, Copy)] +pub enum Order { + Ascending, + Descending, +} + +impl ToString for Order { + fn to_string(&self) -> String { + String::from(match self { + Self::Ascending => "asc", + Self::Descending => "desc", + }) + } +} + +// Select TopN values for a group based on a metric +// --- +// Description - +// columns: Columns in group to select TopN values for +// count: N in TopN +// order_column: metric used to sort and limit TopN +// order: sort order of metric (Ascending / Descending) +// --- +// Usage - +// Use via add_top_n_clause fn of query_builder +// add_top_n_clause( +// &dimensions, +// distribution.distribution_cardinality.into(), +// "count", +// Order::Descending, +// ) +#[derive(Debug)] +pub struct TopN { + pub columns: String, + pub count: u64, + pub order_column: String, + pub order: Order, +} + #[derive(Debug)] pub struct QueryBuilder where @@ -239,13 +329,16 @@ where filters: Vec<(String, FilterTypes, String)>, group_by: Vec, having: Option>, + outer_select: Vec, + top_n: Option, table: AnalyticsCollection, distinct: bool, db_type: PhantomData, + table_engine: TableEngine, } pub trait ToSql { - fn to_sql(&self) -> error_stack::Result; + fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result; } /// Implement `ToSql` on arrays of types that impl `ToString`. @@ -253,7 +346,7 @@ macro_rules! 
impl_to_sql_for_to_string { ($($type:ty),+) => { $( impl ToSql for $type { - fn to_sql(&self) -> error_stack::Result { + fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result { Ok(self.to_string()) } } @@ -267,8 +360,10 @@ impl_to_sql_for_to_string!( &PaymentDimensions, &RefundDimensions, PaymentDimensions, + &PaymentDistributions, RefundDimensions, PaymentMethod, + PaymentMethodType, AuthenticationType, Connector, AttemptStatus, @@ -276,12 +371,18 @@ impl_to_sql_for_to_string!( storage_enums::RefundStatus, Currency, RefundType, + Flow, &String, &bool, - &u64 + &u64, + u64, + Order ); -#[allow(dead_code)] +impl_to_sql_for_to_string!(&SdkEventDimensions, SdkEventDimensions, SdkEventNames); + +impl_to_sql_for_to_string!(&ApiEventDimensions, ApiEventDimensions); + #[derive(Debug)] pub enum FilterTypes { Equal, @@ -290,6 +391,23 @@ pub enum FilterTypes { Gte, Lte, Gt, + Like, + NotLike, + IsNotNull, +} + +pub fn filter_type_to_sql(l: &String, op: &FilterTypes, r: &String) -> String { + match op { + FilterTypes::EqualBool => format!("{l} = {r}"), + FilterTypes::Equal => format!("{l} = '{r}'"), + FilterTypes::In => format!("{l} IN ({r})"), + FilterTypes::Gte => format!("{l} >= '{r}'"), + FilterTypes::Gt => format!("{l} > {r}"), + FilterTypes::Lte => format!("{l} <= '{r}'"), + FilterTypes::Like => format!("{l} LIKE '%{r}%'"), + FilterTypes::NotLike => format!("{l} NOT LIKE '%{r}%'"), + FilterTypes::IsNotNull => format!("{l} IS NOT NULL"), + } } impl QueryBuilder @@ -303,22 +421,68 @@ where filters: Default::default(), group_by: Default::default(), having: Default::default(), + outer_select: Default::default(), + top_n: Default::default(), table, distinct: Default::default(), db_type: Default::default(), + table_engine: T::get_table_engine(table), } } pub fn add_select_column(&mut self, column: impl ToSql) -> QueryResult<()> { self.columns.push( column - .to_sql() + .to_sql(&self.table_engine) .change_context(QueryBuildingError::SqlSerializeError) .attach_printable("Error serializing select column")?, ); Ok(()) } + pub fn transform_to_sql_values(&mut self, values: &[impl ToSql]) -> QueryResult { + let res = values + .iter() + .map(|i| i.to_sql(&self.table_engine)) + .collect::, ParsingError>>() + .change_context(QueryBuildingError::SqlSerializeError) + .attach_printable("Error serializing range filter value")? 
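// Sanity check of filter_type_to_sql above (illustrative values):
//   filter_type_to_sql(&"status".to_string(), &FilterTypes::Equal, &"succeeded".to_string())
//     yields "status = 'succeeded'"
//   filter_type_to_sql(&"attempt_count".to_string(), &FilterTypes::Gt, &"1".to_string())
//     yields "attempt_count > 1" (Gt and EqualBool interpolate without quotes)
//   filter_type_to_sql(&"connector".to_string(), &FilterTypes::IsNotNull, &"NULL".to_string())
//     yields "connector IS NOT NULL" (the right-hand value is ignored)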
+ .join(", "); + Ok(res) + } + + pub fn add_top_n_clause( + &mut self, + columns: &[impl ToSql], + count: u64, + order_column: impl ToSql, + order: Order, + ) -> QueryResult<()> + where + Window<&'static str>: ToSql, + { + let partition_by_columns = self.transform_to_sql_values(columns)?; + let order_by_column = order_column + .to_sql(&self.table_engine) + .change_context(QueryBuildingError::SqlSerializeError) + .attach_printable("Error serializing select column")?; + + self.add_outer_select_column(Window::RowNumber { + field: "", + partition_by: Some(partition_by_columns.clone()), + order_by: Some((order_by_column.clone(), order)), + alias: Some("top_n"), + })?; + + self.top_n = Some(TopN { + columns: partition_by_columns, + count, + order_column: order_by_column, + order, + }); + Ok(()) + } + pub fn set_distinct(&mut self) { self.distinct = true } @@ -346,11 +510,11 @@ where comparison: FilterTypes, ) -> QueryResult<()> { self.filters.push(( - lhs.to_sql() + lhs.to_sql(&self.table_engine) .change_context(QueryBuildingError::SqlSerializeError) .attach_printable("Error serializing filter key")?, comparison, - rhs.to_sql() + rhs.to_sql(&self.table_engine) .change_context(QueryBuildingError::SqlSerializeError) .attach_printable("Error serializing filter value")?, )); @@ -366,7 +530,7 @@ where .iter() .map(|i| { // trimming whitespaces from the filter values received in request, to prevent a possibility of an SQL injection - i.to_sql().map(|s| { + i.to_sql(&self.table_engine).map(|s| { let trimmed_str = s.replace(' ', ""); format!("'{trimmed_str}'") }) @@ -381,7 +545,7 @@ where pub fn add_group_by_clause(&mut self, column: impl ToSql) -> QueryResult<()> { self.group_by.push( column - .to_sql() + .to_sql(&self.table_engine) .change_context(QueryBuildingError::SqlSerializeError) .attach_printable("Error serializing group by field")?, ); @@ -406,14 +570,7 @@ where fn get_filter_clause(&self) -> String { self.filters .iter() - .map(|(l, op, r)| match op { - FilterTypes::EqualBool => format!("{l} = {r}"), - FilterTypes::Equal => format!("{l} = '{r}'"), - FilterTypes::In => format!("{l} IN ({r})"), - FilterTypes::Gte => format!("{l} >= '{r}'"), - FilterTypes::Gt => format!("{l} > {r}"), - FilterTypes::Lte => format!("{l} <= '{r}'"), - }) + .map(|(l, op, r)| filter_type_to_sql(l, op, r)) .collect::>() .join(" AND ") } @@ -426,7 +583,10 @@ where self.group_by.join(", ") } - #[allow(dead_code)] + fn get_outer_select_clause(&self) -> String { + self.outer_select.join(", ") + } + pub fn add_having_clause( &mut self, aggregate: Aggregate, @@ -437,11 +597,11 @@ where Aggregate: ToSql, { let aggregate = aggregate - .to_sql() + .to_sql(&self.table_engine) .change_context(QueryBuildingError::SqlSerializeError) .attach_printable("Error serializing having aggregate")?; let value = value - .to_sql() + .to_sql(&self.table_engine) .change_context(QueryBuildingError::SqlSerializeError) .attach_printable("Error serializing having value")?; let entry = (aggregate, filter_type, value); @@ -453,16 +613,20 @@ where Ok(()) } + pub fn add_outer_select_column(&mut self, column: impl ToSql) -> QueryResult<()> { + self.outer_select.push( + column + .to_sql(&self.table_engine) + .change_context(QueryBuildingError::SqlSerializeError) + .attach_printable("Error serializing outer select column")?, + ); + Ok(()) + } + pub fn get_filter_type_clause(&self) -> Option { self.having.as_ref().map(|vec| { vec.iter() - .map(|(l, op, r)| match op { - FilterTypes::Equal | FilterTypes::EqualBool => format!("{l} = {r}"), - FilterTypes::In 
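// Illustrative only: assuming Window::RowNumber's ToSql impl renders a standard
// row_number() window, a builder configured with
//   add_top_n_clause(&dims, 5, "count", Order::Descending)
// has its final query wrapped by build_query() roughly as
//   SELECT * FROM (
//     SELECT ..., row_number() OVER (PARTITION BY <dims> ORDER BY count desc) AS top_n
//     FROM ( <inner aggregate query> ) _
//   ) _ WHERE top_n <= 5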
=> format!("{l} IN ({r})"),
-                    FilterTypes::Gte => format!("{l} >= {r}"),
-                    FilterTypes::Lte => format!("{l} < {r}"),
-                    FilterTypes::Gt => format!("{l} > {r}"),
-                })
+                .map(|(l, op, r)| filter_type_to_sql(l, op, r))
                 .collect::<Vec<String>>()
                 .join(" AND ")
         })
@@ -471,6 +635,7 @@ where
     pub fn build_query(&mut self) -> QueryResult<String>
     where
         Aggregate<&'static str>: ToSql<T>,
+        Window<&'static str>: ToSql<T>,
     {
         if self.columns.is_empty() {
             Err(QueryBuildingError::InvalidQuery(
@@ -491,7 +656,7 @@ where
         query.push_str(
             &self
                 .table
-                .to_sql()
+                .to_sql(&self.table_engine)
                 .change_context(QueryBuildingError::SqlSerializeError)
                 .attach_printable("Error serializing table value")?,
         );
@@ -504,6 +669,16 @@ where
         if !self.group_by.is_empty() {
             query.push_str(" GROUP BY ");
             query.push_str(&self.get_group_by_clause());
+            if let TableEngine::CollapsingMergeTree { sign } = self.table_engine {
+                self.add_having_clause(
+                    Aggregate::Count {
+                        field: Some(sign),
+                        alias: None,
+                    },
+                    FilterTypes::Gte,
+                    "1",
+                )?;
+            }
         }
 
         if self.having.is_some() {
@@ -512,6 +687,22 @@ where
                 query.push_str(condition.as_str());
             }
         }
+
+        if !self.outer_select.is_empty() {
+            query.insert_str(
+                0,
+                format!("SELECT {} FROM (", &self.get_outer_select_clause()).as_str(),
+            );
+            query.push_str(") _");
+        }
+
+        if let Some(top_n) = &self.top_n {
+            query.insert_str(0, "SELECT * FROM (");
+            query.push_str(format!(") _ WHERE top_n <= {}", top_n.count).as_str());
+        }
+
+        logger::debug!("Generated query: {}", query);
+
         Ok(query)
     }
 
@@ -522,6 +713,7 @@ where
         P: LoadRow<T>,
         Aggregate<&'static str>: ToSql<T>,
+        Window<&'static str>: ToSql<T>,
     {
         let query = self
             .build_query()
diff --git a/crates/router/src/analytics/refunds.rs b/crates/analytics/src/refunds.rs
similarity index 81%
rename from crates/router/src/analytics/refunds.rs
rename to crates/analytics/src/refunds.rs
index a8b52effe76d..53481e232817 100644
--- a/crates/router/src/analytics/refunds.rs
+++ b/crates/analytics/src/refunds.rs
@@ -7,4 +7,4 @@ pub mod types;
 
 pub use accumulator::{RefundMetricAccumulator, RefundMetricsAccumulator};
 pub trait RefundAnalytics: metrics::RefundMetricAnalytics {}
-pub use self::core::get_metrics;
+pub use self::core::{get_filters, get_metrics};
diff --git a/crates/router/src/analytics/refunds/accumulator.rs b/crates/analytics/src/refunds/accumulator.rs
similarity index 98%
rename from crates/router/src/analytics/refunds/accumulator.rs
rename to crates/analytics/src/refunds/accumulator.rs
index 3d0c0e659f6c..9c51defdcf91 100644
--- a/crates/router/src/analytics/refunds/accumulator.rs
+++ b/crates/analytics/src/refunds/accumulator.rs
@@ -1,5 +1,5 @@
 use api_models::analytics::refunds::RefundMetricsBucketValue;
-use common_enums::enums as storage_enums;
+use diesel_models::enums as storage_enums;
 
 use super::metrics::RefundMetricRow;
 #[derive(Debug, Default)]
@@ -15,13 +15,11 @@ pub struct SuccessRateAccumulator {
     pub success: i64,
     pub total: i64,
 }
-
 #[derive(Debug, Default)]
 #[repr(transparent)]
 pub struct CountAccumulator {
     pub count: Option<i64>,
 }
-
 #[derive(Debug, Default)]
 #[repr(transparent)]
 pub struct SumAccumulator {
diff --git a/crates/analytics/src/refunds/core.rs b/crates/analytics/src/refunds/core.rs
new file mode 100644
index 000000000000..25a1e228f567
--- /dev/null
+++ b/crates/analytics/src/refunds/core.rs
@@ -0,0 +1,203 @@
+#![allow(dead_code)]
+use std::collections::HashMap;
+
+use api_models::analytics::{
+    refunds::{
+        RefundDimensions, RefundMetrics, RefundMetricsBucketIdentifier, RefundMetricsBucketResponse,
+    },
+    AnalyticsMetadata, GetRefundFilterRequest, GetRefundMetricRequest, 
MetricsResponse, + RefundFilterValue, RefundFiltersResponse, +}; +use error_stack::{IntoReport, ResultExt}; +use router_env::{ + logger, + tracing::{self, Instrument}, +}; + +use super::{ + filters::{get_refund_filter_for_dimension, RefundFilterRow}, + RefundMetricsAccumulator, +}; +use crate::{ + errors::{AnalyticsError, AnalyticsResult}, + metrics, + refunds::RefundMetricAccumulator, + AnalyticsProvider, +}; + +pub async fn get_metrics( + pool: &AnalyticsProvider, + merchant_id: &String, + req: GetRefundMetricRequest, +) -> AnalyticsResult> { + let mut metrics_accumulator: HashMap = + HashMap::new(); + let mut set = tokio::task::JoinSet::new(); + for metric_type in req.metrics.iter().cloned() { + let req = req.clone(); + let pool = pool.clone(); + let task_span = tracing::debug_span!( + "analytics_refund_query", + refund_metric = metric_type.as_ref() + ); + // Currently JoinSet works with only static lifetime references even if the task pool does not outlive the given reference + // We can optimize away this clone once that is fixed + let merchant_id_scoped = merchant_id.to_owned(); + set.spawn( + async move { + let data = pool + .get_refund_metrics( + &metric_type, + &req.group_by_names.clone(), + &merchant_id_scoped, + &req.filters, + &req.time_series.map(|t| t.granularity), + &req.time_range, + ) + .await + .change_context(AnalyticsError::UnknownError); + (metric_type, data) + } + .instrument(task_span), + ); + } + + while let Some((metric, data)) = set + .join_next() + .await + .transpose() + .into_report() + .change_context(AnalyticsError::UnknownError)? + { + let data = data?; + let attributes = &[ + metrics::request::add_attributes("metric_type", metric.to_string()), + metrics::request::add_attributes("source", pool.to_string()), + ]; + + let value = u64::try_from(data.len()); + if let Ok(val) = value { + metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes); + logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val); + } + + for (id, value) in data { + logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}"); + let metrics_builder = metrics_accumulator.entry(id).or_default(); + match metric { + RefundMetrics::RefundSuccessRate => metrics_builder + .refund_success_rate + .add_metrics_bucket(&value), + RefundMetrics::RefundCount => { + metrics_builder.refund_count.add_metrics_bucket(&value) + } + RefundMetrics::RefundSuccessCount => { + metrics_builder.refund_success.add_metrics_bucket(&value) + } + RefundMetrics::RefundProcessedAmount => { + metrics_builder.processed_amount.add_metrics_bucket(&value) + } + } + } + + logger::debug!( + "Analytics Accumulated Results: metric: {}, results: {:#?}", + metric, + metrics_accumulator + ); + } + let query_data: Vec = metrics_accumulator + .into_iter() + .map(|(id, val)| RefundMetricsBucketResponse { + values: val.collect(), + dimensions: id, + }) + .collect(); + + Ok(MetricsResponse { + query_data, + meta_data: [AnalyticsMetadata { + current_time_range: req.time_range, + }], + }) +} + +pub async fn get_filters( + pool: &AnalyticsProvider, + req: GetRefundFilterRequest, + merchant_id: &String, +) -> AnalyticsResult { + let mut res = RefundFiltersResponse::default(); + for dim in req.group_by_names { + let values = match pool { + AnalyticsProvider::Sqlx(pool) => { + get_refund_filter_for_dimension(dim, merchant_id, &req.time_range, pool) + .await + } + AnalyticsProvider::Clickhouse(pool) => { + get_refund_filter_for_dimension(dim, merchant_id, &req.time_range, pool) + .await + } + 
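// A minimal sketch of the fan-out used by get_metrics above; run_metric and
// merge_into_accumulator are hypothetical stand-ins for the pool call and the
// match over RefundMetrics:
//
//   let mut set = tokio::task::JoinSet::new();
//   for metric in metrics {
//       let pool = pool.clone(); // providers hold pooled clients, so cloning is cheap
//       set.spawn(async move { (metric, run_metric(&pool, metric).await) });
//   }
//   while let Some(Ok((metric, data))) = set.join_next().await {
//       merge_into_accumulator(metric, data?); // buckets keyed by identifier
//   }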
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => { + let ckh_result = get_refund_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + ckh_pool, + ) + .await; + let sqlx_result = get_refund_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + sqlx_pool, + ) + .await; + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics filters") + }, + _ => {} + }; + ckh_result + } + AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => { + let ckh_result = get_refund_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + ckh_pool, + ) + .await; + let sqlx_result = get_refund_filter_for_dimension( + dim, + merchant_id, + &req.time_range, + sqlx_pool, + ) + .await; + match (&sqlx_result, &ckh_result) { + (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { + router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics filters") + }, + _ => {} + }; + sqlx_result + } + } + .change_context(AnalyticsError::UnknownError)? + .into_iter() + .filter_map(|fil: RefundFilterRow| match dim { + RefundDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()), + RefundDimensions::RefundStatus => fil.refund_status.map(|i| i.as_ref().to_string()), + RefundDimensions::Connector => fil.connector, + RefundDimensions::RefundType => fil.refund_type.map(|i| i.as_ref().to_string()), + }) + .collect::>(); + res.query_data.push(RefundFilterValue { + dimension: dim, + values, + }) + } + Ok(res) +} diff --git a/crates/router/src/analytics/refunds/filters.rs b/crates/analytics/src/refunds/filters.rs similarity index 90% rename from crates/router/src/analytics/refunds/filters.rs rename to crates/analytics/src/refunds/filters.rs index 6b45e9194fad..29375483eb9a 100644 --- a/crates/router/src/analytics/refunds/filters.rs +++ b/crates/analytics/src/refunds/filters.rs @@ -2,13 +2,13 @@ use api_models::analytics::{ refunds::{RefundDimensions, RefundType}, Granularity, TimeRange, }; -use common_enums::enums::{Currency, RefundStatus}; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums::{Currency, RefundStatus}; use error_stack::ResultExt; use time::PrimitiveDateTime; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{ AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult, LoadRow, @@ -28,6 +28,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::Refund); @@ -49,8 +50,7 @@ where .change_context(FiltersError::QueryBuildingError)? 
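// The Combined* arms above implement a shadow read: run the same query against both
// stores, log any divergence, and serve one side. A minimal sketch of the comparison
// step (hypothetical helper names):
//
//   let (ckh, sqlx) = (query_clickhouse().await, query_postgres().await);
//   if let (Ok(a), Ok(b)) = (&sqlx, &ckh) {
//       if a != b { logger::error!(?a, ?b, "clickhouse/postgres mismatch"); }
//   }
//   // CombinedCkh then returns the ClickHouse result, CombinedSqlx the Postgres one.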
.change_context(FiltersError::QueryExecutionFailure) } - -#[derive(Debug, serde::Serialize, Eq, PartialEq)] +#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct RefundFilterRow { pub currency: Option>, pub refund_status: Option>, diff --git a/crates/router/src/analytics/refunds/metrics.rs b/crates/analytics/src/refunds/metrics.rs similarity index 91% rename from crates/router/src/analytics/refunds/metrics.rs rename to crates/analytics/src/refunds/metrics.rs index d4f509b4a1e3..10cd03546772 100644 --- a/crates/router/src/analytics/refunds/metrics.rs +++ b/crates/analytics/src/refunds/metrics.rs @@ -4,7 +4,7 @@ use api_models::analytics::{ }, Granularity, TimeRange, }; -use common_enums::enums as storage_enums; +use diesel_models::enums as storage_enums; use time::PrimitiveDateTime; mod refund_count; mod refund_processed_amount; @@ -15,12 +15,11 @@ use refund_processed_amount::RefundProcessedAmount; use refund_success_count::RefundSuccessCount; use refund_success_rate::RefundSuccessRate; -use crate::analytics::{ - query::{Aggregate, GroupByClause, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult}, }; - -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq, serde::Deserialize)] pub struct RefundMetricRow { pub currency: Option>, pub refund_status: Option>, @@ -28,7 +27,9 @@ pub struct RefundMetricRow { pub refund_type: Option>, pub total: Option, pub count: Option, + #[serde(with = "common_utils::custom_serde::iso8601::option")] pub start_bucket: Option, + #[serde(with = "common_utils::custom_serde::iso8601::option")] pub end_bucket: Option, } @@ -42,6 +43,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -62,6 +64,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, diff --git a/crates/router/src/analytics/refunds/metrics/refund_count.rs b/crates/analytics/src/refunds/metrics/refund_count.rs similarity index 94% rename from crates/router/src/analytics/refunds/metrics/refund_count.rs rename to crates/analytics/src/refunds/metrics/refund_count.rs index 471327235073..cf3c7a509278 100644 --- a/crates/router/src/analytics/refunds/metrics/refund_count.rs +++ b/crates/analytics/src/refunds/metrics/refund_count.rs @@ -7,8 +7,8 @@ use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -23,6 +23,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -93,7 +94,7 @@ where Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), - i.refund_status.as_ref().map(|i| i.0), + i.refund_status.as_ref().map(|i| i.0.to_string()), i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), TimeRange { @@ -110,7 +111,7 @@ where i, )) }) - .collect::, crate::analytics::query::PostProcessingError>>() + .collect::, crate::query::PostProcessingError>>() 
.change_context(MetricsError::PostProcessingFailure) } } diff --git a/crates/router/src/analytics/refunds/metrics/refund_processed_amount.rs b/crates/analytics/src/refunds/metrics/refund_processed_amount.rs similarity index 95% rename from crates/router/src/analytics/refunds/metrics/refund_processed_amount.rs rename to crates/analytics/src/refunds/metrics/refund_processed_amount.rs index c5f3a706aaef..661fca57b282 100644 --- a/crates/router/src/analytics/refunds/metrics/refund_processed_amount.rs +++ b/crates/analytics/src/refunds/metrics/refund_processed_amount.rs @@ -2,14 +2,14 @@ use api_models::analytics::{ refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier}, Granularity, TimeRange, }; -use common_enums::enums as storage_enums; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] @@ -23,6 +23,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -116,7 +117,7 @@ where i, )) }) - .collect::, crate::analytics::query::PostProcessingError>>() + .collect::, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } } diff --git a/crates/router/src/analytics/refunds/metrics/refund_success_count.rs b/crates/analytics/src/refunds/metrics/refund_success_count.rs similarity index 95% rename from crates/router/src/analytics/refunds/metrics/refund_success_count.rs rename to crates/analytics/src/refunds/metrics/refund_success_count.rs index 0c8032908fd7..bc09d8b7ab64 100644 --- a/crates/router/src/analytics/refunds/metrics/refund_success_count.rs +++ b/crates/analytics/src/refunds/metrics/refund_success_count.rs @@ -2,14 +2,14 @@ use api_models::analytics::{ refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier}, Granularity, TimeRange, }; -use common_enums::enums as storage_enums; use common_utils::errors::ReportSwitchExt; +use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; @@ -24,6 +24,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -115,7 +116,7 @@ where }) .collect::, - crate::analytics::query::PostProcessingError, + crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } diff --git a/crates/router/src/analytics/refunds/metrics/refund_success_rate.rs b/crates/analytics/src/refunds/metrics/refund_success_rate.rs similarity index 96% rename from crates/router/src/analytics/refunds/metrics/refund_success_rate.rs rename to crates/analytics/src/refunds/metrics/refund_success_rate.rs index 42f9ccf8d3c0..29b73b885d8e 100644 --- 
a/crates/router/src/analytics/refunds/metrics/refund_success_rate.rs +++ b/crates/analytics/src/refunds/metrics/refund_success_rate.rs @@ -7,8 +7,8 @@ use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; -use crate::analytics::{ - query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql}, +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] @@ -22,6 +22,7 @@ where AnalyticsCollection: ToSql, Granularity: GroupByClause, Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, { async fn load_metrics( &self, @@ -110,7 +111,7 @@ where }) .collect::, - crate::analytics::query::PostProcessingError, + crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } diff --git a/crates/router/src/analytics/refunds/types.rs b/crates/analytics/src/refunds/types.rs similarity index 98% rename from crates/router/src/analytics/refunds/types.rs rename to crates/analytics/src/refunds/types.rs index fbfd69972671..d7d739e1aba7 100644 --- a/crates/router/src/analytics/refunds/types.rs +++ b/crates/analytics/src/refunds/types.rs @@ -1,7 +1,7 @@ use api_models::analytics::refunds::{RefundDimensions, RefundFilters}; use error_stack::ResultExt; -use crate::analytics::{ +use crate::{ query::{QueryBuilder, QueryFilter, QueryResult, ToSql}, types::{AnalyticsCollection, AnalyticsDataSource}, }; diff --git a/crates/analytics/src/sdk_events.rs b/crates/analytics/src/sdk_events.rs new file mode 100644 index 000000000000..fe8af7cfe2df --- /dev/null +++ b/crates/analytics/src/sdk_events.rs @@ -0,0 +1,14 @@ +pub mod accumulator; +mod core; +pub mod events; +pub mod filters; +pub mod metrics; +pub mod types; +pub use accumulator::{SdkEventMetricAccumulator, SdkEventMetricsAccumulator}; +pub trait SDKEventAnalytics: events::SdkEventsFilterAnalytics {} +pub trait SdkEventAnalytics: + metrics::SdkEventMetricAnalytics + filters::SdkEventFilterAnalytics +{ +} + +pub use self::core::{get_filters, get_metrics, sdk_events_core}; diff --git a/crates/analytics/src/sdk_events/accumulator.rs b/crates/analytics/src/sdk_events/accumulator.rs new file mode 100644 index 000000000000..ab9e9309434f --- /dev/null +++ b/crates/analytics/src/sdk_events/accumulator.rs @@ -0,0 +1,98 @@ +use api_models::analytics::sdk_events::SdkEventMetricsBucketValue; +use router_env::logger; + +use super::metrics::SdkEventMetricRow; + +#[derive(Debug, Default)] +pub struct SdkEventMetricsAccumulator { + pub payment_attempts: CountAccumulator, + pub payment_success: CountAccumulator, + pub payment_methods_call_count: CountAccumulator, + pub average_payment_time: AverageAccumulator, + pub sdk_initiated_count: CountAccumulator, + pub sdk_rendered_count: CountAccumulator, + pub payment_method_selected_count: CountAccumulator, + pub payment_data_filled_count: CountAccumulator, +} + +#[derive(Debug, Default)] +#[repr(transparent)] +pub struct CountAccumulator { + pub count: Option, +} + +#[derive(Debug, Default)] +pub struct AverageAccumulator { + pub total: u32, + pub count: u32, +} + +pub trait SdkEventMetricAccumulator { + type MetricOutput; + + fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow); + + fn collect(self) -> Self::MetricOutput; +} + +impl SdkEventMetricAccumulator for CountAccumulator { + type MetricOutput = Option; + #[inline] + fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) { + 
self.count = match (self.count, metrics.count) { + (None, None) => None, + (None, i @ Some(_)) | (i @ Some(_), None) => i, + (Some(a), Some(b)) => Some(a + b), + } + } + #[inline] + fn collect(self) -> Self::MetricOutput { + self.count.and_then(|i| u64::try_from(i).ok()) + } +} + +impl SdkEventMetricAccumulator for AverageAccumulator { + type MetricOutput = Option; + + fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) { + let total = metrics + .total + .as_ref() + .and_then(bigdecimal::ToPrimitive::to_u32); + let count = metrics.count.and_then(|total| u32::try_from(total).ok()); + + match (total, count) { + (Some(total), Some(count)) => { + self.total += total; + self.count += count; + } + _ => { + logger::error!(message="Dropping metrics for average accumulator", metric=?metrics); + } + } + } + + fn collect(self) -> Self::MetricOutput { + if self.count == 0 { + None + } else { + Some(f64::from(self.total) / f64::from(self.count)) + } + } +} + +impl SdkEventMetricsAccumulator { + #[allow(dead_code)] + pub fn collect(self) -> SdkEventMetricsBucketValue { + SdkEventMetricsBucketValue { + payment_attempts: self.payment_attempts.collect(), + payment_success_count: self.payment_success.collect(), + payment_methods_call_count: self.payment_methods_call_count.collect(), + average_payment_time: self.average_payment_time.collect(), + sdk_initiated_count: self.sdk_initiated_count.collect(), + sdk_rendered_count: self.sdk_rendered_count.collect(), + payment_method_selected_count: self.payment_method_selected_count.collect(), + payment_data_filled_count: self.payment_data_filled_count.collect(), + } + } +} diff --git a/crates/analytics/src/sdk_events/core.rs b/crates/analytics/src/sdk_events/core.rs new file mode 100644 index 000000000000..34f23c745b05 --- /dev/null +++ b/crates/analytics/src/sdk_events/core.rs @@ -0,0 +1,201 @@ +use std::collections::HashMap; + +use api_models::analytics::{ + sdk_events::{ + MetricsBucketResponse, SdkEventMetrics, SdkEventMetricsBucketIdentifier, SdkEventsRequest, + }, + AnalyticsMetadata, GetSdkEventFiltersRequest, GetSdkEventMetricRequest, MetricsResponse, + SdkEventFiltersResponse, +}; +use error_stack::{IntoReport, ResultExt}; +use router_env::{instrument, logger, tracing}; + +use super::{ + events::{get_sdk_event, SdkEventsResult}, + SdkEventMetricsAccumulator, +}; +use crate::{ + errors::{AnalyticsError, AnalyticsResult}, + sdk_events::SdkEventMetricAccumulator, + types::FiltersError, + AnalyticsProvider, +}; + +#[instrument(skip_all)] +pub async fn sdk_events_core( + pool: &AnalyticsProvider, + req: SdkEventsRequest, + publishable_key: String, +) -> AnalyticsResult> { + match pool { + AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented) + .into_report() + .attach_printable("SQL Analytics is not implemented for Sdk Events"), + AnalyticsProvider::Clickhouse(pool) => get_sdk_event(&publishable_key, req, pool).await, + AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool) + | AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => { + get_sdk_event(&publishable_key, req, ckh_pool).await + } + } + .change_context(AnalyticsError::UnknownError) +} + +#[instrument(skip_all)] +pub async fn get_metrics( + pool: &AnalyticsProvider, + publishable_key: Option<&String>, + req: GetSdkEventMetricRequest, +) -> AnalyticsResult> { + let mut metrics_accumulator: HashMap< + SdkEventMetricsBucketIdentifier, + SdkEventMetricsAccumulator, + > = HashMap::new(); + + if let Some(publishable_key) = publishable_key { + let mut set = tokio::task::JoinSet::new(); + 
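// Worked example for AverageAccumulator above: buckets (total=300, count=2) and
// (total=100, count=1) fold into total=400, count=3, and collect() returns
// Some(400.0 / 3.0), roughly 133.3. This is a count-weighted mean, not the mean of
// the per-bucket averages (which would be (150 + 100) / 2 = 125).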
for metric_type in req.metrics.iter().cloned() { + let req = req.clone(); + let publishable_key_scoped = publishable_key.to_owned(); + let pool = pool.clone(); + set.spawn(async move { + let data = pool + .get_sdk_event_metrics( + &metric_type, + &req.group_by_names.clone(), + &publishable_key_scoped, + &req.filters, + &req.time_series.map(|t| t.granularity), + &req.time_range, + ) + .await + .change_context(AnalyticsError::UnknownError); + (metric_type, data) + }); + } + + while let Some((metric, data)) = set + .join_next() + .await + .transpose() + .into_report() + .change_context(AnalyticsError::UnknownError)? + { + logger::info!("Logging Result {:?}", data); + for (id, value) in data? { + let metrics_builder = metrics_accumulator.entry(id).or_default(); + match metric { + SdkEventMetrics::PaymentAttempts => { + metrics_builder.payment_attempts.add_metrics_bucket(&value) + } + SdkEventMetrics::PaymentSuccessCount => { + metrics_builder.payment_success.add_metrics_bucket(&value) + } + SdkEventMetrics::PaymentMethodsCallCount => metrics_builder + .payment_methods_call_count + .add_metrics_bucket(&value), + SdkEventMetrics::SdkRenderedCount => metrics_builder + .sdk_rendered_count + .add_metrics_bucket(&value), + SdkEventMetrics::SdkInitiatedCount => metrics_builder + .sdk_initiated_count + .add_metrics_bucket(&value), + SdkEventMetrics::PaymentMethodSelectedCount => metrics_builder + .payment_method_selected_count + .add_metrics_bucket(&value), + SdkEventMetrics::PaymentDataFilledCount => metrics_builder + .payment_data_filled_count + .add_metrics_bucket(&value), + SdkEventMetrics::AveragePaymentTime => metrics_builder + .average_payment_time + .add_metrics_bucket(&value), + } + } + + logger::debug!( + "Analytics Accumulated Results: metric: {}, results: {:#?}", + metric, + metrics_accumulator + ); + } + + let query_data: Vec = metrics_accumulator + .into_iter() + .map(|(id, val)| MetricsBucketResponse { + values: val.collect(), + dimensions: id, + }) + .collect(); + + Ok(MetricsResponse { + query_data, + meta_data: [AnalyticsMetadata { + current_time_range: req.time_range, + }], + }) + } else { + logger::error!("Publishable key not present for merchant ID"); + Ok(MetricsResponse { + query_data: vec![], + meta_data: [AnalyticsMetadata { + current_time_range: req.time_range, + }], + }) + } +} + +#[allow(dead_code)] +pub async fn get_filters( + pool: &AnalyticsProvider, + req: GetSdkEventFiltersRequest, + publishable_key: Option<&String>, +) -> AnalyticsResult { + use api_models::analytics::{sdk_events::SdkEventDimensions, SdkEventFilterValue}; + + use super::filters::get_sdk_event_filter_for_dimension; + use crate::sdk_events::filters::SdkEventFilter; + + let mut res = SdkEventFiltersResponse::default(); + + if let Some(publishable_key) = publishable_key { + for dim in req.group_by_names { + let values = match pool { + AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented) + .into_report() + .attach_printable("SQL Analytics is not implemented for SDK Events"), + AnalyticsProvider::Clickhouse(pool) => { + get_sdk_event_filter_for_dimension(dim, publishable_key, &req.time_range, pool) + .await + } + AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool) + | AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => { + get_sdk_event_filter_for_dimension( + dim, + publishable_key, + &req.time_range, + ckh_pool, + ) + .await + } + } + .change_context(AnalyticsError::UnknownError)? 
+ .into_iter() + .filter_map(|fil: SdkEventFilter| match dim { + SdkEventDimensions::PaymentMethod => fil.payment_method, + SdkEventDimensions::Platform => fil.platform, + SdkEventDimensions::BrowserName => fil.browser_name, + SdkEventDimensions::Source => fil.source, + SdkEventDimensions::Component => fil.component, + SdkEventDimensions::PaymentExperience => fil.payment_experience, + }) + .collect::>(); + res.query_data.push(SdkEventFilterValue { + dimension: dim, + values, + }) + } + } else { + router_env::logger::error!("Publishable key not found for merchant"); + } + + Ok(res) +} diff --git a/crates/analytics/src/sdk_events/events.rs b/crates/analytics/src/sdk_events/events.rs new file mode 100644 index 000000000000..a4d044267e94 --- /dev/null +++ b/crates/analytics/src/sdk_events/events.rs @@ -0,0 +1,80 @@ +use api_models::analytics::{ + sdk_events::{SdkEventNames, SdkEventsRequest}, + Granularity, +}; +use common_utils::errors::ReportSwitchExt; +use error_stack::ResultExt; +use strum::IntoEnumIterator; +use time::PrimitiveDateTime; + +use crate::{ + query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, + types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, +}; +pub trait SdkEventsFilterAnalytics: LoadRow {} + +pub async fn get_sdk_event( + merchant_id: &str, + request: SdkEventsRequest, + pool: &T, +) -> FiltersResult> +where + T: AnalyticsDataSource + SdkEventsFilterAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + let static_event_list = SdkEventNames::iter() + .map(|i| format!("'{}'", i.as_ref())) + .collect::>() + .join(","); + let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::SdkEvents); + query_builder.add_select_column("*").switch()?; + + query_builder + .add_filter_clause("merchant_id", merchant_id) + .switch()?; + query_builder + .add_filter_clause("payment_id", request.payment_id) + .switch()?; + query_builder + .add_custom_filter_clause("event_name", static_event_list, FilterTypes::In) + .switch()?; + let _ = &request + .time_range + .set_filter_clause(&mut query_builder) + .attach_printable("Error filtering time range") + .switch()?; + + //TODO!: update the execute_query function to return reports instead of plain errors... + query_builder + .execute_query::(pool) + .await + .change_context(FiltersError::QueryBuildingError)? 
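// Illustrative only: SdkEventNames::iter() above expands to every variant, so the
// generated WHERE clause contains something shaped like
//   event_name IN ('PAYMENT_ATTEMPT', 'SDK_RENDERED', ...)
// with one quoted entry per variant (the spellings shown here are examples; the
// real strings come from each variant's as_ref()).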
+ .change_context(FiltersError::QueryExecutionFailure) +} +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub struct SdkEventsResult { + pub merchant_id: String, + pub payment_id: String, + pub event_name: Option, + pub log_type: Option, + pub first_event: bool, + pub browser_name: Option, + pub browser_version: Option, + pub source: Option, + pub category: Option, + pub version: Option, + pub value: Option, + pub platform: Option, + pub component: Option, + pub payment_method: Option, + pub payment_experience: Option, + pub latency: Option, + #[serde(with = "common_utils::custom_serde::iso8601")] + pub created_at_precise: PrimitiveDateTime, + #[serde(with = "common_utils::custom_serde::iso8601")] + pub created_at: PrimitiveDateTime, +} diff --git a/crates/analytics/src/sdk_events/filters.rs b/crates/analytics/src/sdk_events/filters.rs new file mode 100644 index 000000000000..9963f51ef947 --- /dev/null +++ b/crates/analytics/src/sdk_events/filters.rs @@ -0,0 +1,56 @@ +use api_models::analytics::{sdk_events::SdkEventDimensions, Granularity, TimeRange}; +use common_utils::errors::ReportSwitchExt; +use error_stack::ResultExt; +use time::PrimitiveDateTime; + +use crate::{ + query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, + types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, +}; + +pub trait SdkEventFilterAnalytics: LoadRow {} + +pub async fn get_sdk_event_filter_for_dimension( + dimension: SdkEventDimensions, + publishable_key: &String, + time_range: &TimeRange, + pool: &T, +) -> FiltersResult> +where + T: AnalyticsDataSource + SdkEventFilterAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + let mut query_builder: QueryBuilder = QueryBuilder::new(AnalyticsCollection::SdkEvents); + + query_builder.add_select_column(dimension).switch()?; + time_range + .set_filter_clause(&mut query_builder) + .attach_printable("Error filtering time range") + .switch()?; + + query_builder + .add_filter_clause("merchant_id", publishable_key) + .switch()?; + + query_builder.set_distinct(); + + query_builder + .execute_query::(pool) + .await + .change_context(FiltersError::QueryBuildingError)? 
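// Illustrative only: for dimension = payment_method this builder is expected to
// emit roughly
//   SELECT DISTINCT payment_method FROM <sdk events table>
//   WHERE merchant_id = '<publishable_key>' AND created_at <within time_range>
// SDK events are scoped by publishable key, which this pipeline stores in the
// merchant_id column, hence the filter key above.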
+ .change_context(FiltersError::QueryExecutionFailure) +} + +#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] +pub struct SdkEventFilter { + pub payment_method: Option, + pub platform: Option, + pub browser_name: Option, + pub source: Option, + pub component: Option, + pub payment_experience: Option, +} diff --git a/crates/analytics/src/sdk_events/metrics.rs b/crates/analytics/src/sdk_events/metrics.rs new file mode 100644 index 000000000000..354d2270d18a --- /dev/null +++ b/crates/analytics/src/sdk_events/metrics.rs @@ -0,0 +1,181 @@ +use api_models::analytics::{ + sdk_events::{ + SdkEventDimensions, SdkEventFilters, SdkEventMetrics, SdkEventMetricsBucketIdentifier, + }, + Granularity, TimeRange, +}; +use time::PrimitiveDateTime; + +use crate::{ + query::{Aggregate, GroupByClause, ToSql, Window}, + types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult}, +}; + +mod average_payment_time; +mod payment_attempts; +mod payment_data_filled_count; +mod payment_method_selected_count; +mod payment_methods_call_count; +mod payment_success_count; +mod sdk_initiated_count; +mod sdk_rendered_count; + +use average_payment_time::AveragePaymentTime; +use payment_attempts::PaymentAttempts; +use payment_data_filled_count::PaymentDataFilledCount; +use payment_method_selected_count::PaymentMethodSelectedCount; +use payment_methods_call_count::PaymentMethodsCallCount; +use payment_success_count::PaymentSuccessCount; +use sdk_initiated_count::SdkInitiatedCount; +use sdk_rendered_count::SdkRenderedCount; + +#[derive(Debug, PartialEq, Eq, serde::Deserialize)] +pub struct SdkEventMetricRow { + pub total: Option, + pub count: Option, + pub time_bucket: Option, + pub payment_method: Option, + pub platform: Option, + pub browser_name: Option, + pub source: Option, + pub component: Option, + pub payment_experience: Option, +} + +pub trait SdkEventMetricAnalytics: LoadRow {} + +#[async_trait::async_trait] +pub trait SdkEventMetric +where + T: AnalyticsDataSource + SdkEventMetricAnalytics, +{ + async fn load_metrics( + &self, + dimensions: &[SdkEventDimensions], + publishable_key: &str, + filters: &SdkEventFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult>; +} + +#[async_trait::async_trait] +impl SdkEventMetric for SdkEventMetrics +where + T: AnalyticsDataSource + SdkEventMetricAnalytics, + PrimitiveDateTime: ToSql, + AnalyticsCollection: ToSql, + Granularity: GroupByClause, + Aggregate<&'static str>: ToSql, + Window<&'static str>: ToSql, +{ + async fn load_metrics( + &self, + dimensions: &[SdkEventDimensions], + publishable_key: &str, + filters: &SdkEventFilters, + granularity: &Option, + time_range: &TimeRange, + pool: &T, + ) -> MetricsResult> { + match self { + Self::PaymentAttempts => { + PaymentAttempts + .load_metrics( + dimensions, + publishable_key, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::PaymentSuccessCount => { + PaymentSuccessCount + .load_metrics( + dimensions, + publishable_key, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::PaymentMethodsCallCount => { + PaymentMethodsCallCount + .load_metrics( + dimensions, + publishable_key, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::SdkRenderedCount => { + SdkRenderedCount + .load_metrics( + dimensions, + publishable_key, + filters, + granularity, + time_range, + pool, + ) + .await + } + Self::SdkInitiatedCount => { + SdkInitiatedCount + .load_metrics( + dimensions, + publishable_key, + filters, + 
diff --git a/crates/analytics/src/sdk_events/metrics/average_payment_time.rs b/crates/analytics/src/sdk_events/metrics/average_payment_time.rs
new file mode 100644
index 000000000000..db7171524ae5
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/average_payment_time.rs
@@ -0,0 +1,129 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct AveragePaymentTime;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for AveragePaymentTime
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        query_builder
+            .add_select_column(Aggregate::Sum {
+                field: "latency",
+                alias: Some("total"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::PaymentAttempt)
+            .switch()?;
+
+        query_builder
+            .add_custom_filter_clause("latency", 0, FilterTypes::Gt)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
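Worth noting: `AveragePaymentTime` never divides in SQL; it selects `sum(latency)` as `total` alongside `count` and leaves the division to the accumulator stage. A sketch of that final step, assuming `latency` is recorded in milliseconds (the patch does not state the unit) and using the `ToPrimitive` re-export from `bigdecimal`:

use bigdecimal::ToPrimitive;

use crate::sdk_events::metrics::SdkEventMetricRow;

// Fold per-bucket rows into one average: `total` is sum(latency) and
// `count` is count(*), exactly as selected by the query above.
fn average_payment_time_ms(rows: &[SdkEventMetricRow]) -> Option<f64> {
    let mut sum = 0f64;
    let mut n = 0i64;
    for row in rows {
        if let (Some(total), Some(count)) =
            (row.total.as_ref().and_then(|t| t.to_f64()), row.count)
        {
            sum += total;
            n += count;
        }
    }
    (n > 0).then(|| sum / n as f64)
}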
diff --git a/crates/analytics/src/sdk_events/metrics/payment_attempts.rs b/crates/analytics/src/sdk_events/metrics/payment_attempts.rs
new file mode 100644
index 000000000000..b2a78188c4f2
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/payment_attempts.rs
@@ -0,0 +1,118 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct PaymentAttempts;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for PaymentAttempts
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::PaymentAttempt)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
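For intuition, the statement `PaymentAttempts` assembles against the `sdk_events` table comes out roughly like the SQL below, reconstructed by hand for a single `payment_method` dimension with hourly granularity. The bucketing function and quoting are whatever `QueryBuilder` actually renders for ClickHouse, so treat this as a paraphrase, not the generated text; the event name literal follows the SCREAMING_SNAKE_CASE serialization of `SdkEventNames`:

// Paraphrased ClickHouse SQL; {braces} mark bound values.
const APPROX_PAYMENT_ATTEMPTS_SQL: &str = r#"
SELECT
    payment_method,
    count(*) AS count,
    toStartOfInterval(created_at, INTERVAL 60 minute) AS time_bucket
FROM sdk_events
WHERE merchant_id = '{publishable_key}'
  AND first_event = 1
  AND event_name = 'PAYMENT_ATTEMPT'
  AND created_at >= '{start_time}'
  AND created_at <= '{end_time}'
GROUP BY payment_method, time_bucket
"#;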
diff --git a/crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs b/crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs
new file mode 100644
index 000000000000..a3c94baeda26
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs
@@ -0,0 +1,118 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct PaymentDataFilledCount;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for PaymentDataFilledCount
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::PaymentDataFilled)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
diff --git a/crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs b/crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs
new file mode 100644
index 000000000000..11aeac5e6ff9
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs
@@ -0,0 +1,118 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct PaymentMethodSelectedCount;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for PaymentMethodSelectedCount
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::PaymentMethodChanged)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
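All of these metrics call `filters.set_filter_clause(...)` before adding their own event-name clause, so one request-level filter set scopes every SDK metric identically. A sketch of the JSON those filters deserialize from (values invented, and `serde_json` assumed on the consumer side); each `serde(default)` field may simply be omitted:

use api_models::analytics::sdk_events::SdkEventFilters;

fn example_filters() -> SdkEventFilters {
    // Unlisted fields (source, component, payment_experience) default to
    // empty vectors and therefore contribute no WHERE clause.
    serde_json::from_str(
        r#"{
            "payment_method": ["card"],
            "platform": ["WEB"],
            "browser_name": ["Chrome", "Safari"]
        }"#,
    )
    .expect("static example JSON is well-formed")
}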
diff --git a/crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs b/crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs
new file mode 100644
index 000000000000..7570f1292e5e
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs
@@ -0,0 +1,126 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct PaymentMethodsCallCount;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for PaymentMethodsCallCount
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::PaymentMethodsCall)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("log_type", "INFO")
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("category", "API")
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
diff --git a/crates/analytics/src/sdk_events/metrics/payment_success_count.rs b/crates/analytics/src/sdk_events/metrics/payment_success_count.rs
new file mode 100644
index 000000000000..3faf8213632f
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/payment_success_count.rs
@@ -0,0 +1,118 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct PaymentSuccessCount;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for PaymentSuccessCount
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::PaymentSuccess)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
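With `PaymentAttempts` and `PaymentSuccessCount` both loaded, a conversion rate is a pure post-processing step; the patch leaves that division to the consumer. A trivial sketch of the arithmetic:

// Success rate in percent, guarding the zero-attempts case.
fn sdk_conversion_rate(attempts: u64, successes: u64) -> Option<f64> {
    (attempts > 0).then(|| successes as f64 * 100.0 / attempts as f64)
}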
diff --git a/crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs b/crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs
new file mode 100644
index 000000000000..a525e7890b75
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs
@@ -0,0 +1,118 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct SdkInitiatedCount;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for SdkInitiatedCount
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::StripeElementsCalled)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
diff --git a/crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs b/crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs
new file mode 100644
index 000000000000..ed9e776423a8
--- /dev/null
+++ b/crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs
@@ -0,0 +1,118 @@
+use api_models::analytics::{
+    sdk_events::{
+        SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
+    },
+    Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::SdkEventMetricRow;
+use crate::{
+    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct SdkRenderedCount;
+
+#[async_trait::async_trait]
+impl<T> super::SdkEventMetric<T> for SdkRenderedCount
+where
+    T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
+    PrimitiveDateTime: ToSql<T>,
+    AnalyticsCollection: ToSql<T>,
+    Granularity: GroupByClause<T>,
+    Aggregate<&'static str>: ToSql<T>,
+    Window<&'static str>: ToSql<T>,
+{
+    async fn load_metrics(
+        &self,
+        dimensions: &[SdkEventDimensions],
+        publishable_key: &str,
+        filters: &SdkEventFilters,
+        granularity: &Option<Granularity>,
+        time_range: &TimeRange,
+        pool: &T,
+    ) -> MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
+        let dimensions = dimensions.to_vec();
+
+        for dim in dimensions.iter() {
+            query_builder.add_select_column(dim).switch()?;
+        }
+
+        query_builder
+            .add_select_column(Aggregate::Count {
+                field: None,
+                alias: Some("count"),
+            })
+            .switch()?;
+
+        if let Some(granularity) = granularity.as_ref() {
+            query_builder
+                .add_granularity_in_mins(granularity)
+                .switch()?;
+        }
+
+        filters.set_filter_clause(&mut query_builder).switch()?;
+
+        query_builder
+            .add_filter_clause("merchant_id", publishable_key)
+            .switch()?;
+
+        query_builder
+            .add_bool_filter_clause("first_event", 1)
+            .switch()?;
+
+        query_builder
+            .add_filter_clause("event_name", SdkEventNames::AppRendered)
+            .switch()?;
+
+        time_range
+            .set_filter_clause(&mut query_builder)
+            .attach_printable("Error filtering time range")
+            .switch()?;
+
+        for dim in dimensions.iter() {
+            query_builder
+                .add_group_by_clause(dim)
+                .attach_printable("Error grouping by dimensions")
+                .switch()?;
+        }
+
+        if let Some(_granularity) = granularity.as_ref() {
+            query_builder
+                .add_group_by_clause("time_bucket")
+                .attach_printable("Error adding granularity")
+                .switch()?;
+        }
+
+        query_builder
+            .execute_query::<SdkEventMetricRow, _>(pool)
+            .await
+            .change_context(MetricsError::QueryBuildingError)?
+            .change_context(MetricsError::QueryExecutionFailure)?
+            .into_iter()
+            .map(|i| {
+                Ok((
+                    SdkEventMetricsBucketIdentifier::new(
+                        i.payment_method.clone(),
+                        i.platform.clone(),
+                        i.browser_name.clone(),
+                        i.source.clone(),
+                        i.component.clone(),
+                        i.payment_experience.clone(),
+                        i.time_bucket.clone(),
+                    ),
+                    i,
+                ))
+            })
+            .collect::<error_stack::Result<
+                Vec<_>,
+                crate::query::PostProcessingError,
+            >>()
+            .change_context(MetricsError::PostProcessingFailure)
+    }
+}
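Every metric file above keys its rows by `SdkEventMetricsBucketIdentifier`, which (per its definition in `api_models`) hashes all dimension fields and defines `PartialEq` as hash equality, precisely so results from different metrics can be merged per bucket. A generic sketch of that merge, with `bucket_rows` as an illustrative name:

use std::collections::HashMap;

use api_models::analytics::sdk_events::SdkEventMetricsBucketIdentifier;

// Rows sharing the same dimension values and time bucket collapse into one
// entry, regardless of which metric produced them.
fn bucket_rows<R>(
    rows: Vec<(SdkEventMetricsBucketIdentifier, R)>,
) -> HashMap<SdkEventMetricsBucketIdentifier, Vec<R>> {
    let mut buckets: HashMap<_, Vec<R>> = HashMap::new();
    for (id, row) in rows {
        buckets.entry(id).or_default().push(row);
    }
    buckets
}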
diff --git a/crates/analytics/src/sdk_events/types.rs b/crates/analytics/src/sdk_events/types.rs
new file mode 100644
index 000000000000..d631b3158ed4
--- /dev/null
+++ b/crates/analytics/src/sdk_events/types.rs
@@ -0,0 +1,50 @@
+use api_models::analytics::sdk_events::{SdkEventDimensions, SdkEventFilters};
+use error_stack::ResultExt;
+
+use crate::{
+    query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
+    types::{AnalyticsCollection, AnalyticsDataSource},
+};
+
+impl<T> QueryFilter<T> for SdkEventFilters
+where
+    T: AnalyticsDataSource,
+    AnalyticsCollection: ToSql<T>,
+{
+    fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
+        if !self.payment_method.is_empty() {
+            builder
+                .add_filter_in_range_clause(SdkEventDimensions::PaymentMethod, &self.payment_method)
+                .attach_printable("Error adding payment method filter")?;
+        }
+        if !self.platform.is_empty() {
+            builder
+                .add_filter_in_range_clause(SdkEventDimensions::Platform, &self.platform)
+                .attach_printable("Error adding platform filter")?;
+        }
+        if !self.browser_name.is_empty() {
+            builder
+                .add_filter_in_range_clause(SdkEventDimensions::BrowserName, &self.browser_name)
+                .attach_printable("Error adding browser name filter")?;
+        }
+        if !self.source.is_empty() {
+            builder
+                .add_filter_in_range_clause(SdkEventDimensions::Source, &self.source)
+                .attach_printable("Error adding source filter")?;
+        }
+        if !self.component.is_empty() {
+            builder
+                .add_filter_in_range_clause(SdkEventDimensions::Component, &self.component)
+                .attach_printable("Error adding component filter")?;
+        }
+        if !self.payment_experience.is_empty() {
+            builder
+                .add_filter_in_range_clause(
+                    SdkEventDimensions::PaymentExperience,
+                    &self.payment_experience,
+                )
+                .attach_printable("Error adding payment experience filter")?;
+        }
+        Ok(())
+    }
+}
diff --git a/crates/router/src/analytics/sqlx.rs b/crates/analytics/src/sqlx.rs
similarity index 64%
rename from crates/router/src/analytics/sqlx.rs
rename to crates/analytics/src/sqlx.rs
index b88a2065f0b0..cdd2647e4e71 100644
--- a/crates/router/src/analytics/sqlx.rs
+++ b/crates/analytics/src/sqlx.rs
@@ -1,14 +1,11 @@
 use std::{fmt::Display, str::FromStr};
 
 use api_models::analytics::refunds::RefundType;
-use common_enums::enums::{
+use common_utils::errors::{CustomResult, ParsingError};
+use diesel_models::enums::{
     AttemptStatus, AuthenticationType, Currency, PaymentMethod, RefundStatus,
 };
-use common_utils::errors::{CustomResult, ParsingError};
 use error_stack::{IntoReport, ResultExt};
-#[cfg(feature = "kms")]
-use external_services::{kms, kms::decrypt::KmsDecrypt};
-#[cfg(not(feature = "kms"))]
 use masking::PeekInterface;
 use sqlx::{
     postgres::{PgArgumentBuffer, PgPoolOptions, PgRow, PgTypeInfo, PgValueRef},
@@ -16,15 +13,16 @@ use sqlx::{
     Error::ColumnNotFound, FromRow, Pool, Postgres, Row,
 };
+use storage_impl::config::Database;
 use time::PrimitiveDateTime;
 
 use super::{
-    query::{Aggregate, ToSql},
+    query::{Aggregate, ToSql, Window},
     types::{
         AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, QueryExecutionError,
+        TableEngine,
    },
 };
-use crate::configs::settings::Database;
 
 #[derive(Debug, Clone)]
 pub
struct SqlxClient { @@ -47,19 +45,7 @@ impl Default for SqlxClient { } impl SqlxClient { - pub async fn from_conf( - conf: &Database, - #[cfg(feature = "kms")] kms_client: &kms::KmsClient, - ) -> Self { - #[cfg(feature = "kms")] - #[allow(clippy::expect_used)] - let password = conf - .password - .decrypt_inner(kms_client) - .await - .expect("Failed to KMS decrypt database password"); - - #[cfg(not(feature = "kms"))] + pub async fn from_conf(conf: &Database) -> Self { let password = &conf.password.peek(); let database_url = format!( "postgres://{}:{}@{}:{}/{}", @@ -154,6 +140,7 @@ where impl super::payments::filters::PaymentFilterAnalytics for SqlxClient {} impl super::payments::metrics::PaymentMetricAnalytics for SqlxClient {} +impl super::payments::distribution::PaymentDistributionAnalytics for SqlxClient {} impl super::refunds::metrics::RefundMetricAnalytics for SqlxClient {} impl super::refunds::filters::RefundFilterAnalytics for SqlxClient {} @@ -207,7 +194,7 @@ impl<'a> FromRow<'a, PgRow> for super::refunds::metrics::RefundMetricRow { ColumnNotFound(_) => Ok(Default::default()), e => Err(e), })?; - + // Removing millisecond precision to get accurate diffs against clickhouse let start_bucket: Option = row .try_get::, _>("start_bucket")? .and_then(|dt| dt.replace_millisecond(0).ok()); @@ -253,6 +240,11 @@ impl<'a> FromRow<'a, PgRow> for super::payments::metrics::PaymentMetricRow { ColumnNotFound(_) => Ok(Default::default()), e => Err(e), })?; + let payment_method_type: Option = + row.try_get("payment_method_type").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; let total: Option = row.try_get("total").or_else(|e| match e { ColumnNotFound(_) => Ok(Default::default()), e => Err(e), @@ -261,7 +253,72 @@ impl<'a> FromRow<'a, PgRow> for super::payments::metrics::PaymentMetricRow { ColumnNotFound(_) => Ok(Default::default()), e => Err(e), })?; + // Removing millisecond precision to get accurate diffs against clickhouse + let start_bucket: Option = row + .try_get::, _>("start_bucket")? + .and_then(|dt| dt.replace_millisecond(0).ok()); + let end_bucket: Option = row + .try_get::, _>("end_bucket")? 
+ .and_then(|dt| dt.replace_millisecond(0).ok()); + Ok(Self { + currency, + status, + connector, + authentication_type, + payment_method, + payment_method_type, + total, + count, + start_bucket, + end_bucket, + }) + } +} +impl<'a> FromRow<'a, PgRow> for super::payments::distribution::PaymentDistributionRow { + fn from_row(row: &'a PgRow) -> sqlx::Result { + let currency: Option> = + row.try_get("currency").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let status: Option> = + row.try_get("status").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let connector: Option = row.try_get("connector").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let authentication_type: Option> = + row.try_get("authentication_type").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let payment_method: Option = + row.try_get("payment_method").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let payment_method_type: Option = + row.try_get("payment_method_type").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let total: Option = row.try_get("total").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let count: Option = row.try_get("count").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + let error_message: Option = row.try_get("error_message").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; + // Removing millisecond precision to get accurate diffs against clickhouse let start_bucket: Option = row .try_get::, _>("start_bucket")? 
.and_then(|dt| dt.replace_millisecond(0).ok()); @@ -274,8 +331,10 @@ impl<'a> FromRow<'a, PgRow> for super::payments::metrics::PaymentMetricRow { connector, authentication_type, payment_method, + payment_method_type, total, count, + error_message, start_bucket, end_bucket, }) @@ -308,12 +367,18 @@ impl<'a> FromRow<'a, PgRow> for super::payments::filters::FilterRow { ColumnNotFound(_) => Ok(Default::default()), e => Err(e), })?; + let payment_method_type: Option = + row.try_get("payment_method_type").or_else(|e| match e { + ColumnNotFound(_) => Ok(Default::default()), + e => Err(e), + })?; Ok(Self { currency, status, connector, authentication_type, payment_method, + payment_method_type, }) } } @@ -349,16 +414,21 @@ impl<'a> FromRow<'a, PgRow> for super::refunds::filters::RefundFilterRow { } impl ToSql for PrimitiveDateTime { - fn to_sql(&self) -> error_stack::Result { + fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result { Ok(self.to_string()) } } impl ToSql for AnalyticsCollection { - fn to_sql(&self) -> error_stack::Result { + fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result { match self { Self::Payment => Ok("payment_attempt".to_string()), Self::Refund => Ok("refund".to_string()), + Self::SdkEvents => Err(error_stack::report!(ParsingError::UnknownError) + .attach_printable("SdkEvents table is not implemented for Sqlx"))?, + Self::ApiEvents => Err(error_stack::report!(ParsingError::UnknownError) + .attach_printable("ApiEvents table is not implemented for Sqlx"))?, + Self::PaymentIntent => Ok("payment_intent".to_string()), } } } @@ -367,7 +437,7 @@ impl ToSql for Aggregate where T: ToSql, { - fn to_sql(&self) -> error_stack::Result { + fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result { Ok(match self { Self::Count { field: _, alias } => { format!( @@ -378,21 +448,86 @@ where Self::Sum { field, alias } => { format!( "sum({}){}", - field.to_sql().attach_printable("Failed to sum aggregate")?, + field + .to_sql(table_engine) + .attach_printable("Failed to sum aggregate")?, alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) ) } Self::Min { field, alias } => { format!( "min({}){}", - field.to_sql().attach_printable("Failed to min aggregate")?, + field + .to_sql(table_engine) + .attach_printable("Failed to min aggregate")?, alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) ) } Self::Max { field, alias } => { format!( "max({}){}", - field.to_sql().attach_printable("Failed to max aggregate")?, + field + .to_sql(table_engine) + .attach_printable("Failed to max aggregate")?, + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + }) + } +} + +impl ToSql for Window +where + T: ToSql, +{ + fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result { + Ok(match self { + Self::Sum { + field, + partition_by, + order_by, + alias, + } => { + format!( + "sum({}) over ({}{}){}", + field + .to_sql(table_engine) + .attach_printable("Failed to sum window")?, + partition_by.as_ref().map_or_else( + || "".to_owned(), + |partition_by| format!("partition by {}", partition_by.to_owned()) + ), + order_by.as_ref().map_or_else( + || "".to_owned(), + |(order_column, order)| format!( + " order by {} {}", + order_column.to_owned(), + order.to_string() + ) + ), + alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) + ) + } + Self::RowNumber { + field: _, + partition_by, + order_by, + alias, + } => { + format!( + "row_number() over ({}{}){}", + partition_by.as_ref().map_or_else( + 
|| "".to_owned(), + |partition_by| format!("partition by {}", partition_by.to_owned()) + ), + order_by.as_ref().map_or_else( + || "".to_owned(), + |(order_column, order)| format!( + " order by {} {}", + order_column.to_owned(), + order.to_string() + ) + ), alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias)) ) } diff --git a/crates/router/src/analytics/types.rs b/crates/analytics/src/types.rs similarity index 83% rename from crates/router/src/analytics/types.rs rename to crates/analytics/src/types.rs index fe20e812a9b8..16d342d3d2ee 100644 --- a/crates/router/src/analytics/types.rs +++ b/crates/analytics/src/types.rs @@ -2,25 +2,36 @@ use std::{fmt::Display, str::FromStr}; use common_utils::{ errors::{CustomResult, ErrorSwitch, ParsingError}, - events::ApiEventMetric, + events::{ApiEventMetric, ApiEventsType}, + impl_misc_api_event_type, }; use error_stack::{report, Report, ResultExt}; use super::query::QueryBuildingError; -#[derive(serde::Deserialize, Debug, masking::Serialize)] +#[derive(serde::Deserialize, Debug, serde::Serialize)] #[serde(rename_all = "snake_case")] pub enum AnalyticsDomain { Payments, Refunds, + SdkEvents, + ApiEvents, } -impl ApiEventMetric for AnalyticsDomain {} - #[derive(Debug, strum::AsRefStr, strum::Display, Clone, Copy)] pub enum AnalyticsCollection { Payment, Refund, + SdkEvents, + ApiEvents, + PaymentIntent, +} + +#[allow(dead_code)] +#[derive(Debug)] +pub enum TableEngine { + CollapsingMergeTree { sign: &'static str }, + BasicTree, } #[derive(Debug, serde::Serialize, serde::Deserialize, Eq, PartialEq)] @@ -50,6 +61,7 @@ where // Analytics Framework pub trait RefundAnalytics {} +pub trait SdkEventAnalytics {} #[async_trait::async_trait] pub trait AnalyticsDataSource @@ -60,6 +72,10 @@ where async fn load_results(&self, query: &str) -> CustomResult, QueryExecutionError> where Self: LoadRow; + + fn get_table_engine(_table: AnalyticsCollection) -> TableEngine { + TableEngine::BasicTree + } } pub trait LoadRow @@ -117,3 +133,5 @@ impl ErrorSwitch for QueryBuildingError { FiltersError::QueryBuildingError } } + +impl_misc_api_event_type!(AnalyticsDomain); diff --git a/crates/router/src/analytics/utils.rs b/crates/analytics/src/utils.rs similarity index 52% rename from crates/router/src/analytics/utils.rs rename to crates/analytics/src/utils.rs index f7e6ea69dc37..6a0aa973a1e7 100644 --- a/crates/router/src/analytics/utils.rs +++ b/crates/analytics/src/utils.rs @@ -1,6 +1,8 @@ use api_models::analytics::{ + api_event::{ApiEventDimensions, ApiEventMetrics}, payments::{PaymentDimensions, PaymentMetrics}, refunds::{RefundDimensions, RefundMetrics}, + sdk_events::{SdkEventDimensions, SdkEventMetrics}, NameDescription, }; use strum::IntoEnumIterator; @@ -13,6 +15,14 @@ pub fn get_refund_dimensions() -> Vec { RefundDimensions::iter().map(Into::into).collect() } +pub fn get_sdk_event_dimensions() -> Vec { + SdkEventDimensions::iter().map(Into::into).collect() +} + +pub fn get_api_event_dimensions() -> Vec { + ApiEventDimensions::iter().map(Into::into).collect() +} + pub fn get_payment_metrics_info() -> Vec { PaymentMetrics::iter().map(Into::into).collect() } @@ -20,3 +30,11 @@ pub fn get_payment_metrics_info() -> Vec { pub fn get_refund_metrics_info() -> Vec { RefundMetrics::iter().map(Into::into).collect() } + +pub fn get_sdk_event_metrics_info() -> Vec { + SdkEventMetrics::iter().map(Into::into).collect() +} + +pub fn get_api_event_metrics_info() -> Vec { + ApiEventMetrics::iter().map(Into::into).collect() +} diff --git 
a/crates/api_models/src/analytics.rs b/crates/api_models/src/analytics.rs index 0358b6b313cf..0263427b0fde 100644 --- a/crates/api_models/src/analytics.rs +++ b/crates/api_models/src/analytics.rs @@ -1,15 +1,20 @@ use std::collections::HashSet; -use common_utils::events::ApiEventMetric; -use time::PrimitiveDateTime; +use common_utils::pii::EmailStrategy; +use masking::Secret; use self::{ - payments::{PaymentDimensions, PaymentMetrics}, + api_event::{ApiEventDimensions, ApiEventMetrics}, + payments::{PaymentDimensions, PaymentDistributions, PaymentMetrics}, refunds::{RefundDimensions, RefundMetrics}, + sdk_events::{SdkEventDimensions, SdkEventMetrics}, }; +pub use crate::payments::TimeRange; +pub mod api_event; pub mod payments; pub mod refunds; +pub mod sdk_events; #[derive(Debug, serde::Serialize)] pub struct NameDescription { @@ -25,23 +30,12 @@ pub struct GetInfoResponse { pub dimensions: Vec, } -impl ApiEventMetric for GetInfoResponse {} - -#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, PartialEq, Eq, Hash)] -#[serde(rename_all = "camelCase")] -pub struct TimeRange { - #[serde(with = "common_utils::custom_serde::iso8601")] - pub start_time: PrimitiveDateTime, - #[serde(default, with = "common_utils::custom_serde::iso8601::option")] - pub end_time: Option, -} - -#[derive(Clone, Copy, Debug, serde::Deserialize, masking::Serialize)] +#[derive(Clone, Copy, Debug, serde::Deserialize, serde::Serialize)] pub struct TimeSeries { pub granularity: Granularity, } -#[derive(Clone, Copy, Debug, serde::Deserialize, masking::Serialize)] +#[derive(Clone, Copy, Debug, serde::Deserialize, serde::Serialize)] pub enum Granularity { #[serde(rename = "G_ONEMIN")] OneMin, @@ -57,7 +51,7 @@ pub enum Granularity { OneDay, } -#[derive(Clone, Debug, serde::Deserialize, masking::Serialize)] +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[serde(rename_all = "camelCase")] pub struct GetPaymentMetricRequest { pub time_series: Option, @@ -67,13 +61,51 @@ pub struct GetPaymentMetricRequest { #[serde(default)] pub filters: payments::PaymentFilters, pub metrics: HashSet, + pub distribution: Option, #[serde(default)] pub delta: bool, } -impl ApiEventMetric for GetPaymentMetricRequest {} +#[derive(Clone, Copy, Debug, serde::Deserialize, serde::Serialize)] +pub enum QueryLimit { + #[serde(rename = "TOP_5")] + Top5, + #[serde(rename = "TOP_10")] + Top10, +} + +#[allow(clippy::from_over_into)] +impl Into for QueryLimit { + fn into(self) -> u64 { + match self { + Self::Top5 => 5, + Self::Top10 => 10, + } + } +} + +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Distribution { + pub distribution_for: PaymentDistributions, + pub distribution_cardinality: QueryLimit, +} + +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ReportRequest { + pub time_range: TimeRange, +} + +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct GenerateReportRequest { + pub request: ReportRequest, + pub merchant_id: String, + pub email: Secret, +} -#[derive(Clone, Debug, serde::Deserialize, masking::Serialize)] +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[serde(rename_all = "camelCase")] pub struct GetRefundMetricRequest { pub time_series: Option, @@ -87,14 +119,26 @@ pub struct GetRefundMetricRequest { pub delta: bool, } -impl ApiEventMetric for GetRefundMetricRequest {} +#[derive(Clone, Debug, serde::Deserialize, 
serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct GetSdkEventMetricRequest { + pub time_series: Option, + pub time_range: TimeRange, + #[serde(default)] + pub group_by_names: Vec, + #[serde(default)] + pub filters: sdk_events::SdkEventFilters, + pub metrics: HashSet, + #[serde(default)] + pub delta: bool, +} #[derive(Debug, serde::Serialize)] pub struct AnalyticsMetadata { pub current_time_range: TimeRange, } -#[derive(Debug, serde::Deserialize, masking::Serialize)] +#[derive(Debug, serde::Deserialize, serde::Serialize)] #[serde(rename_all = "camelCase")] pub struct GetPaymentFiltersRequest { pub time_range: TimeRange, @@ -102,16 +146,12 @@ pub struct GetPaymentFiltersRequest { pub group_by_names: Vec, } -impl ApiEventMetric for GetPaymentFiltersRequest {} - #[derive(Debug, Default, serde::Serialize)] #[serde(rename_all = "camelCase")] pub struct PaymentFiltersResponse { pub query_data: Vec, } -impl ApiEventMetric for PaymentFiltersResponse {} - #[derive(Debug, serde::Serialize)] #[serde(rename_all = "camelCase")] pub struct FilterValue { @@ -119,34 +159,88 @@ pub struct FilterValue { pub values: Vec, } -#[derive(Debug, serde::Deserialize, masking::Serialize)] +#[derive(Debug, serde::Deserialize, serde::Serialize)] #[serde(rename_all = "camelCase")] + pub struct GetRefundFilterRequest { pub time_range: TimeRange, #[serde(default)] pub group_by_names: Vec, } -impl ApiEventMetric for GetRefundFilterRequest {} - #[derive(Debug, Default, serde::Serialize, Eq, PartialEq)] #[serde(rename_all = "camelCase")] pub struct RefundFiltersResponse { pub query_data: Vec, } -impl ApiEventMetric for RefundFiltersResponse {} - #[derive(Debug, serde::Serialize, Eq, PartialEq)] #[serde(rename_all = "camelCase")] + pub struct RefundFilterValue { pub dimension: RefundDimensions, pub values: Vec, } +#[derive(Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct GetSdkEventFiltersRequest { + pub time_range: TimeRange, + #[serde(default)] + pub group_by_names: Vec, +} + +#[derive(Debug, Default, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SdkEventFiltersResponse { + pub query_data: Vec, +} + +#[derive(Debug, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SdkEventFilterValue { + pub dimension: SdkEventDimensions, + pub values: Vec, +} + #[derive(Debug, serde::Serialize)] #[serde(rename_all = "camelCase")] pub struct MetricsResponse { pub query_data: Vec, pub meta_data: [AnalyticsMetadata; 1], } + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct GetApiEventFiltersRequest { + pub time_range: TimeRange, + #[serde(default)] + pub group_by_names: Vec, +} + +#[derive(Debug, Default, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ApiEventFiltersResponse { + pub query_data: Vec, +} + +#[derive(Debug, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ApiEventFilterValue { + pub dimension: ApiEventDimensions, + pub values: Vec, +} + +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct GetApiEventMetricRequest { + pub time_series: Option, + pub time_range: TimeRange, + #[serde(default)] + pub group_by_names: Vec, + #[serde(default)] + pub filters: api_event::ApiEventFilters, + pub metrics: HashSet, + #[serde(default)] + pub delta: bool, +} diff --git a/crates/api_models/src/analytics/api_event.rs b/crates/api_models/src/analytics/api_event.rs new file mode 100644 index 
000000000000..62fe829f01b9 --- /dev/null +++ b/crates/api_models/src/analytics/api_event.rs @@ -0,0 +1,148 @@ +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, +}; + +use super::{NameDescription, TimeRange}; +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +pub struct ApiLogsRequest { + #[serde(flatten)] + pub query_param: QueryType, + pub api_name_filter: Option>, +} + +pub enum FilterType { + ApiCountFilter, + LatencyFilter, + StatusCodeFilter, +} + +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[serde(tag = "type")] +pub enum QueryType { + Payment { + payment_id: String, + }, + Refund { + payment_id: String, + refund_id: String, + }, +} + +#[derive( + Debug, + serde::Serialize, + serde::Deserialize, + strum::AsRefStr, + PartialEq, + PartialOrd, + Eq, + Ord, + strum::Display, + strum::EnumIter, + Clone, + Copy, +)] +#[serde(rename_all = "snake_case")] +#[strum(serialize_all = "snake_case")] +pub enum ApiEventDimensions { + // Do not change the order of these enums + // Consult the Dashboard FE folks since these also affects the order of metrics on FE + StatusCode, + FlowType, + ApiFlow, +} + +impl From for NameDescription { + fn from(value: ApiEventDimensions) -> Self { + Self { + name: value.to_string(), + desc: String::new(), + } + } +} +#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)] +pub struct ApiEventFilters { + pub status_code: Vec, + pub flow_type: Vec, + pub api_flow: Vec, +} + +#[derive( + Clone, + Debug, + Hash, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::Display, + strum::EnumIter, + strum::AsRefStr, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum ApiEventMetrics { + Latency, + ApiCount, + StatusCodeCount, +} + +impl From for NameDescription { + fn from(value: ApiEventMetrics) -> Self { + Self { + name: value.to_string(), + desc: String::new(), + } + } +} + +#[derive(Debug, serde::Serialize, Eq)] +pub struct ApiEventMetricsBucketIdentifier { + #[serde(rename = "time_range")] + pub time_bucket: TimeRange, + // Coz FE sucks + #[serde(rename = "time_bucket")] + #[serde(with = "common_utils::custom_serde::iso8601custom")] + pub start_time: time::PrimitiveDateTime, +} + +impl ApiEventMetricsBucketIdentifier { + pub fn new(normalized_time_range: TimeRange) -> Self { + Self { + time_bucket: normalized_time_range, + start_time: normalized_time_range.start_time, + } + } +} + +impl Hash for ApiEventMetricsBucketIdentifier { + fn hash(&self, state: &mut H) { + self.time_bucket.hash(state); + } +} + +impl PartialEq for ApiEventMetricsBucketIdentifier { + fn eq(&self, other: &Self) -> bool { + let mut left = DefaultHasher::new(); + self.hash(&mut left); + let mut right = DefaultHasher::new(); + other.hash(&mut right); + left.finish() == right.finish() + } +} + +#[derive(Debug, serde::Serialize)] +pub struct ApiEventMetricsBucketValue { + pub latency: Option, + pub api_count: Option, + pub status_code_count: Option, +} + +#[derive(Debug, serde::Serialize)] +pub struct ApiMetricsBucketResponse { + #[serde(flatten)] + pub values: ApiEventMetricsBucketValue, + #[serde(flatten)] + pub dimensions: ApiEventMetricsBucketIdentifier, +} diff --git a/crates/api_models/src/analytics/payments.rs b/crates/api_models/src/analytics/payments.rs index b5e5852d6283..2d7ae262f489 100644 --- a/crates/api_models/src/analytics/payments.rs +++ b/crates/api_models/src/analytics/payments.rs @@ -3,13 +3,12 @@ use std::{ hash::{Hash, Hasher}, }; -use 
common_enums::enums::{AttemptStatus, AuthenticationType, Currency, PaymentMethod}; -use common_utils::events::ApiEventMetric; - use super::{NameDescription, TimeRange}; -use crate::{analytics::MetricsResponse, enums::Connector}; +use crate::enums::{ + AttemptStatus, AuthenticationType, Connector, Currency, PaymentMethod, PaymentMethodType, +}; -#[derive(Clone, Debug, Default, serde::Deserialize, masking::Serialize)] +#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)] pub struct PaymentFilters { #[serde(default)] pub currency: Vec, @@ -21,6 +20,8 @@ pub struct PaymentFilters { pub auth_type: Vec, #[serde(default)] pub payment_method: Vec, + #[serde(default)] + pub payment_method_type: Vec, } #[derive( @@ -44,6 +45,7 @@ pub enum PaymentDimensions { // Consult the Dashboard FE folks since these also affects the order of metrics on FE Connector, PaymentMethod, + PaymentMethodType, Currency, #[strum(serialize = "authentication_type")] #[serde(rename = "authentication_type")] @@ -73,6 +75,35 @@ pub enum PaymentMetrics { PaymentSuccessCount, PaymentProcessedAmount, AvgTicketSize, + RetriesCount, + ConnectorSuccessRate, +} + +#[derive(Debug, Default, serde::Serialize)] +pub struct ErrorResult { + pub reason: String, + pub count: i64, + pub percentage: f64, +} + +#[derive( + Clone, + Copy, + Debug, + Hash, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::Display, + strum::EnumIter, + strum::AsRefStr, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum PaymentDistributions { + #[strum(serialize = "error_message")] + PaymentErrorMessage, } pub mod metric_behaviour { @@ -109,6 +140,7 @@ pub struct PaymentMetricsBucketIdentifier { #[serde(rename = "authentication_type")] pub auth_type: Option, pub payment_method: Option, + pub payment_method_type: Option, #[serde(rename = "time_range")] pub time_bucket: TimeRange, // Coz FE sucks @@ -124,6 +156,7 @@ impl PaymentMetricsBucketIdentifier { connector: Option, auth_type: Option, payment_method: Option, + payment_method_type: Option, normalized_time_range: TimeRange, ) -> Self { Self { @@ -132,6 +165,7 @@ impl PaymentMetricsBucketIdentifier { connector, auth_type, payment_method, + payment_method_type, time_bucket: normalized_time_range, start_time: normalized_time_range.start_time, } @@ -145,6 +179,7 @@ impl Hash for PaymentMetricsBucketIdentifier { self.connector.hash(state); self.auth_type.map(|i| i.to_string()).hash(state); self.payment_method.hash(state); + self.payment_method_type.hash(state); self.time_bucket.hash(state); } } @@ -166,6 +201,10 @@ pub struct PaymentMetricsBucketValue { pub payment_success_count: Option, pub payment_processed_amount: Option, pub avg_ticket_size: Option, + pub payment_error_message: Option>, + pub retries_count: Option, + pub retries_amount_processed: Option, + pub connector_success_rate: Option, } #[derive(Debug, serde::Serialize)] @@ -175,6 +214,3 @@ pub struct MetricsBucketResponse { #[serde(flatten)] pub dimensions: PaymentMetricsBucketIdentifier, } - -impl ApiEventMetric for MetricsBucketResponse {} -impl ApiEventMetric for MetricsResponse {} diff --git a/crates/api_models/src/analytics/refunds.rs b/crates/api_models/src/analytics/refunds.rs index c5d444338d38..5ecdf1cecb3f 100644 --- a/crates/api_models/src/analytics/refunds.rs +++ b/crates/api_models/src/analytics/refunds.rs @@ -3,10 +3,7 @@ use std::{ hash::{Hash, Hasher}, }; -use common_enums::enums::{Currency, RefundStatus}; -use common_utils::events::ApiEventMetric; - -use 
crate::analytics::MetricsResponse; +use crate::{enums::Currency, refunds::RefundStatus}; #[derive( Clone, @@ -20,7 +17,7 @@ use crate::analytics::MetricsResponse; strum::Display, strum::EnumString, )] -// TODO RefundType common_enums need to mapped to storage_model +// TODO RefundType api_models_oss need to mapped to storage_model #[serde(rename_all = "snake_case")] #[strum(serialize_all = "snake_case")] pub enum RefundType { @@ -31,7 +28,7 @@ pub enum RefundType { } use super::{NameDescription, TimeRange}; -#[derive(Clone, Debug, Default, serde::Deserialize, masking::Serialize)] +#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)] pub struct RefundFilters { #[serde(default)] pub currency: Vec, @@ -115,8 +112,9 @@ impl From for NameDescription { #[derive(Debug, serde::Serialize, Eq)] pub struct RefundMetricsBucketIdentifier { pub currency: Option, - pub refund_status: Option, + pub refund_status: Option, pub connector: Option, + pub refund_type: Option, #[serde(rename = "time_range")] pub time_bucket: TimeRange, @@ -128,7 +126,7 @@ pub struct RefundMetricsBucketIdentifier { impl Hash for RefundMetricsBucketIdentifier { fn hash(&self, state: &mut H) { self.currency.hash(state); - self.refund_status.map(|i| i.to_string()).hash(state); + self.refund_status.hash(state); self.connector.hash(state); self.refund_type.hash(state); self.time_bucket.hash(state); @@ -147,7 +145,7 @@ impl PartialEq for RefundMetricsBucketIdentifier { impl RefundMetricsBucketIdentifier { pub fn new( currency: Option, - refund_status: Option, + refund_status: Option, connector: Option, refund_type: Option, normalized_time_range: TimeRange, @@ -162,7 +160,6 @@ impl RefundMetricsBucketIdentifier { } } } - #[derive(Debug, serde::Serialize)] pub struct RefundMetricsBucketValue { pub refund_success_rate: Option, @@ -170,7 +167,6 @@ pub struct RefundMetricsBucketValue { pub refund_success_count: Option, pub refund_processed_amount: Option, } - #[derive(Debug, serde::Serialize)] pub struct RefundMetricsBucketResponse { #[serde(flatten)] @@ -178,6 +174,3 @@ pub struct RefundMetricsBucketResponse { #[serde(flatten)] pub dimensions: RefundMetricsBucketIdentifier, } - -impl ApiEventMetric for RefundMetricsBucketResponse {} -impl ApiEventMetric for MetricsResponse {} diff --git a/crates/api_models/src/analytics/sdk_events.rs b/crates/api_models/src/analytics/sdk_events.rs new file mode 100644 index 000000000000..76ccb29867f2 --- /dev/null +++ b/crates/api_models/src/analytics/sdk_events.rs @@ -0,0 +1,215 @@ +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, +}; + +use super::{NameDescription, TimeRange}; + +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SdkEventsRequest { + pub payment_id: String, + pub time_range: TimeRange, +} + +#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)] +pub struct SdkEventFilters { + #[serde(default)] + pub payment_method: Vec, + #[serde(default)] + pub platform: Vec, + #[serde(default)] + pub browser_name: Vec, + #[serde(default)] + pub source: Vec, + #[serde(default)] + pub component: Vec, + #[serde(default)] + pub payment_experience: Vec, +} + +#[derive( + Debug, + serde::Serialize, + serde::Deserialize, + strum::AsRefStr, + PartialEq, + PartialOrd, + Eq, + Ord, + strum::Display, + strum::EnumIter, + Clone, + Copy, +)] +#[serde(rename_all = "snake_case")] +#[strum(serialize_all = "snake_case")] +pub enum SdkEventDimensions { + // Do not change the order of these enums + // 
Consult the Dashboard FE folks since these also affect the order of metrics on FE + PaymentMethod, + Platform, + BrowserName, + Source, + Component, + PaymentExperience, +} + +#[derive( + Clone, + Debug, + Hash, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::Display, + strum::EnumIter, + strum::AsRefStr, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum SdkEventMetrics { + PaymentAttempts, + PaymentSuccessCount, + PaymentMethodsCallCount, + SdkRenderedCount, + SdkInitiatedCount, + PaymentMethodSelectedCount, + PaymentDataFilledCount, + AveragePaymentTime, +} + +#[derive( + Clone, + Debug, + Hash, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::Display, + strum::EnumIter, + strum::AsRefStr, +)] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum SdkEventNames { + StripeElementsCalled, + AppRendered, + PaymentMethodChanged, + PaymentDataFilled, + PaymentAttempt, + PaymentSuccess, + PaymentMethodsCall, + ConfirmCall, + SessionsCall, + CustomerPaymentMethodsCall, + RedirectingUser, + DisplayBankTransferInfoPage, + DisplayQrCodeInfoPage, +} + +pub mod metric_behaviour { + pub struct PaymentAttempts; + pub struct PaymentSuccessCount; + pub struct PaymentMethodsCallCount; + pub struct SdkRenderedCount; + pub struct SdkInitiatedCount; + pub struct PaymentMethodSelectedCount; + pub struct PaymentDataFilledCount; + pub struct AveragePaymentTime; +} + +impl From for NameDescription { + fn from(value: SdkEventMetrics) -> Self { + Self { + name: value.to_string(), + desc: String::new(), + } + } +} + +impl From for NameDescription { + fn from(value: SdkEventDimensions) -> Self { + Self { + name: value.to_string(), + desc: String::new(), + } + } +} + +#[derive(Debug, serde::Serialize, Eq)] +pub struct SdkEventMetricsBucketIdentifier { + pub payment_method: Option, + pub platform: Option, + pub browser_name: Option, + pub source: Option, + pub component: Option, + pub payment_experience: Option, + pub time_bucket: Option, +} + +impl SdkEventMetricsBucketIdentifier { + pub fn new( + payment_method: Option, + platform: Option, + browser_name: Option, + source: Option, + component: Option, + payment_experience: Option, + time_bucket: Option, + ) -> Self { + Self { + payment_method, + platform, + browser_name, + source, + component, + payment_experience, + time_bucket, + } + } +} + +impl Hash for SdkEventMetricsBucketIdentifier { + fn hash(&self, state: &mut H) { + self.payment_method.hash(state); + self.platform.hash(state); + self.browser_name.hash(state); + self.source.hash(state); + self.component.hash(state); + self.payment_experience.hash(state); + self.time_bucket.hash(state); + } +} + +impl PartialEq for SdkEventMetricsBucketIdentifier { + fn eq(&self, other: &Self) -> bool { + let mut left = DefaultHasher::new(); + self.hash(&mut left); + let mut right = DefaultHasher::new(); + other.hash(&mut right); + left.finish() == right.finish() + } +} + +#[derive(Debug, serde::Serialize)] +pub struct SdkEventMetricsBucketValue { + pub payment_attempts: Option, + pub payment_success_count: Option, + pub payment_methods_call_count: Option, + pub average_payment_time: Option, + pub sdk_rendered_count: Option, + pub sdk_initiated_count: Option, + pub payment_method_selected_count: Option, + pub payment_data_filled_count: Option, +} + +#[derive(Debug, serde::Serialize)] +pub struct MetricsBucketResponse { + #[serde(flatten)] + pub values: SdkEventMetricsBucketValue, + 
#[serde(flatten)] + pub dimensions: SdkEventMetricsBucketIdentifier, +} diff --git a/crates/api_models/src/events.rs b/crates/api_models/src/events.rs index 782c02be7a3a..345f827daeac 100644 --- a/crates/api_models/src/events.rs +++ b/crates/api_models/src/events.rs @@ -14,8 +14,16 @@ use common_utils::{ }; use crate::{ - admin::*, api_keys::*, cards_info::*, disputes::*, files::*, mandates::*, payment_methods::*, - payments::*, verifications::*, + admin::*, + analytics::{api_event::*, sdk_events::*, *}, + api_keys::*, + cards_info::*, + disputes::*, + files::*, + mandates::*, + payment_methods::*, + payments::*, + verifications::*, }; impl ApiEventMetric for TimeRange {} @@ -63,7 +71,23 @@ impl_misc_api_event_type!( ApplepayMerchantVerificationRequest, ApplepayMerchantResponse, ApplepayVerifiedDomainsResponse, - UpdateApiKeyRequest + UpdateApiKeyRequest, + GetApiEventFiltersRequest, + ApiEventFiltersResponse, + GetInfoResponse, + GetPaymentMetricRequest, + GetRefundMetricRequest, + GetSdkEventMetricRequest, + GetPaymentFiltersRequest, + PaymentFiltersResponse, + GetRefundFilterRequest, + RefundFiltersResponse, + GetSdkEventFiltersRequest, + SdkEventFiltersResponse, + ApiLogsRequest, + GetApiEventMetricRequest, + SdkEventsRequest, + ReportRequest ); #[cfg(feature = "stripe")] @@ -76,3 +100,9 @@ impl_misc_api_event_type!( CustomerPaymentMethodListResponse, CreateCustomerResponse ); + +impl ApiEventMetric for MetricsResponse { + fn get_api_event_type(&self) -> Option { + Some(ApiEventsType::Miscellaneous) + } +} diff --git a/crates/api_models/src/payments.rs b/crates/api_models/src/payments.rs index acb9bbdd6cd4..bd4c59211e24 100644 --- a/crates/api_models/src/payments.rs +++ b/crates/api_models/src/payments.rs @@ -2339,9 +2339,11 @@ pub struct PaymentListFilters { pub struct TimeRange { /// The start time to filter payments list or to get list of filters. To get the list of filters, the start time must be provided #[serde(with = "common_utils::custom_serde::iso8601")] + #[serde(alias = "startTime")] pub start_time: PrimitiveDateTime, /// The end time to filter payments list or to get list of filters. 
If not passed the default time is now #[serde(default, with = "common_utils::custom_serde::iso8601::option")] + #[serde(alias = "endTime")] pub end_time: Option, } diff --git a/crates/data_models/Cargo.toml b/crates/data_models/Cargo.toml index 57ae1ec1ec87..857d53b6999e 100644 --- a/crates/data_models/Cargo.toml +++ b/crates/data_models/Cargo.toml @@ -18,7 +18,6 @@ common_enums = { version = "0.1.0", path = "../common_enums" } common_utils = { version = "0.1.0", path = "../common_utils" } masking = { version = "0.1.0", path = "../masking" } - # Third party deps async-trait = "0.1.68" error-stack = "0.3.1" diff --git a/crates/router/Cargo.toml b/crates/router/Cargo.toml index b51dc045b20d..f508460574dd 100644 --- a/crates/router/Cargo.toml +++ b/crates/router/Cargo.toml @@ -16,7 +16,7 @@ email = ["external_services/email", "dep:aws-config", "olap"] basilisk = ["kms"] stripe = ["dep:serde_qs"] release = ["kms", "stripe", "basilisk", "s3", "email", "business_profile_routing", "accounts_cache", "kv_store", "profile_specific_fallback_routing"] -olap = ["data_models/olap", "storage_impl/olap", "scheduler/olap"] +olap = ["data_models/olap", "storage_impl/olap", "scheduler/olap", "dep:analytics"] oltp = ["storage_impl/oltp"] kv_store = ["scheduler/kv_store"] accounts_cache = [] @@ -102,6 +102,7 @@ tracing-futures = { version = "0.2.5", features = ["tokio"] } # First party crates api_models = { version = "0.1.0", path = "../api_models", features = ["errors"] } +analytics = { version = "0.1.0", path = "../analytics", optional = true } cards = { version = "0.1.0", path = "../cards" } common_enums = { version = "0.1.0", path = "../common_enums" } common_utils = { version = "0.1.0", path = "../common_utils", features = ["signals", "async_ext", "logs"] } @@ -118,6 +119,7 @@ router_env = { version = "0.1.0", path = "../router_env", features = ["log_extra scheduler = { version = "0.1.0", path = "../scheduler", default-features = false } storage_impl = { version = "0.1.0", path = "../storage_impl", default-features = false } erased-serde = "0.3.31" +rdkafka = "0.36.0" [build-dependencies] router_env = { version = "0.1.0", path = "../router_env", default-features = false } diff --git a/crates/router/src/analytics.rs b/crates/router/src/analytics.rs index d57403d92989..f31e908e0dc3 100644 --- a/crates/router/src/analytics.rs +++ b/crates/router/src/analytics.rs @@ -1,129 +1,560 @@ -mod core; -mod errors; -pub mod metrics; -mod payments; -mod query; -mod refunds; -pub mod routes; - -mod sqlx; -mod types; -mod utils; - -use api_models::analytics::{ - payments::{PaymentDimensions, PaymentFilters, PaymentMetrics, PaymentMetricsBucketIdentifier}, - refunds::{RefundDimensions, RefundFilters, RefundMetrics, RefundMetricsBucketIdentifier}, - Granularity, TimeRange, -}; -use router_env::{instrument, tracing}; - -use self::{ - payments::metrics::{PaymentMetric, PaymentMetricRow}, - refunds::metrics::{RefundMetric, RefundMetricRow}, - sqlx::SqlxClient, -}; -use crate::configs::settings::Database; - -#[derive(Clone, Debug)] -pub enum AnalyticsProvider { - Sqlx(SqlxClient), -} +pub use analytics::*; + +pub mod routes { + use actix_web::{web, Responder, Scope}; + use analytics::{ + api_event::api_events_core, errors::AnalyticsError, lambda_utils::invoke_lambda, + sdk_events::sdk_events_core, + }; + use api_models::analytics::{ + GenerateReportRequest, GetApiEventFiltersRequest, GetApiEventMetricRequest, + GetPaymentFiltersRequest, GetPaymentMetricRequest, GetRefundFilterRequest, + GetRefundMetricRequest, 
GetSdkEventFiltersRequest, GetSdkEventMetricRequest, ReportRequest, + }; + use error_stack::ResultExt; + use router_env::AnalyticsFlow; + + use crate::{ + core::api_locking, + db::user::UserInterface, + routes::AppState, + services::{ + api, + authentication::{self as auth, AuthToken, AuthenticationData}, + authorization::permissions::Permission, + ApplicationResponse, + }, + types::domain::UserEmail, + }; + + pub struct Analytics; + + impl Analytics { + pub fn server(state: AppState) -> Scope { + let mut route = web::scope("/analytics/v1").app_data(web::Data::new(state)); + { + route = route + .service( + web::resource("metrics/payments") + .route(web::post().to(get_payment_metrics)), + ) + .service( + web::resource("metrics/refunds").route(web::post().to(get_refunds_metrics)), + ) + .service( + web::resource("filters/payments") + .route(web::post().to(get_payment_filters)), + ) + .service( + web::resource("filters/refunds").route(web::post().to(get_refund_filters)), + ) + .service(web::resource("{domain}/info").route(web::get().to(get_info))) + .service( + web::resource("report/dispute") + .route(web::post().to(generate_dispute_report)), + ) + .service( + web::resource("report/refunds") + .route(web::post().to(generate_refund_report)), + ) + .service( + web::resource("report/payments") + .route(web::post().to(generate_payment_report)), + ) + .service( + web::resource("metrics/sdk_events") + .route(web::post().to(get_sdk_event_metrics)), + ) + .service( + web::resource("filters/sdk_events") + .route(web::post().to(get_sdk_event_filters)), + ) + .service(web::resource("api_event_logs").route(web::get().to(get_api_events))) + .service(web::resource("sdk_event_logs").route(web::post().to(get_sdk_events))) + .service( + web::resource("filters/api_events") + .route(web::post().to(get_api_event_filters)), + ) + .service( + web::resource("metrics/api_events") + .route(web::post().to(get_api_events_metrics)), + ) + } + route + } + } -impl Default for AnalyticsProvider { - fn default() -> Self { - Self::Sqlx(SqlxClient::default()) + pub async fn get_info( + state: web::Data, + req: actix_web::HttpRequest, + domain: actix_web::web::Path, + ) -> impl Responder { + let flow = AnalyticsFlow::GetInfo; + Box::pin(api::server_wrap( + flow, + state, + &req, + domain.into_inner(), + |_, _, domain| async { + analytics::core::get_domain_info(domain) + .await + .map(ApplicationResponse::Json) + }, + &auth::NoAuth, + api_locking::LockAction::NotApplicable, + )) + .await } -} -impl AnalyticsProvider { - #[instrument(skip_all)] + /// # Panics + /// + /// Panics if `json_payload` array does not contain one `GetPaymentMetricRequest` element. 
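The `web::Json<[GetPaymentMetricRequest; 1]>` extractor in the handler below is what makes the documented panic unreachable for well-typed input: serde refuses to deserialize a fixed-size array unless the JSON array has exactly that many elements, so `.to_vec().pop()` always yields `Some`. A minimal sketch of that invariant, using a hypothetical stub type in place of the real request and assuming serde and serde_json are available:

    use serde::Deserialize;

    // Stub standing in for GetPaymentMetricRequest; the real type has more fields.
    #[derive(Debug, Deserialize)]
    struct MetricRequestStub {
        metrics: Vec<String>,
    }

    fn main() {
        // Exactly one element deserializes successfully.
        let one: Result<[MetricRequestStub; 1], _> =
            serde_json::from_str(r#"[{"metrics": ["payment_count"]}]"#);
        assert!(one.is_ok());

        // Zero (or more than one) elements are rejected at deserialization time,
        // before the handler body ever runs.
        let none: Result<[MetricRequestStub; 1], _> = serde_json::from_str("[]");
        assert!(none.is_err());
    }
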
pub async fn get_payment_metrics( - &self, - metric: &PaymentMetrics, - dimensions: &[PaymentDimensions], - merchant_id: &str, - filters: &PaymentFilters, - granularity: &Option, - time_range: &TimeRange, - ) -> types::MetricsResult> { - // Metrics to get the fetch time for each payment metric - metrics::request::record_operation_time( - async { - match self { - Self::Sqlx(pool) => { - metric - .load_metrics( - dimensions, - merchant_id, - filters, - granularity, - time_range, - pool, - ) - .await - } - } + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json<[GetPaymentMetricRequest; 1]>, + ) -> impl Responder { + // safety: This shouldn't panic owing to the data type + #[allow(clippy::expect_used)] + let payload = json_payload + .into_inner() + .to_vec() + .pop() + .expect("Couldn't get GetPaymentMetricRequest"); + let flow = AnalyticsFlow::GetPaymentMetrics; + Box::pin(api::server_wrap( + flow, + state, + &req, + payload, + |state, auth: AuthenticationData, req| async move { + analytics::payments::get_metrics( + &state.pool, + &auth.merchant_account.merchant_id, + req, + ) + .await + .map(ApplicationResponse::Json) }, - &metrics::METRIC_FETCH_TIME, - metric, - self, - ) + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) .await } - pub async fn get_refund_metrics( - &self, - metric: &RefundMetrics, - dimensions: &[RefundDimensions], - merchant_id: &str, - filters: &RefundFilters, - granularity: &Option, - time_range: &TimeRange, - ) -> types::MetricsResult> { - match self { - Self::Sqlx(pool) => { - metric - .load_metrics( - dimensions, - merchant_id, - filters, - granularity, - time_range, - pool, - ) + /// # Panics + /// + /// Panics if `json_payload` array does not contain one `GetRefundMetricRequest` element. + pub async fn get_refunds_metrics( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json<[GetRefundMetricRequest; 1]>, + ) -> impl Responder { + #[allow(clippy::expect_used)] + // safety: This shouldn't panic owing to the data type + let payload = json_payload + .into_inner() + .to_vec() + .pop() + .expect("Couldn't get GetRefundMetricRequest"); + let flow = AnalyticsFlow::GetRefundsMetrics; + Box::pin(api::server_wrap( + flow, + state, + &req, + payload, + |state, auth: AuthenticationData, req| async move { + analytics::refunds::get_metrics( + &state.pool, + &auth.merchant_account.merchant_id, + req, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + /// # Panics + /// + /// Panics if `json_payload` array does not contain one `GetSdkEventMetricRequest` element. 
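One detail relevant to the SDK-event handlers below: `SdkEventsRequest` is declared with `rename_all = "camelCase"`, and the `TimeRange` change earlier in this patch adds `startTime`/`endTime` aliases, so both the snake_case and camelCase spellings deserialize into the same fields (presumably to accept camelCase clients; that motivation is an inference, not stated in the patch). A minimal sketch of the alias behavior with a stub type, not the real `TimeRange`, which uses `PrimitiveDateTime` with ISO 8601 serde:

    use serde::Deserialize;

    // Stub mirroring the alias attributes added to TimeRange in this patch.
    #[derive(Debug, Deserialize)]
    struct TimeRangeStub {
        #[serde(alias = "startTime")]
        start_time: String,
        #[serde(default, alias = "endTime")]
        end_time: Option<String>,
    }

    fn main() {
        let snake: TimeRangeStub =
            serde_json::from_str(r#"{"start_time": "2023-11-01T00:00:00Z"}"#).unwrap();
        let camel: TimeRangeStub =
            serde_json::from_str(r#"{"startTime": "2023-11-01T00:00:00Z"}"#).unwrap();
        // Both spellings populate the same field; end_time stays None when omitted.
        assert_eq!(snake.start_time, camel.start_time);
        assert!(snake.end_time.is_none());
    }
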
+ pub async fn get_sdk_event_metrics( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json<[GetSdkEventMetricRequest; 1]>, + ) -> impl Responder { + // safety: This shouldn't panic owing to the data type + #[allow(clippy::expect_used)] + let payload = json_payload + .into_inner() + .to_vec() + .pop() + .expect("Couldn't get GetSdkEventMetricRequest"); + let flow = AnalyticsFlow::GetSdkMetrics; + Box::pin(api::server_wrap( + flow, + state, + &req, + payload, + |state, auth: AuthenticationData, req| async move { + analytics::sdk_events::get_metrics( + &state.pool, + auth.merchant_account.publishable_key.as_ref(), + req, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn get_payment_filters( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let flow = AnalyticsFlow::GetPaymentFilters; + Box::pin(api::server_wrap( + flow, + state, + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, req| async move { + analytics::payments::get_filters( + &state.pool, + req, + &auth.merchant_account.merchant_id, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn get_refund_filters( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let flow = AnalyticsFlow::GetRefundFilters; + Box::pin(api::server_wrap( + flow, + state, + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, req: GetRefundFilterRequest| async move { + analytics::refunds::get_filters( + &state.pool, + req, + &auth.merchant_account.merchant_id, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn get_sdk_event_filters( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let flow = AnalyticsFlow::GetSdkEventFilters; + Box::pin(api::server_wrap( + flow, + state, + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, req| async move { + analytics::sdk_events::get_filters( + &state.pool, + req, + auth.merchant_account.publishable_key.as_ref(), + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn get_api_events( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Query, + ) -> impl Responder { + let flow = AnalyticsFlow::GetApiEvents; + Box::pin(api::server_wrap( + flow, + state, + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, req| async move { + api_events_core(&state.pool, req, auth.merchant_account.merchant_id) .await - } - } + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await } - pub async fn from_conf( - config: &AnalyticsConfig, - #[cfg(feature = "kms")] kms_client: &external_services::kms::KmsClient, - ) -> Self { - match config { - AnalyticsConfig::Sqlx { sqlx } => Self::Sqlx( - SqlxClient::from_conf( - sqlx, - #[cfg(feature = "kms")] - kms_client, + pub async fn get_sdk_events( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let flow = AnalyticsFlow::GetSdkEvents; 
+ Box::pin(api::server_wrap( + flow, + state, + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, req| async move { + sdk_events_core( + &state.pool, + req, + auth.merchant_account.publishable_key.unwrap_or_default(), ) - .await, - ), - } + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await } -} -#[derive(Clone, Debug, serde::Deserialize)] -#[serde(tag = "source")] -#[serde(rename_all = "lowercase")] -pub enum AnalyticsConfig { - Sqlx { sqlx: Database }, -} + pub async fn generate_refund_report( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let state_ref = &state; + let req_headers = &req.headers(); -impl Default for AnalyticsConfig { - fn default() -> Self { - Self::Sqlx { - sqlx: Database::default(), - } + let flow = AnalyticsFlow::GenerateRefundReport; + Box::pin(api::server_wrap( + flow, + state.clone(), + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, payload| async move { + let jwt_payload = + auth::parse_jwt_payload::(req_headers, state_ref).await; + + let user_id = jwt_payload + .change_context(AnalyticsError::UnknownError)? + .user_id; + + let user = UserInterface::find_user_by_id(&*state.store, &user_id) + .await + .change_context(AnalyticsError::UnknownError)?; + + let user_email = UserEmail::from_pii_email(user.email) + .change_context(AnalyticsError::UnknownError)? + .get_secret(); + + let lambda_req = GenerateReportRequest { + request: payload, + merchant_id: auth.merchant_account.merchant_id.to_string(), + email: user_email, + }; + + let json_bytes = + serde_json::to_vec(&lambda_req).map_err(|_| AnalyticsError::UnknownError)?; + invoke_lambda( + &state.conf.report_download_config.refund_function, + &state.conf.report_download_config.region, + &json_bytes, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn generate_dispute_report( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let state_ref = &state; + let req_headers = &req.headers(); + + let flow = AnalyticsFlow::GenerateDisputeReport; + Box::pin(api::server_wrap( + flow, + state.clone(), + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, payload| async move { + let jwt_payload = + auth::parse_jwt_payload::(req_headers, state_ref).await; + + let user_id = jwt_payload + .change_context(AnalyticsError::UnknownError)? + .user_id; + + let user = UserInterface::find_user_by_id(&*state.store, &user_id) + .await + .change_context(AnalyticsError::UnknownError)?; + + let user_email = UserEmail::from_pii_email(user.email) + .change_context(AnalyticsError::UnknownError)? 
+ .get_secret(); + + let lambda_req = GenerateReportRequest { + request: payload, + merchant_id: auth.merchant_account.merchant_id.to_string(), + email: user_email, + }; + + let json_bytes = + serde_json::to_vec(&lambda_req).map_err(|_| AnalyticsError::UnknownError)?; + invoke_lambda( + &state.conf.report_download_config.dispute_function, + &state.conf.report_download_config.region, + &json_bytes, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn generate_payment_report( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let state_ref = &state; + let req_headers = &req.headers(); + + let flow = AnalyticsFlow::GeneratePaymentReport; + Box::pin(api::server_wrap( + flow, + state.clone(), + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, payload| async move { + let jwt_payload = + auth::parse_jwt_payload::(req_headers, state_ref).await; + + let user_id = jwt_payload + .change_context(AnalyticsError::UnknownError)? + .user_id; + + let user = UserInterface::find_user_by_id(&*state.store, &user_id) + .await + .change_context(AnalyticsError::UnknownError)?; + + let user_email = UserEmail::from_pii_email(user.email) + .change_context(AnalyticsError::UnknownError)? + .get_secret(); + + let lambda_req = GenerateReportRequest { + request: payload, + merchant_id: auth.merchant_account.merchant_id.to_string(), + email: user_email, + }; + + let json_bytes = + serde_json::to_vec(&lambda_req).map_err(|_| AnalyticsError::UnknownError)?; + invoke_lambda( + &state.conf.report_download_config.payment_function, + &state.conf.report_download_config.region, + &json_bytes, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + /// # Panics + /// + /// Panics if `json_payload` array does not contain one `GetApiEventMetricRequest` element. 
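A note on the bucket-identifier types added in `api_models` earlier in this patch (`RefundMetricsBucketIdentifier`, `SdkEventMetricsBucketIdentifier`): instead of a field-by-field comparison, they implement `PartialEq` by running both values through a `DefaultHasher` and comparing the outputs, which keeps `Eq` and `Hash` trivially consistent for use as `HashMap` keys, at the (unlikely) cost that a hash collision would compare equal. A minimal, simplified sketch of the pattern using only the standard library:

    use std::{
        collections::hash_map::DefaultHasher,
        hash::{Hash, Hasher},
    };

    // Simplified stand-in for the bucket-identifier types in this patch.
    #[derive(Debug)]
    struct BucketId {
        payment_method: Option<String>,
        time_bucket: Option<String>,
    }

    impl Hash for BucketId {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.payment_method.hash(state);
            self.time_bucket.hash(state);
        }
    }

    impl PartialEq for BucketId {
        // Equality defined as "hashes to the same value".
        fn eq(&self, other: &Self) -> bool {
            let mut left = DefaultHasher::new();
            self.hash(&mut left);
            let mut right = DefaultHasher::new();
            other.hash(&mut right);
            left.finish() == right.finish()
        }
    }

    impl Eq for BucketId {}

    fn main() {
        let a = BucketId { payment_method: Some("card".into()), time_bucket: None };
        let b = BucketId { payment_method: Some("card".into()), time_bucket: None };
        assert_eq!(a, b); // identical fields feed identical bytes to the hasher
    }
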
+ pub async fn get_api_events_metrics( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json<[GetApiEventMetricRequest; 1]>, + ) -> impl Responder { + // safety: This shouldn't panic owing to the data type + #[allow(clippy::expect_used)] + let payload = json_payload + .into_inner() + .to_vec() + .pop() + .expect("Couldn't get GetApiEventMetricRequest"); + let flow = AnalyticsFlow::GetApiEventMetrics; + Box::pin(api::server_wrap( + flow, + state.clone(), + &req, + payload, + |state, auth: AuthenticationData, req| async move { + analytics::api_event::get_api_event_metrics( + &state.pool, + &auth.merchant_account.merchant_id, + req, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await + } + + pub async fn get_api_event_filters( + state: web::Data, + req: actix_web::HttpRequest, + json_payload: web::Json, + ) -> impl Responder { + let flow = AnalyticsFlow::GetApiEventFilters; + Box::pin(api::server_wrap( + flow, + state.clone(), + &req, + json_payload.into_inner(), + |state, auth: AuthenticationData, req| async move { + analytics::api_event::get_filters( + &state.pool, + req, + auth.merchant_account.merchant_id, + ) + .await + .map(ApplicationResponse::Json) + }, + &auth::JWTAuth(Permission::Analytics), + api_locking::LockAction::NotApplicable, + )) + .await } } diff --git a/crates/router/src/analytics/core.rs b/crates/router/src/analytics/core.rs deleted file mode 100644 index bf124a6c0e85..000000000000 --- a/crates/router/src/analytics/core.rs +++ /dev/null @@ -1,96 +0,0 @@ -use api_models::analytics::{ - payments::PaymentDimensions, refunds::RefundDimensions, FilterValue, GetInfoResponse, - GetPaymentFiltersRequest, GetRefundFilterRequest, PaymentFiltersResponse, RefundFilterValue, - RefundFiltersResponse, -}; -use error_stack::ResultExt; - -use super::{ - errors::{self, AnalyticsError}, - payments::filters::{get_payment_filter_for_dimension, FilterRow}, - refunds::filters::{get_refund_filter_for_dimension, RefundFilterRow}, - types::AnalyticsDomain, - utils, AnalyticsProvider, -}; -use crate::{services::ApplicationResponse, types::domain}; - -pub type AnalyticsApiResponse = errors::AnalyticsResult>; - -pub async fn get_domain_info(domain: AnalyticsDomain) -> AnalyticsApiResponse { - let info = match domain { - AnalyticsDomain::Payments => GetInfoResponse { - metrics: utils::get_payment_metrics_info(), - download_dimensions: None, - dimensions: utils::get_payment_dimensions(), - }, - AnalyticsDomain::Refunds => GetInfoResponse { - metrics: utils::get_refund_metrics_info(), - download_dimensions: None, - dimensions: utils::get_refund_dimensions(), - }, - }; - Ok(ApplicationResponse::Json(info)) -} - -pub async fn payment_filters_core( - pool: AnalyticsProvider, - req: GetPaymentFiltersRequest, - merchant: domain::MerchantAccount, -) -> AnalyticsApiResponse { - let mut res = PaymentFiltersResponse::default(); - - for dim in req.group_by_names { - let values = match pool.clone() { - AnalyticsProvider::Sqlx(pool) => { - get_payment_filter_for_dimension(dim, &merchant.merchant_id, &req.time_range, &pool) - .await - } - } - .change_context(AnalyticsError::UnknownError)? 
- .into_iter() - .filter_map(|fil: FilterRow| match dim { - PaymentDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()), - PaymentDimensions::PaymentStatus => fil.status.map(|i| i.as_ref().to_string()), - PaymentDimensions::Connector => fil.connector, - PaymentDimensions::AuthType => fil.authentication_type.map(|i| i.as_ref().to_string()), - PaymentDimensions::PaymentMethod => fil.payment_method, - }) - .collect::>(); - res.query_data.push(FilterValue { - dimension: dim, - values, - }) - } - - Ok(ApplicationResponse::Json(res)) -} - -pub async fn refund_filter_core( - pool: AnalyticsProvider, - req: GetRefundFilterRequest, - merchant: domain::MerchantAccount, -) -> AnalyticsApiResponse { - let mut res = RefundFiltersResponse::default(); - for dim in req.group_by_names { - let values = match pool.clone() { - AnalyticsProvider::Sqlx(pool) => { - get_refund_filter_for_dimension(dim, &merchant.merchant_id, &req.time_range, &pool) - .await - } - } - .change_context(AnalyticsError::UnknownError)? - .into_iter() - .filter_map(|fil: RefundFilterRow| match dim { - RefundDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()), - RefundDimensions::RefundStatus => fil.refund_status.map(|i| i.as_ref().to_string()), - RefundDimensions::Connector => fil.connector, - RefundDimensions::RefundType => fil.refund_type.map(|i| i.as_ref().to_string()), - }) - .collect::>(); - res.query_data.push(RefundFilterValue { - dimension: dim, - values, - }) - } - Ok(ApplicationResponse::Json(res)) -} diff --git a/crates/router/src/analytics/payments.rs b/crates/router/src/analytics/payments.rs deleted file mode 100644 index 527bf75a3c72..000000000000 --- a/crates/router/src/analytics/payments.rs +++ /dev/null @@ -1,13 +0,0 @@ -pub mod accumulator; -mod core; -pub mod filters; -pub mod metrics; -pub mod types; -pub use accumulator::{PaymentMetricAccumulator, PaymentMetricsAccumulator}; - -pub trait PaymentAnalytics: - metrics::PaymentMetricAnalytics + filters::PaymentFilterAnalytics -{ -} - -pub use self::core::get_metrics; diff --git a/crates/router/src/analytics/payments/core.rs b/crates/router/src/analytics/payments/core.rs deleted file mode 100644 index 23eca8879a70..000000000000 --- a/crates/router/src/analytics/payments/core.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::collections::HashMap; - -use api_models::analytics::{ - payments::{MetricsBucketResponse, PaymentMetrics, PaymentMetricsBucketIdentifier}, - AnalyticsMetadata, GetPaymentMetricRequest, MetricsResponse, -}; -use error_stack::{IntoReport, ResultExt}; -use router_env::{ - instrument, logger, - tracing::{self, Instrument}, -}; - -use super::PaymentMetricsAccumulator; -use crate::{ - analytics::{ - core::AnalyticsApiResponse, errors::AnalyticsError, metrics, - payments::PaymentMetricAccumulator, AnalyticsProvider, - }, - services::ApplicationResponse, - types::domain, -}; - -#[instrument(skip_all)] -pub async fn get_metrics( - pool: AnalyticsProvider, - merchant_account: domain::MerchantAccount, - req: GetPaymentMetricRequest, -) -> AnalyticsApiResponse> { - let mut metrics_accumulator: HashMap< - PaymentMetricsBucketIdentifier, - PaymentMetricsAccumulator, - > = HashMap::new(); - - let mut set = tokio::task::JoinSet::new(); - for metric_type in req.metrics.iter().cloned() { - let req = req.clone(); - let merchant_id = merchant_account.merchant_id.clone(); - let pool = pool.clone(); - let task_span = tracing::debug_span!( - "analytics_payments_query", - payment_metric = metric_type.as_ref() - ); - set.spawn( - async move { - 
let data = pool - .get_payment_metrics( - &metric_type, - &req.group_by_names.clone(), - &merchant_id, - &req.filters, - &req.time_series.map(|t| t.granularity), - &req.time_range, - ) - .await - .change_context(AnalyticsError::UnknownError); - (metric_type, data) - } - .instrument(task_span), - ); - } - - while let Some((metric, data)) = set - .join_next() - .await - .transpose() - .into_report() - .change_context(AnalyticsError::UnknownError)? - { - let data = data?; - let attributes = &[ - metrics::request::add_attributes("metric_type", metric.to_string()), - metrics::request::add_attributes( - "source", - match pool { - crate::analytics::AnalyticsProvider::Sqlx(_) => "Sqlx", - }, - ), - ]; - - let value = u64::try_from(data.len()); - if let Ok(val) = value { - metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes); - logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val); - } - - for (id, value) in data { - logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}"); - let metrics_builder = metrics_accumulator.entry(id).or_default(); - match metric { - PaymentMetrics::PaymentSuccessRate => metrics_builder - .payment_success_rate - .add_metrics_bucket(&value), - PaymentMetrics::PaymentCount => { - metrics_builder.payment_count.add_metrics_bucket(&value) - } - PaymentMetrics::PaymentSuccessCount => { - metrics_builder.payment_success.add_metrics_bucket(&value) - } - PaymentMetrics::PaymentProcessedAmount => { - metrics_builder.processed_amount.add_metrics_bucket(&value) - } - PaymentMetrics::AvgTicketSize => { - metrics_builder.avg_ticket_size.add_metrics_bucket(&value) - } - } - } - - logger::debug!( - "Analytics Accumulated Results: metric: {}, results: {:#?}", - metric, - metrics_accumulator - ); - } - - let query_data: Vec = metrics_accumulator - .into_iter() - .map(|(id, val)| MetricsBucketResponse { - values: val.collect(), - dimensions: id, - }) - .collect(); - - Ok(ApplicationResponse::Json(MetricsResponse { - query_data, - meta_data: [AnalyticsMetadata { - current_time_range: req.time_range, - }], - })) -} diff --git a/crates/router/src/analytics/refunds/core.rs b/crates/router/src/analytics/refunds/core.rs deleted file mode 100644 index 4c2d2c394181..000000000000 --- a/crates/router/src/analytics/refunds/core.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::collections::HashMap; - -use api_models::analytics::{ - refunds::{RefundMetrics, RefundMetricsBucketIdentifier, RefundMetricsBucketResponse}, - AnalyticsMetadata, GetRefundMetricRequest, MetricsResponse, -}; -use error_stack::{IntoReport, ResultExt}; -use router_env::{ - logger, - tracing::{self, Instrument}, -}; - -use super::RefundMetricsAccumulator; -use crate::{ - analytics::{ - core::AnalyticsApiResponse, errors::AnalyticsError, refunds::RefundMetricAccumulator, - AnalyticsProvider, - }, - services::ApplicationResponse, - types::domain, -}; - -pub async fn get_metrics( - pool: AnalyticsProvider, - merchant_account: domain::MerchantAccount, - req: GetRefundMetricRequest, -) -> AnalyticsApiResponse> { - let mut metrics_accumulator: HashMap = - HashMap::new(); - let mut set = tokio::task::JoinSet::new(); - for metric_type in req.metrics.iter().cloned() { - let req = req.clone(); - let merchant_id = merchant_account.merchant_id.clone(); - let pool = pool.clone(); - let task_span = tracing::debug_span!( - "analytics_refund_query", - refund_metric = metric_type.as_ref() - ); - set.spawn( - async move { - let data = pool - .get_refund_metrics( - &metric_type, - 
&req.group_by_names.clone(), - &merchant_id, - &req.filters, - &req.time_series.map(|t| t.granularity), - &req.time_range, - ) - .await - .change_context(AnalyticsError::UnknownError); - (metric_type, data) - } - .instrument(task_span), - ); - } - - while let Some((metric, data)) = set - .join_next() - .await - .transpose() - .into_report() - .change_context(AnalyticsError::UnknownError)? - { - for (id, value) in data? { - logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}"); - let metrics_builder = metrics_accumulator.entry(id).or_default(); - match metric { - RefundMetrics::RefundSuccessRate => metrics_builder - .refund_success_rate - .add_metrics_bucket(&value), - RefundMetrics::RefundCount => { - metrics_builder.refund_count.add_metrics_bucket(&value) - } - RefundMetrics::RefundSuccessCount => { - metrics_builder.refund_success.add_metrics_bucket(&value) - } - RefundMetrics::RefundProcessedAmount => { - metrics_builder.processed_amount.add_metrics_bucket(&value) - } - } - } - - logger::debug!( - "Analytics Accumulated Results: metric: {}, results: {:#?}", - metric, - metrics_accumulator - ); - } - let query_data: Vec = metrics_accumulator - .into_iter() - .map(|(id, val)| RefundMetricsBucketResponse { - values: val.collect(), - dimensions: id, - }) - .collect(); - - Ok(ApplicationResponse::Json(MetricsResponse { - query_data, - meta_data: [AnalyticsMetadata { - current_time_range: req.time_range, - }], - })) -} diff --git a/crates/router/src/analytics/routes.rs b/crates/router/src/analytics/routes.rs deleted file mode 100644 index 113312cdf10f..000000000000 --- a/crates/router/src/analytics/routes.rs +++ /dev/null @@ -1,164 +0,0 @@ -use actix_web::{web, Responder, Scope}; -use api_models::analytics::{ - GetPaymentFiltersRequest, GetPaymentMetricRequest, GetRefundFilterRequest, - GetRefundMetricRequest, -}; -use router_env::AnalyticsFlow; - -use super::{core::*, payments, refunds, types::AnalyticsDomain}; -use crate::{ - core::api_locking, - services::{ - api, authentication as auth, authentication::AuthenticationData, - authorization::permissions::Permission, - }, - AppState, -}; - -pub struct Analytics; - -impl Analytics { - pub fn server(state: AppState) -> Scope { - let route = web::scope("/analytics/v1").app_data(web::Data::new(state)); - route - .service(web::resource("metrics/payments").route(web::post().to(get_payment_metrics))) - .service(web::resource("metrics/refunds").route(web::post().to(get_refunds_metrics))) - .service(web::resource("filters/payments").route(web::post().to(get_payment_filters))) - .service(web::resource("filters/refunds").route(web::post().to(get_refund_filters))) - .service(web::resource("{domain}/info").route(web::get().to(get_info))) - } -} - -pub async fn get_info( - state: web::Data, - req: actix_web::HttpRequest, - domain: actix_web::web::Path, -) -> impl Responder { - let flow = AnalyticsFlow::GetInfo; - api::server_wrap( - flow, - state, - &req, - domain.into_inner(), - |_, _, domain| get_domain_info(domain), - &auth::NoAuth, - api_locking::LockAction::NotApplicable, - ) - .await -} - -/// # Panics -/// -/// Panics if `json_payload` array does not contain one `GetPaymentMetricRequest` element. 
-pub async fn get_payment_metrics( - state: web::Data, - req: actix_web::HttpRequest, - json_payload: web::Json<[GetPaymentMetricRequest; 1]>, -) -> impl Responder { - // safety: This shouldn't panic owing to the data type - #[allow(clippy::expect_used)] - let payload = json_payload - .into_inner() - .to_vec() - .pop() - .expect("Couldn't get GetPaymentMetricRequest"); - let flow = AnalyticsFlow::GetPaymentMetrics; - api::server_wrap( - flow, - state, - &req, - payload, - |state, auth: AuthenticationData, req| { - payments::get_metrics(state.pool.clone(), auth.merchant_account, req) - }, - auth::auth_type( - &auth::ApiKeyAuth, - &auth::JWTAuth(Permission::Analytics), - req.headers(), - ), - api_locking::LockAction::NotApplicable, - ) - .await -} - -/// # Panics -/// -/// Panics if `json_payload` array does not contain one `GetRefundMetricRequest` element. -pub async fn get_refunds_metrics( - state: web::Data, - req: actix_web::HttpRequest, - json_payload: web::Json<[GetRefundMetricRequest; 1]>, -) -> impl Responder { - #[allow(clippy::expect_used)] - // safety: This shouldn't panic owing to the data type - let payload = json_payload - .into_inner() - .to_vec() - .pop() - .expect("Couldn't get GetRefundMetricRequest"); - let flow = AnalyticsFlow::GetRefundsMetrics; - api::server_wrap( - flow, - state, - &req, - payload, - |state, auth: AuthenticationData, req| { - refunds::get_metrics(state.pool.clone(), auth.merchant_account, req) - }, - auth::auth_type( - &auth::ApiKeyAuth, - &auth::JWTAuth(Permission::Analytics), - req.headers(), - ), - api_locking::LockAction::NotApplicable, - ) - .await -} - -pub async fn get_payment_filters( - state: web::Data, - req: actix_web::HttpRequest, - json_payload: web::Json, -) -> impl Responder { - let flow = AnalyticsFlow::GetPaymentFilters; - api::server_wrap( - flow, - state, - &req, - json_payload.into_inner(), - |state, auth: AuthenticationData, req| { - payment_filters_core(state.pool.clone(), req, auth.merchant_account) - }, - auth::auth_type( - &auth::ApiKeyAuth, - &auth::JWTAuth(Permission::Analytics), - req.headers(), - ), - api_locking::LockAction::NotApplicable, - ) - .await -} - -pub async fn get_refund_filters( - state: web::Data, - req: actix_web::HttpRequest, - json_payload: web::Json, -) -> impl Responder { - let flow = AnalyticsFlow::GetRefundFilters; - api::server_wrap( - flow, - state, - &req, - json_payload.into_inner(), - |state, auth: AuthenticationData, req: GetRefundFilterRequest| { - refund_filter_core(state.pool.clone(), req, auth.merchant_account) - }, - auth::auth_type( - &auth::ApiKeyAuth, - &auth::JWTAuth(Permission::Analytics), - req.headers(), - ), - api_locking::LockAction::NotApplicable, - ) - .await -} diff --git a/crates/router/src/bin/scheduler.rs b/crates/router/src/bin/scheduler.rs index 4c19408582bc..32e9cfc6ca29 100644 --- a/crates/router/src/bin/scheduler.rs +++ b/crates/router/src/bin/scheduler.rs @@ -20,7 +20,6 @@ use strum::EnumString; use tokio::sync::{mpsc, oneshot}; const SCHEDULER_FLOW: &str = "SCHEDULER_FLOW"; - #[tokio::main] async fn main() -> CustomResult<(), ProcessTrackerError> { // console_subscriber::init(); @@ -30,7 +29,6 @@ async fn main() -> CustomResult<(), ProcessTrackerError> { #[allow(clippy::expect_used)] let conf = Settings::with_config_path(cmd_line.config_path) .expect("Unable to construct application configuration"); - let api_client = Box::new( services::ProxyClient::new( conf.proxy.clone(), diff --git a/crates/router/src/configs/kms.rs b/crates/router/src/configs/kms.rs index 
c2f159d16cf1..37f2d15774a5 100644 --- a/crates/router/src/configs/kms.rs +++ b/crates/router/src/configs/kms.rs @@ -63,7 +63,7 @@ impl KmsDecrypt for settings::Database { password: self.password.decrypt_inner(kms_client).await?.into(), pool_size: self.pool_size, connection_timeout: self.connection_timeout, - queue_strategy: self.queue_strategy.into(), + queue_strategy: self.queue_strategy, min_idle: self.min_idle, max_lifetime: self.max_lifetime, }) diff --git a/crates/router/src/configs/settings.rs b/crates/router/src/configs/settings.rs index 918ae6647eef..f2d962b0abee 100644 --- a/crates/router/src/configs/settings.rs +++ b/crates/router/src/configs/settings.rs @@ -4,6 +4,8 @@ use std::{ str::FromStr, }; +#[cfg(feature = "olap")] +use analytics::ReportConfig; use api_models::{enums, payment_methods::RequiredFieldInfo}; use common_utils::ext_traits::ConfigExt; use config::{Environment, File}; @@ -16,12 +18,14 @@ pub use router_env::config::{Log, LogConsole, LogFile, LogTelemetry}; use rust_decimal::Decimal; use scheduler::SchedulerSettings; use serde::{de::Error, Deserialize, Deserializer}; +use storage_impl::config::QueueStrategy; #[cfg(feature = "olap")] use crate::analytics::AnalyticsConfig; use crate::{ core::errors::{ApplicationError, ApplicationResult}, env::{self, logger, Env}, + events::EventsConfig, }; #[cfg(feature = "kms")] pub type Password = kms::KmsValue; @@ -109,6 +113,9 @@ pub struct Settings { pub analytics: AnalyticsConfig, #[cfg(feature = "kv_store")] pub kv_config: KvConfig, + #[cfg(feature = "olap")] + pub report_download_config: ReportConfig, + pub events: EventsConfig, } #[derive(Debug, Deserialize, Clone)] @@ -521,23 +528,6 @@ pub struct Database { pub max_lifetime: Option, } -#[derive(Debug, Deserialize, Clone, Default)] -#[serde(rename_all = "PascalCase")] -pub enum QueueStrategy { - #[default] - Fifo, - Lifo, -} - -impl From for bb8::QueueStrategy { - fn from(value: QueueStrategy) -> Self { - match value { - QueueStrategy::Fifo => Self::Fifo, - QueueStrategy::Lifo => Self::Lifo, - } - } -} - #[cfg(not(feature = "kms"))] impl From for storage_impl::config::Database { fn from(val: Database) -> Self { @@ -837,6 +827,7 @@ impl Settings { #[cfg(feature = "s3")] self.file_upload_config.validate()?; self.lock_settings.validate()?; + self.events.validate()?; Ok(()) } } diff --git a/crates/router/src/core/refunds.rs b/crates/router/src/core/refunds.rs index 2d572cee9513..33435bb0ad96 100644 --- a/crates/router/src/core/refunds.rs +++ b/crates/router/src/core/refunds.rs @@ -927,7 +927,9 @@ pub async fn start_refund_workflow( ) -> Result<(), errors::ProcessTrackerError> { match refund_tracker.name.as_deref() { Some("EXECUTE_REFUND") => trigger_refund_execute_workflow(state, refund_tracker).await, - Some("SYNC_REFUND") => sync_refund_with_gateway_workflow(state, refund_tracker).await, + Some("SYNC_REFUND") => { + Box::pin(sync_refund_with_gateway_workflow(state, refund_tracker)).await + } _ => Err(errors::ProcessTrackerError::JobNotFound), } } diff --git a/crates/router/src/core/webhooks.rs b/crates/router/src/core/webhooks.rs index 67154ae33aef..be8d118a47c2 100644 --- a/crates/router/src/core/webhooks.rs +++ b/crates/router/src/core/webhooks.rs @@ -905,6 +905,7 @@ pub async fn webhooks_wrapper { diff --git a/crates/router/src/db.rs b/crates/router/src/db.rs index 9687f7f97c92..549bda78eda8 100644 --- a/crates/router/src/db.rs +++ b/crates/router/src/db.rs @@ -12,6 +12,7 @@ pub mod events; pub mod file; pub mod fraud_check; pub mod gsm; +mod kafka_store; pub mod 
locker_mock_up; pub mod mandate; pub mod merchant_account; @@ -31,11 +32,24 @@ pub mod user_role; use data_models::payments::{ payment_attempt::PaymentAttemptInterface, payment_intent::PaymentIntentInterface, }; +use diesel_models::{ + fraud_check::{FraudCheck, FraudCheckNew, FraudCheckUpdate}, + organization::{Organization, OrganizationNew, OrganizationUpdate}, +}; +use error_stack::ResultExt; use masking::PeekInterface; use redis_interface::errors::RedisError; -use storage_impl::{redis::kv_store::RedisConnInterface, MockDb}; - -use crate::{errors::CustomResult, services::Store}; +use storage_impl::{errors::StorageError, redis::kv_store::RedisConnInterface, MockDb}; + +pub use self::kafka_store::KafkaStore; +use self::{fraud_check::FraudCheckInterface, organization::OrganizationInterface}; +pub use crate::{ + errors::CustomResult, + services::{ + kafka::{KafkaError, KafkaProducer, MQResult}, + Store, + }, +}; #[derive(PartialEq, Eq)] pub enum StorageImpl { @@ -58,7 +72,7 @@ pub trait StorageInterface: + ephemeral_key::EphemeralKeyInterface + events::EventInterface + file::FileMetadataInterface - + fraud_check::FraudCheckInterface + + FraudCheckInterface + locker_mock_up::LockerMockUpInterface + mandate::MandateInterface + merchant_account::MerchantAccountInterface @@ -79,7 +93,7 @@ pub trait StorageInterface: + RedisConnInterface + RequestIdStore + business_profile::BusinessProfileInterface - + organization::OrganizationInterface + + OrganizationInterface + routing_algorithm::RoutingAlgorithmInterface + gsm::GsmInterface + user::UserInterface @@ -151,7 +165,6 @@ where T: serde::de::DeserializeOwned, { use common_utils::ext_traits::ByteSliceExt; - use error_stack::ResultExt; let bytes = db.get_key(key).await?; bytes @@ -160,3 +173,72 @@ where } dyn_clone::clone_trait_object!(StorageInterface); + +impl RequestIdStore for KafkaStore { + fn add_request_id(&mut self, request_id: String) { + self.diesel_store.add_request_id(request_id) + } +} + +#[async_trait::async_trait] +impl FraudCheckInterface for KafkaStore { + async fn insert_fraud_check_response( + &self, + new: FraudCheckNew, + ) -> CustomResult { + self.diesel_store.insert_fraud_check_response(new).await + } + async fn update_fraud_check_response_with_attempt_id( + &self, + fraud_check: FraudCheck, + fraud_check_update: FraudCheckUpdate, + ) -> CustomResult { + self.diesel_store + .update_fraud_check_response_with_attempt_id(fraud_check, fraud_check_update) + .await + } + async fn find_fraud_check_by_payment_id( + &self, + payment_id: String, + merchant_id: String, + ) -> CustomResult { + self.diesel_store + .find_fraud_check_by_payment_id(payment_id, merchant_id) + .await + } + async fn find_fraud_check_by_payment_id_if_present( + &self, + payment_id: String, + merchant_id: String, + ) -> CustomResult, StorageError> { + self.diesel_store + .find_fraud_check_by_payment_id_if_present(payment_id, merchant_id) + .await + } +} + +#[async_trait::async_trait] +impl OrganizationInterface for KafkaStore { + async fn insert_organization( + &self, + organization: OrganizationNew, + ) -> CustomResult { + self.diesel_store.insert_organization(organization).await + } + async fn find_organization_by_org_id( + &self, + org_id: &str, + ) -> CustomResult { + self.diesel_store.find_organization_by_org_id(org_id).await + } + + async fn update_organization_by_org_id( + &self, + org_id: &str, + update: OrganizationUpdate, + ) -> CustomResult { + self.diesel_store + .update_organization_by_org_id(org_id, update) + .await + } +} diff --git 
a/crates/router/src/db/kafka_store.rs b/crates/router/src/db/kafka_store.rs new file mode 100644 index 000000000000..9cf1a7b80b8b --- /dev/null +++ b/crates/router/src/db/kafka_store.rs @@ -0,0 +1,1917 @@ +use std::sync::Arc; + +use common_enums::enums::MerchantStorageScheme; +use common_utils::errors::CustomResult; +use data_models::payments::{ + payment_attempt::PaymentAttemptInterface, payment_intent::PaymentIntentInterface, +}; +use diesel_models::{ + enums::ProcessTrackerStatus, + ephemeral_key::{EphemeralKey, EphemeralKeyNew}, + reverse_lookup::{ReverseLookup, ReverseLookupNew}, + user_role as user_storage, +}; +use masking::Secret; +use redis_interface::{errors::RedisError, RedisConnectionPool, RedisEntryId}; +use router_env::logger; +use scheduler::{ + db::{process_tracker::ProcessTrackerInterface, queue::QueueInterface}, + SchedulerInterface, +}; +use storage_impl::redis::kv_store::RedisConnInterface; +use time::PrimitiveDateTime; + +use super::{user::UserInterface, user_role::UserRoleInterface}; +use crate::{ + core::errors::{self, ProcessTrackerError}, + db::{ + address::AddressInterface, + api_keys::ApiKeyInterface, + business_profile::BusinessProfileInterface, + capture::CaptureInterface, + cards_info::CardsInfoInterface, + configs::ConfigInterface, + customers::CustomerInterface, + dispute::DisputeInterface, + ephemeral_key::EphemeralKeyInterface, + events::EventInterface, + file::FileMetadataInterface, + gsm::GsmInterface, + locker_mock_up::LockerMockUpInterface, + mandate::MandateInterface, + merchant_account::MerchantAccountInterface, + merchant_connector_account::{ConnectorAccessToken, MerchantConnectorAccountInterface}, + merchant_key_store::MerchantKeyStoreInterface, + payment_link::PaymentLinkInterface, + payment_method::PaymentMethodInterface, + payout_attempt::PayoutAttemptInterface, + payouts::PayoutsInterface, + refund::RefundInterface, + reverse_lookup::ReverseLookupInterface, + routing_algorithm::RoutingAlgorithmInterface, + MasterKeyInterface, StorageInterface, + }, + services::{authentication, kafka::KafkaProducer, Store}, + types::{ + domain, + storage::{self, business_profile}, + AccessToken, + }, +}; + +#[derive(Clone)] +pub struct KafkaStore { + kafka_producer: KafkaProducer, + pub diesel_store: Store, +} + +impl KafkaStore { + pub async fn new(store: Store, kafka_producer: KafkaProducer) -> Self { + Self { + kafka_producer, + diesel_store: store, + } + } +} + +#[async_trait::async_trait] +impl AddressInterface for KafkaStore { + async fn find_address_by_address_id( + &self, + address_id: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .find_address_by_address_id(address_id, key_store) + .await + } + + async fn update_address( + &self, + address_id: String, + address: storage::AddressUpdate, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .update_address(address_id, address, key_store) + .await + } + + async fn update_address_for_payments( + &self, + this: domain::Address, + address: domain::AddressUpdate, + payment_id: String, + key_store: &domain::MerchantKeyStore, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .update_address_for_payments(this, address, payment_id, key_store, storage_scheme) + .await + } + + async fn insert_address_for_payments( + &self, + payment_id: &str, + address: domain::Address, + key_store: &domain::MerchantKeyStore, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + 
.insert_address_for_payments(payment_id, address, key_store, storage_scheme) + .await + } + + async fn find_address_by_merchant_id_payment_id_address_id( + &self, + merchant_id: &str, + payment_id: &str, + address_id: &str, + key_store: &domain::MerchantKeyStore, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_address_by_merchant_id_payment_id_address_id( + merchant_id, + payment_id, + address_id, + key_store, + storage_scheme, + ) + .await + } + + async fn insert_address_for_customers( + &self, + address: domain::Address, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .insert_address_for_customers(address, key_store) + .await + } + + async fn update_address_by_merchant_id_customer_id( + &self, + customer_id: &str, + merchant_id: &str, + address: storage::AddressUpdate, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .update_address_by_merchant_id_customer_id(customer_id, merchant_id, address, key_store) + .await + } +} + +#[async_trait::async_trait] +impl ApiKeyInterface for KafkaStore { + async fn insert_api_key( + &self, + api_key: storage::ApiKeyNew, + ) -> CustomResult { + self.diesel_store.insert_api_key(api_key).await + } + + async fn update_api_key( + &self, + merchant_id: String, + key_id: String, + api_key: storage::ApiKeyUpdate, + ) -> CustomResult { + self.diesel_store + .update_api_key(merchant_id, key_id, api_key) + .await + } + + async fn revoke_api_key( + &self, + merchant_id: &str, + key_id: &str, + ) -> CustomResult { + self.diesel_store.revoke_api_key(merchant_id, key_id).await + } + + async fn find_api_key_by_merchant_id_key_id_optional( + &self, + merchant_id: &str, + key_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_api_key_by_merchant_id_key_id_optional(merchant_id, key_id) + .await + } + + async fn find_api_key_by_hash_optional( + &self, + hashed_api_key: storage::HashedApiKey, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_api_key_by_hash_optional(hashed_api_key) + .await + } + + async fn list_api_keys_by_merchant_id( + &self, + merchant_id: &str, + limit: Option, + offset: Option, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .list_api_keys_by_merchant_id(merchant_id, limit, offset) + .await + } +} + +#[async_trait::async_trait] +impl CardsInfoInterface for KafkaStore { + async fn get_card_info( + &self, + card_iin: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store.get_card_info(card_iin).await + } +} + +#[async_trait::async_trait] +impl ConfigInterface for KafkaStore { + async fn insert_config( + &self, + config: storage::ConfigNew, + ) -> CustomResult { + self.diesel_store.insert_config(config).await + } + + async fn find_config_by_key( + &self, + key: &str, + ) -> CustomResult { + self.diesel_store.find_config_by_key(key).await + } + + async fn find_config_by_key_from_db( + &self, + key: &str, + ) -> CustomResult { + self.diesel_store.find_config_by_key_from_db(key).await + } + + async fn update_config_in_database( + &self, + key: &str, + config_update: storage::ConfigUpdate, + ) -> CustomResult { + self.diesel_store + .update_config_in_database(key, config_update) + .await + } + + async fn update_config_by_key( + &self, + key: &str, + config_update: storage::ConfigUpdate, + ) -> CustomResult { + self.diesel_store + .update_config_by_key(key, config_update) + .await + } + + async fn delete_config_by_key(&self, key: &str) 
-> CustomResult { + self.diesel_store.delete_config_by_key(key).await + } + + async fn find_config_by_key_unwrap_or( + &self, + key: &str, + default_config: Option, + ) -> CustomResult { + self.diesel_store + .find_config_by_key_unwrap_or(key, default_config) + .await + } +} + +#[async_trait::async_trait] +impl CustomerInterface for KafkaStore { + async fn delete_customer_by_customer_id_merchant_id( + &self, + customer_id: &str, + merchant_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_customer_by_customer_id_merchant_id(customer_id, merchant_id) + .await + } + + async fn find_customer_optional_by_customer_id_merchant_id( + &self, + customer_id: &str, + merchant_id: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_customer_optional_by_customer_id_merchant_id(customer_id, merchant_id, key_store) + .await + } + + async fn update_customer_by_customer_id_merchant_id( + &self, + customer_id: String, + merchant_id: String, + customer: storage::CustomerUpdate, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .update_customer_by_customer_id_merchant_id( + customer_id, + merchant_id, + customer, + key_store, + ) + .await + } + + async fn list_customers_by_merchant_id( + &self, + merchant_id: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .list_customers_by_merchant_id(merchant_id, key_store) + .await + } + + async fn find_customer_by_customer_id_merchant_id( + &self, + customer_id: &str, + merchant_id: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .find_customer_by_customer_id_merchant_id(customer_id, merchant_id, key_store) + .await + } + + async fn insert_customer( + &self, + customer_data: domain::Customer, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .insert_customer(customer_data, key_store) + .await + } +} + +#[async_trait::async_trait] +impl DisputeInterface for KafkaStore { + async fn insert_dispute( + &self, + dispute: storage::DisputeNew, + ) -> CustomResult { + self.diesel_store.insert_dispute(dispute).await + } + + async fn find_by_merchant_id_payment_id_connector_dispute_id( + &self, + merchant_id: &str, + payment_id: &str, + connector_dispute_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_by_merchant_id_payment_id_connector_dispute_id( + merchant_id, + payment_id, + connector_dispute_id, + ) + .await + } + + async fn find_dispute_by_merchant_id_dispute_id( + &self, + merchant_id: &str, + dispute_id: &str, + ) -> CustomResult { + self.diesel_store + .find_dispute_by_merchant_id_dispute_id(merchant_id, dispute_id) + .await + } + + async fn find_disputes_by_merchant_id( + &self, + merchant_id: &str, + dispute_constraints: api_models::disputes::DisputeListConstraints, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_disputes_by_merchant_id(merchant_id, dispute_constraints) + .await + } + + async fn update_dispute( + &self, + this: storage::Dispute, + dispute: storage::DisputeUpdate, + ) -> CustomResult { + self.diesel_store.update_dispute(this, dispute).await + } + + async fn find_disputes_by_merchant_id_payment_id( + &self, + merchant_id: &str, + payment_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_disputes_by_merchant_id_payment_id(merchant_id, payment_id) + .await + } +} + +#[async_trait::async_trait] +impl 
EphemeralKeyInterface for KafkaStore { + async fn create_ephemeral_key( + &self, + ek: EphemeralKeyNew, + validity: i64, + ) -> CustomResult { + self.diesel_store.create_ephemeral_key(ek, validity).await + } + async fn get_ephemeral_key( + &self, + key: &str, + ) -> CustomResult { + self.diesel_store.get_ephemeral_key(key).await + } + async fn delete_ephemeral_key( + &self, + id: &str, + ) -> CustomResult { + self.diesel_store.delete_ephemeral_key(id).await + } +} + +#[async_trait::async_trait] +impl EventInterface for KafkaStore { + async fn insert_event( + &self, + event: storage::EventNew, + ) -> CustomResult { + self.diesel_store.insert_event(event).await + } + + async fn update_event( + &self, + event_id: String, + event: storage::EventUpdate, + ) -> CustomResult { + self.diesel_store.update_event(event_id, event).await + } +} + +#[async_trait::async_trait] +impl LockerMockUpInterface for KafkaStore { + async fn find_locker_by_card_id( + &self, + card_id: &str, + ) -> CustomResult { + self.diesel_store.find_locker_by_card_id(card_id).await + } + + async fn insert_locker_mock_up( + &self, + new: storage::LockerMockUpNew, + ) -> CustomResult { + self.diesel_store.insert_locker_mock_up(new).await + } + + async fn delete_locker_mock_up( + &self, + card_id: &str, + ) -> CustomResult { + self.diesel_store.delete_locker_mock_up(card_id).await + } +} + +#[async_trait::async_trait] +impl MandateInterface for KafkaStore { + async fn find_mandate_by_merchant_id_mandate_id( + &self, + merchant_id: &str, + mandate_id: &str, + ) -> CustomResult { + self.diesel_store + .find_mandate_by_merchant_id_mandate_id(merchant_id, mandate_id) + .await + } + + async fn find_mandate_by_merchant_id_connector_mandate_id( + &self, + merchant_id: &str, + connector_mandate_id: &str, + ) -> CustomResult { + self.diesel_store + .find_mandate_by_merchant_id_connector_mandate_id(merchant_id, connector_mandate_id) + .await + } + + async fn find_mandate_by_merchant_id_customer_id( + &self, + merchant_id: &str, + customer_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_mandate_by_merchant_id_customer_id(merchant_id, customer_id) + .await + } + + async fn update_mandate_by_merchant_id_mandate_id( + &self, + merchant_id: &str, + mandate_id: &str, + mandate: storage::MandateUpdate, + ) -> CustomResult { + self.diesel_store + .update_mandate_by_merchant_id_mandate_id(merchant_id, mandate_id, mandate) + .await + } + + async fn find_mandates_by_merchant_id( + &self, + merchant_id: &str, + mandate_constraints: api_models::mandates::MandateListConstraints, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_mandates_by_merchant_id(merchant_id, mandate_constraints) + .await + } + + async fn insert_mandate( + &self, + mandate: storage::MandateNew, + ) -> CustomResult { + self.diesel_store.insert_mandate(mandate).await + } +} + +#[async_trait::async_trait] +impl PaymentLinkInterface for KafkaStore { + async fn find_payment_link_by_payment_link_id( + &self, + payment_link_id: &str, + ) -> CustomResult { + self.diesel_store + .find_payment_link_by_payment_link_id(payment_link_id) + .await + } + + async fn insert_payment_link( + &self, + payment_link_object: storage::PaymentLinkNew, + ) -> CustomResult { + self.diesel_store + .insert_payment_link(payment_link_object) + .await + } + + async fn list_payment_link_by_merchant_id( + &self, + merchant_id: &str, + payment_link_constraints: api_models::payments::PaymentLinkListConstraints, + ) -> CustomResult, errors::StorageError> { + 
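Most of the trait implementations in this file are pure pass-throughs: `KafkaStore` wraps the diesel-backed store and forwards each call unchanged, and only the payment-attempt, payment-intent, and refund writes later gain any Kafka behaviour. A minimal sketch of that wrapper shape, with stand-in trait and types rather than the crate's real ones:

```rust
// Minimal sketch of the delegation pattern (stand-in names, not the real API).
#[async_trait::async_trait]
trait ConfigStore {
    async fn find_config(&self, key: &str) -> Result<String, String>;
}

struct Wrapper<S> {
    diesel_store: S,
}

#[async_trait::async_trait]
impl<S: ConfigStore + Send + Sync> ConfigStore for Wrapper<S> {
    async fn find_config(&self, key: &str) -> Result<String, String> {
        // Reads never touch Kafka; the call is forwarded verbatim.
        self.diesel_store.find_config(key).await
    }
}
```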
self.diesel_store + .list_payment_link_by_merchant_id(merchant_id, payment_link_constraints) + .await + } +} + +#[async_trait::async_trait] +impl MerchantAccountInterface for KafkaStore { + async fn insert_merchant( + &self, + merchant_account: domain::MerchantAccount, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .insert_merchant(merchant_account, key_store) + .await + } + + async fn find_merchant_account_by_merchant_id( + &self, + merchant_id: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .find_merchant_account_by_merchant_id(merchant_id, key_store) + .await + } + + async fn update_merchant( + &self, + this: domain::MerchantAccount, + merchant_account: storage::MerchantAccountUpdate, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .update_merchant(this, merchant_account, key_store) + .await + } + + async fn update_specific_fields_in_merchant( + &self, + merchant_id: &str, + merchant_account: storage::MerchantAccountUpdate, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .update_specific_fields_in_merchant(merchant_id, merchant_account, key_store) + .await + } + + async fn find_merchant_account_by_publishable_key( + &self, + publishable_key: &str, + ) -> CustomResult { + self.diesel_store + .find_merchant_account_by_publishable_key(publishable_key) + .await + } + + #[cfg(feature = "olap")] + async fn list_merchant_accounts_by_organization_id( + &self, + organization_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .list_merchant_accounts_by_organization_id(organization_id) + .await + } + + async fn delete_merchant_account_by_merchant_id( + &self, + merchant_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_merchant_account_by_merchant_id(merchant_id) + .await + } +} + +#[async_trait::async_trait] +impl ConnectorAccessToken for KafkaStore { + async fn get_access_token( + &self, + merchant_id: &str, + connector_name: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .get_access_token(merchant_id, connector_name) + .await + } + + async fn set_access_token( + &self, + merchant_id: &str, + connector_name: &str, + access_token: AccessToken, + ) -> CustomResult<(), errors::StorageError> { + self.diesel_store + .set_access_token(merchant_id, connector_name, access_token) + .await + } +} + +#[async_trait::async_trait] +impl FileMetadataInterface for KafkaStore { + async fn insert_file_metadata( + &self, + file: storage::FileMetadataNew, + ) -> CustomResult { + self.diesel_store.insert_file_metadata(file).await + } + + async fn find_file_metadata_by_merchant_id_file_id( + &self, + merchant_id: &str, + file_id: &str, + ) -> CustomResult { + self.diesel_store + .find_file_metadata_by_merchant_id_file_id(merchant_id, file_id) + .await + } + + async fn delete_file_metadata_by_merchant_id_file_id( + &self, + merchant_id: &str, + file_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_file_metadata_by_merchant_id_file_id(merchant_id, file_id) + .await + } + + async fn update_file_metadata( + &self, + this: storage::FileMetadata, + file_metadata: storage::FileMetadataUpdate, + ) -> CustomResult { + self.diesel_store + .update_file_metadata(this, file_metadata) + .await + } +} + +#[async_trait::async_trait] +impl MerchantConnectorAccountInterface for KafkaStore { + async fn find_merchant_connector_account_by_merchant_id_connector_label( + &self, + merchant_id: &str, + connector: 
&str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .find_merchant_connector_account_by_merchant_id_connector_label( + merchant_id, + connector, + key_store, + ) + .await + } + + async fn find_merchant_connector_account_by_merchant_id_connector_name( + &self, + merchant_id: &str, + connector_name: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_merchant_connector_account_by_merchant_id_connector_name( + merchant_id, + connector_name, + key_store, + ) + .await + } + + async fn find_merchant_connector_account_by_profile_id_connector_name( + &self, + profile_id: &str, + connector_name: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .find_merchant_connector_account_by_profile_id_connector_name( + profile_id, + connector_name, + key_store, + ) + .await + } + + async fn insert_merchant_connector_account( + &self, + t: domain::MerchantConnectorAccount, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .insert_merchant_connector_account(t, key_store) + .await + } + + async fn find_by_merchant_connector_account_merchant_id_merchant_connector_id( + &self, + merchant_id: &str, + merchant_connector_id: &str, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .find_by_merchant_connector_account_merchant_id_merchant_connector_id( + merchant_id, + merchant_connector_id, + key_store, + ) + .await + } + + async fn find_merchant_connector_account_by_merchant_id_and_disabled_list( + &self, + merchant_id: &str, + get_disabled: bool, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_merchant_connector_account_by_merchant_id_and_disabled_list( + merchant_id, + get_disabled, + key_store, + ) + .await + } + + async fn update_merchant_connector_account( + &self, + this: domain::MerchantConnectorAccount, + merchant_connector_account: storage::MerchantConnectorAccountUpdateInternal, + key_store: &domain::MerchantKeyStore, + ) -> CustomResult { + self.diesel_store + .update_merchant_connector_account(this, merchant_connector_account, key_store) + .await + } + + async fn delete_merchant_connector_account_by_merchant_id_merchant_connector_id( + &self, + merchant_id: &str, + merchant_connector_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_merchant_connector_account_by_merchant_id_merchant_connector_id( + merchant_id, + merchant_connector_id, + ) + .await + } +} + +#[async_trait::async_trait] +impl QueueInterface for KafkaStore { + async fn fetch_consumer_tasks( + &self, + stream_name: &str, + group_name: &str, + consumer_name: &str, + ) -> CustomResult, ProcessTrackerError> { + self.diesel_store + .fetch_consumer_tasks(stream_name, group_name, consumer_name) + .await + } + + async fn consumer_group_create( + &self, + stream: &str, + group: &str, + id: &RedisEntryId, + ) -> CustomResult<(), RedisError> { + self.diesel_store + .consumer_group_create(stream, group, id) + .await + } + + async fn acquire_pt_lock( + &self, + tag: &str, + lock_key: &str, + lock_val: &str, + ttl: i64, + ) -> CustomResult { + self.diesel_store + .acquire_pt_lock(tag, lock_key, lock_val, ttl) + .await + } + + async fn release_pt_lock(&self, tag: &str, lock_key: &str) -> CustomResult { + self.diesel_store.release_pt_lock(tag, lock_key).await + } + + async fn stream_append_entry( + &self, + stream: &str, + entry_id: &RedisEntryId, + fields: Vec<(&str, 
String)>, + ) -> CustomResult<(), RedisError> { + self.diesel_store + .stream_append_entry(stream, entry_id, fields) + .await + } + + async fn get_key(&self, key: &str) -> CustomResult, RedisError> { + self.diesel_store.get_key(key).await + } +} + +#[async_trait::async_trait] +impl PaymentAttemptInterface for KafkaStore { + async fn insert_payment_attempt( + &self, + payment_attempt: storage::PaymentAttemptNew, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + let attempt = self + .diesel_store + .insert_payment_attempt(payment_attempt, storage_scheme) + .await?; + + if let Err(er) = self + .kafka_producer + .log_payment_attempt(&attempt, None) + .await + { + logger::error!(message="Failed to log analytics event for payment attempt {attempt:?}", error_message=?er) + } + + Ok(attempt) + } + + async fn update_payment_attempt_with_attempt_id( + &self, + this: storage::PaymentAttempt, + payment_attempt: storage::PaymentAttemptUpdate, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + let attempt = self + .diesel_store + .update_payment_attempt_with_attempt_id(this.clone(), payment_attempt, storage_scheme) + .await?; + + if let Err(er) = self + .kafka_producer + .log_payment_attempt(&attempt, Some(this)) + .await + { + logger::error!(message="Failed to log analytics event for payment attempt {attempt:?}", error_message=?er) + } + + Ok(attempt) + } + + async fn find_payment_attempt_by_connector_transaction_id_payment_id_merchant_id( + &self, + connector_transaction_id: &str, + payment_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_by_connector_transaction_id_payment_id_merchant_id( + connector_transaction_id, + payment_id, + merchant_id, + storage_scheme, + ) + .await + } + + async fn find_payment_attempt_by_merchant_id_connector_txn_id( + &self, + merchant_id: &str, + connector_txn_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_by_merchant_id_connector_txn_id( + merchant_id, + connector_txn_id, + storage_scheme, + ) + .await + } + + async fn find_payment_attempt_by_payment_id_merchant_id_attempt_id( + &self, + payment_id: &str, + merchant_id: &str, + attempt_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_by_payment_id_merchant_id_attempt_id( + payment_id, + merchant_id, + attempt_id, + storage_scheme, + ) + .await + } + + async fn find_payment_attempt_by_attempt_id_merchant_id( + &self, + attempt_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_by_attempt_id_merchant_id(attempt_id, merchant_id, storage_scheme) + .await + } + + async fn find_payment_attempt_last_successful_attempt_by_payment_id_merchant_id( + &self, + payment_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_last_successful_attempt_by_payment_id_merchant_id( + payment_id, + merchant_id, + storage_scheme, + ) + .await + } + + async fn find_payment_attempt_last_successful_or_partially_captured_attempt_by_payment_id_merchant_id( + &self, + payment_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_last_successful_or_partially_captured_attempt_by_payment_id_merchant_id( + payment_id, + merchant_id, + storage_scheme, + 
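The payment-attempt methods above show the one deviation from pure delegation: the relational write stays the source of truth, and a failed Kafka publish is only logged, never propagated, so an analytics outage cannot fail a payment. The shape, reduced to a hedged sketch with placeholder names:

```rust
// Placeholder sketch of "write first, audit best-effort".
async fn write_then_audit<T, E, F>(
    db_write: F,
    audit: impl FnOnce(&T) -> Result<(), String>,
) -> Result<T, E>
where
    T: std::fmt::Debug,
    F: std::future::Future<Output = Result<T, E>>,
{
    let row = db_write.await?; // fatal on failure: the database is the source of truth
    if let Err(err) = audit(&row) {
        // Non-fatal: the analytics event is dropped, the API call still succeeds.
        eprintln!("failed to log analytics event for {row:?}: {err}");
    }
    Ok(row)
}
```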
) + .await + } + + async fn find_payment_attempt_by_preprocessing_id_merchant_id( + &self, + preprocessing_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_attempt_by_preprocessing_id_merchant_id( + preprocessing_id, + merchant_id, + storage_scheme, + ) + .await + } + + async fn get_filters_for_payments( + &self, + pi: &[data_models::payments::PaymentIntent], + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult< + data_models::payments::payment_attempt::PaymentListFilters, + errors::DataStorageError, + > { + self.diesel_store + .get_filters_for_payments(pi, merchant_id, storage_scheme) + .await + } + + async fn get_total_count_of_filtered_payment_attempts( + &self, + merchant_id: &str, + active_attempt_ids: &[String], + connector: Option>, + payment_method: Option>, + payment_method_type: Option>, + authentication_type: Option>, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .get_total_count_of_filtered_payment_attempts( + merchant_id, + active_attempt_ids, + connector, + payment_method, + payment_method_type, + authentication_type, + storage_scheme, + ) + .await + } + + async fn find_attempts_by_merchant_id_payment_id( + &self, + merchant_id: &str, + payment_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult, errors::DataStorageError> { + self.diesel_store + .find_attempts_by_merchant_id_payment_id(merchant_id, payment_id, storage_scheme) + .await + } +} + +#[async_trait::async_trait] +impl PaymentIntentInterface for KafkaStore { + async fn update_payment_intent( + &self, + this: storage::PaymentIntent, + payment_intent: storage::PaymentIntentUpdate, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + let intent = self + .diesel_store + .update_payment_intent(this.clone(), payment_intent, storage_scheme) + .await?; + + if let Err(er) = self + .kafka_producer + .log_payment_intent(&intent, Some(this)) + .await + { + logger::error!(message="Failed to add analytics entry for Payment Intent {intent:?}", error_message=?er); + }; + + Ok(intent) + } + + async fn insert_payment_intent( + &self, + new: storage::PaymentIntentNew, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + logger::debug!("Inserting PaymentIntent Via KafkaStore"); + let intent = self + .diesel_store + .insert_payment_intent(new, storage_scheme) + .await?; + + if let Err(er) = self.kafka_producer.log_payment_intent(&intent, None).await { + logger::error!(message="Failed to add analytics entry for Payment Intent {intent:?}", error_message=?er); + }; + + Ok(intent) + } + + async fn find_payment_intent_by_payment_id_merchant_id( + &self, + payment_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_payment_intent_by_payment_id_merchant_id(payment_id, merchant_id, storage_scheme) + .await + } + + #[cfg(feature = "olap")] + async fn filter_payment_intent_by_constraints( + &self, + merchant_id: &str, + filters: &data_models::payments::payment_intent::PaymentIntentFetchConstraints, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult, errors::DataStorageError> { + self.diesel_store + .filter_payment_intent_by_constraints(merchant_id, filters, storage_scheme) + .await + } + + #[cfg(feature = "olap")] + async fn filter_payment_intents_by_time_range_constraints( + &self, + merchant_id: &str, + time_range: &api_models::payments::TimeRange, + storage_scheme: 
MerchantStorageScheme, + ) -> CustomResult, errors::DataStorageError> { + self.diesel_store + .filter_payment_intents_by_time_range_constraints( + merchant_id, + time_range, + storage_scheme, + ) + .await + } + + #[cfg(feature = "olap")] + async fn get_filtered_payment_intents_attempt( + &self, + merchant_id: &str, + constraints: &data_models::payments::payment_intent::PaymentIntentFetchConstraints, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult< + Vec<( + data_models::payments::PaymentIntent, + data_models::payments::payment_attempt::PaymentAttempt, + )>, + errors::DataStorageError, + > { + self.diesel_store + .get_filtered_payment_intents_attempt(merchant_id, constraints, storage_scheme) + .await + } + + #[cfg(feature = "olap")] + async fn get_filtered_active_attempt_ids_for_total_count( + &self, + merchant_id: &str, + constraints: &data_models::payments::payment_intent::PaymentIntentFetchConstraints, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult, errors::DataStorageError> { + self.diesel_store + .get_filtered_active_attempt_ids_for_total_count( + merchant_id, + constraints, + storage_scheme, + ) + .await + } + + async fn get_active_payment_attempt( + &self, + payment: &mut storage::PaymentIntent, + storage_scheme: MerchantStorageScheme, + ) -> error_stack::Result { + self.diesel_store + .get_active_payment_attempt(payment, storage_scheme) + .await + } +} + +#[async_trait::async_trait] +impl PaymentMethodInterface for KafkaStore { + async fn find_payment_method( + &self, + payment_method_id: &str, + ) -> CustomResult { + self.diesel_store + .find_payment_method(payment_method_id) + .await + } + + async fn find_payment_method_by_customer_id_merchant_id_list( + &self, + customer_id: &str, + merchant_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_payment_method_by_customer_id_merchant_id_list(customer_id, merchant_id) + .await + } + + async fn insert_payment_method( + &self, + m: storage::PaymentMethodNew, + ) -> CustomResult { + self.diesel_store.insert_payment_method(m).await + } + + async fn update_payment_method( + &self, + payment_method: storage::PaymentMethod, + payment_method_update: storage::PaymentMethodUpdate, + ) -> CustomResult { + self.diesel_store + .update_payment_method(payment_method, payment_method_update) + .await + } + + async fn delete_payment_method_by_merchant_id_payment_method_id( + &self, + merchant_id: &str, + payment_method_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_payment_method_by_merchant_id_payment_method_id(merchant_id, payment_method_id) + .await + } +} + +#[async_trait::async_trait] +impl PayoutAttemptInterface for KafkaStore { + async fn find_payout_attempt_by_merchant_id_payout_id( + &self, + merchant_id: &str, + payout_id: &str, + ) -> CustomResult { + self.diesel_store + .find_payout_attempt_by_merchant_id_payout_id(merchant_id, payout_id) + .await + } + + async fn update_payout_attempt_by_merchant_id_payout_id( + &self, + merchant_id: &str, + payout_id: &str, + payout: storage::PayoutAttemptUpdate, + ) -> CustomResult { + self.diesel_store + .update_payout_attempt_by_merchant_id_payout_id(merchant_id, payout_id, payout) + .await + } + + async fn insert_payout_attempt( + &self, + payout: storage::PayoutAttemptNew, + ) -> CustomResult { + self.diesel_store.insert_payout_attempt(payout).await + } +} + +#[async_trait::async_trait] +impl PayoutsInterface for KafkaStore { + async fn find_payout_by_merchant_id_payout_id( + &self, + merchant_id: &str, + payout_id: &str, + ) 
-> CustomResult { + self.diesel_store + .find_payout_by_merchant_id_payout_id(merchant_id, payout_id) + .await + } + + async fn update_payout_by_merchant_id_payout_id( + &self, + merchant_id: &str, + payout_id: &str, + payout: storage::PayoutsUpdate, + ) -> CustomResult { + self.diesel_store + .update_payout_by_merchant_id_payout_id(merchant_id, payout_id, payout) + .await + } + + async fn insert_payout( + &self, + payout: storage::PayoutsNew, + ) -> CustomResult { + self.diesel_store.insert_payout(payout).await + } +} + +#[async_trait::async_trait] +impl ProcessTrackerInterface for KafkaStore { + async fn reinitialize_limbo_processes( + &self, + ids: Vec, + schedule_time: PrimitiveDateTime, + ) -> CustomResult { + self.diesel_store + .reinitialize_limbo_processes(ids, schedule_time) + .await + } + + async fn find_process_by_id( + &self, + id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store.find_process_by_id(id).await + } + + async fn update_process( + &self, + this: storage::ProcessTracker, + process: storage::ProcessTrackerUpdate, + ) -> CustomResult { + self.diesel_store.update_process(this, process).await + } + + async fn process_tracker_update_process_status_by_ids( + &self, + task_ids: Vec, + task_update: storage::ProcessTrackerUpdate, + ) -> CustomResult { + self.diesel_store + .process_tracker_update_process_status_by_ids(task_ids, task_update) + .await + } + async fn update_process_tracker( + &self, + this: storage::ProcessTracker, + process: storage::ProcessTrackerUpdate, + ) -> CustomResult { + self.diesel_store + .update_process_tracker(this, process) + .await + } + + async fn insert_process( + &self, + new: storage::ProcessTrackerNew, + ) -> CustomResult { + self.diesel_store.insert_process(new).await + } + + async fn find_processes_by_time_status( + &self, + time_lower_limit: PrimitiveDateTime, + time_upper_limit: PrimitiveDateTime, + status: ProcessTrackerStatus, + limit: Option, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_processes_by_time_status(time_lower_limit, time_upper_limit, status, limit) + .await + } +} + +#[async_trait::async_trait] +impl CaptureInterface for KafkaStore { + async fn insert_capture( + &self, + capture: storage::CaptureNew, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .insert_capture(capture, storage_scheme) + .await + } + + async fn update_capture_with_capture_id( + &self, + this: storage::Capture, + capture: storage::CaptureUpdate, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .update_capture_with_capture_id(this, capture, storage_scheme) + .await + } + + async fn find_all_captures_by_merchant_id_payment_id_authorized_attempt_id( + &self, + merchant_id: &str, + payment_id: &str, + authorized_attempt_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_all_captures_by_merchant_id_payment_id_authorized_attempt_id( + merchant_id, + payment_id, + authorized_attempt_id, + storage_scheme, + ) + .await + } +} + +#[async_trait::async_trait] +impl RefundInterface for KafkaStore { + async fn find_refund_by_internal_reference_id_merchant_id( + &self, + internal_reference_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_refund_by_internal_reference_id_merchant_id( + internal_reference_id, + merchant_id, + storage_scheme, + ) + .await + } + + async fn find_refund_by_payment_id_merchant_id( 
+ &self, + payment_id: &str, + merchant_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_refund_by_payment_id_merchant_id(payment_id, merchant_id, storage_scheme) + .await + } + + async fn find_refund_by_merchant_id_refund_id( + &self, + merchant_id: &str, + refund_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_refund_by_merchant_id_refund_id(merchant_id, refund_id, storage_scheme) + .await + } + + async fn find_refund_by_merchant_id_connector_refund_id_connector( + &self, + merchant_id: &str, + connector_refund_id: &str, + connector: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .find_refund_by_merchant_id_connector_refund_id_connector( + merchant_id, + connector_refund_id, + connector, + storage_scheme, + ) + .await + } + + async fn update_refund( + &self, + this: storage::Refund, + refund: storage::RefundUpdate, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + let refund = self + .diesel_store + .update_refund(this.clone(), refund, storage_scheme) + .await?; + + if let Err(er) = self.kafka_producer.log_refund(&refund, Some(this)).await { + logger::error!(message="Failed to insert analytics event for Refund Update {refund:?}", error_message=?er); + } + Ok(refund) + } + + async fn find_refund_by_merchant_id_connector_transaction_id( + &self, + merchant_id: &str, + connector_transaction_id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .find_refund_by_merchant_id_connector_transaction_id( + merchant_id, + connector_transaction_id, + storage_scheme, + ) + .await + } + + async fn insert_refund( + &self, + new: storage::RefundNew, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + let refund = self.diesel_store.insert_refund(new, storage_scheme).await?; + + if let Err(er) = self.kafka_producer.log_refund(&refund, None).await { + logger::error!(message="Failed to insert analytics event for Refund Create {refund:?}", error_message=?er); + } + Ok(refund) + } + + #[cfg(feature = "olap")] + async fn filter_refund_by_constraints( + &self, + merchant_id: &str, + refund_details: &api_models::refunds::RefundListRequest, + storage_scheme: MerchantStorageScheme, + limit: i64, + offset: i64, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .filter_refund_by_constraints( + merchant_id, + refund_details, + storage_scheme, + limit, + offset, + ) + .await + } + + #[cfg(feature = "olap")] + async fn filter_refund_by_meta_constraints( + &self, + merchant_id: &str, + refund_details: &api_models::payments::TimeRange, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .filter_refund_by_meta_constraints(merchant_id, refund_details, storage_scheme) + .await + } + + #[cfg(feature = "olap")] + async fn get_total_count_of_refunds( + &self, + merchant_id: &str, + refund_details: &api_models::refunds::RefundListRequest, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .get_total_count_of_refunds(merchant_id, refund_details, storage_scheme) + .await + } +} + +#[async_trait::async_trait] +impl MerchantKeyStoreInterface for KafkaStore { + async fn insert_merchant_key_store( + &self, + merchant_key_store: domain::MerchantKeyStore, + key: &Secret>, + ) -> CustomResult { + self.diesel_store + .insert_merchant_key_store(merchant_key_store, key) + .await + } + + async fn 
get_merchant_key_store_by_merchant_id( + &self, + merchant_id: &str, + key: &Secret>, + ) -> CustomResult { + self.diesel_store + .get_merchant_key_store_by_merchant_id(merchant_id, key) + .await + } + + async fn delete_merchant_key_store_by_merchant_id( + &self, + merchant_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_merchant_key_store_by_merchant_id(merchant_id) + .await + } +} + +#[async_trait::async_trait] +impl BusinessProfileInterface for KafkaStore { + async fn insert_business_profile( + &self, + business_profile: business_profile::BusinessProfileNew, + ) -> CustomResult { + self.diesel_store + .insert_business_profile(business_profile) + .await + } + + async fn find_business_profile_by_profile_id( + &self, + profile_id: &str, + ) -> CustomResult { + self.diesel_store + .find_business_profile_by_profile_id(profile_id) + .await + } + + async fn update_business_profile_by_profile_id( + &self, + current_state: business_profile::BusinessProfile, + business_profile_update: business_profile::BusinessProfileUpdateInternal, + ) -> CustomResult { + self.diesel_store + .update_business_profile_by_profile_id(current_state, business_profile_update) + .await + } + + async fn delete_business_profile_by_profile_id_merchant_id( + &self, + profile_id: &str, + merchant_id: &str, + ) -> CustomResult { + self.diesel_store + .delete_business_profile_by_profile_id_merchant_id(profile_id, merchant_id) + .await + } + + async fn list_business_profile_by_merchant_id( + &self, + merchant_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .list_business_profile_by_merchant_id(merchant_id) + .await + } + + async fn find_business_profile_by_profile_name_merchant_id( + &self, + profile_name: &str, + merchant_id: &str, + ) -> CustomResult { + self.diesel_store + .find_business_profile_by_profile_name_merchant_id(profile_name, merchant_id) + .await + } +} + +#[async_trait::async_trait] +impl ReverseLookupInterface for KafkaStore { + async fn insert_reverse_lookup( + &self, + new: ReverseLookupNew, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .insert_reverse_lookup(new, storage_scheme) + .await + } + + async fn get_lookup_by_lookup_id( + &self, + id: &str, + storage_scheme: MerchantStorageScheme, + ) -> CustomResult { + self.diesel_store + .get_lookup_by_lookup_id(id, storage_scheme) + .await + } +} + +#[async_trait::async_trait] +impl RoutingAlgorithmInterface for KafkaStore { + async fn insert_routing_algorithm( + &self, + routing_algorithm: storage::RoutingAlgorithm, + ) -> CustomResult { + self.diesel_store + .insert_routing_algorithm(routing_algorithm) + .await + } + + async fn find_routing_algorithm_by_profile_id_algorithm_id( + &self, + profile_id: &str, + algorithm_id: &str, + ) -> CustomResult { + self.diesel_store + .find_routing_algorithm_by_profile_id_algorithm_id(profile_id, algorithm_id) + .await + } + + async fn find_routing_algorithm_by_algorithm_id_merchant_id( + &self, + algorithm_id: &str, + merchant_id: &str, + ) -> CustomResult { + self.diesel_store + .find_routing_algorithm_by_algorithm_id_merchant_id(algorithm_id, merchant_id) + .await + } + + async fn find_routing_algorithm_metadata_by_algorithm_id_profile_id( + &self, + algorithm_id: &str, + profile_id: &str, + ) -> CustomResult { + self.diesel_store + .find_routing_algorithm_metadata_by_algorithm_id_profile_id(algorithm_id, profile_id) + .await + } + + async fn list_routing_algorithm_metadata_by_profile_id( + &self, + profile_id: &str, + limit: i64, + offset: 
i64, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .list_routing_algorithm_metadata_by_profile_id(profile_id, limit, offset) + .await + } + + async fn list_routing_algorithm_metadata_by_merchant_id( + &self, + merchant_id: &str, + limit: i64, + offset: i64, + ) -> CustomResult, errors::StorageError> { + self.diesel_store + .list_routing_algorithm_metadata_by_merchant_id(merchant_id, limit, offset) + .await + } +} + +#[async_trait::async_trait] +impl GsmInterface for KafkaStore { + async fn add_gsm_rule( + &self, + rule: storage::GatewayStatusMappingNew, + ) -> CustomResult { + self.diesel_store.add_gsm_rule(rule).await + } + + async fn find_gsm_decision( + &self, + connector: String, + flow: String, + sub_flow: String, + code: String, + message: String, + ) -> CustomResult { + self.diesel_store + .find_gsm_decision(connector, flow, sub_flow, code, message) + .await + } + + async fn find_gsm_rule( + &self, + connector: String, + flow: String, + sub_flow: String, + code: String, + message: String, + ) -> CustomResult { + self.diesel_store + .find_gsm_rule(connector, flow, sub_flow, code, message) + .await + } + + async fn update_gsm_rule( + &self, + connector: String, + flow: String, + sub_flow: String, + code: String, + message: String, + data: storage::GatewayStatusMappingUpdate, + ) -> CustomResult { + self.diesel_store + .update_gsm_rule(connector, flow, sub_flow, code, message, data) + .await + } + + async fn delete_gsm_rule( + &self, + connector: String, + flow: String, + sub_flow: String, + code: String, + message: String, + ) -> CustomResult { + self.diesel_store + .delete_gsm_rule(connector, flow, sub_flow, code, message) + .await + } +} + +#[async_trait::async_trait] +impl StorageInterface for KafkaStore { + fn get_scheduler_db(&self) -> Box { + Box::new(self.clone()) + } +} + +#[async_trait::async_trait] +impl SchedulerInterface for KafkaStore {} + +impl MasterKeyInterface for KafkaStore { + fn get_master_key(&self) -> &[u8] { + self.diesel_store.get_master_key() + } +} +#[async_trait::async_trait] +impl UserInterface for KafkaStore { + async fn insert_user( + &self, + user_data: storage::UserNew, + ) -> CustomResult { + self.diesel_store.insert_user(user_data).await + } + + async fn find_user_by_email( + &self, + user_email: &str, + ) -> CustomResult { + self.diesel_store.find_user_by_email(user_email).await + } + + async fn find_user_by_id( + &self, + user_id: &str, + ) -> CustomResult { + self.diesel_store.find_user_by_id(user_id).await + } + + async fn update_user_by_user_id( + &self, + user_id: &str, + user: storage::UserUpdate, + ) -> CustomResult { + self.diesel_store + .update_user_by_user_id(user_id, user) + .await + } + + async fn delete_user_by_user_id( + &self, + user_id: &str, + ) -> CustomResult { + self.diesel_store.delete_user_by_user_id(user_id).await + } +} + +impl RedisConnInterface for KafkaStore { + fn get_redis_conn(&self) -> CustomResult, RedisError> { + self.diesel_store.get_redis_conn() + } +} + +#[async_trait::async_trait] +impl UserRoleInterface for KafkaStore { + async fn insert_user_role( + &self, + user_role: user_storage::UserRoleNew, + ) -> CustomResult { + self.diesel_store.insert_user_role(user_role).await + } + async fn find_user_role_by_user_id( + &self, + user_id: &str, + ) -> CustomResult { + self.diesel_store.find_user_role_by_user_id(user_id).await + } + async fn update_user_role_by_user_id_merchant_id( + &self, + user_id: &str, + merchant_id: &str, + update: user_storage::UserRoleUpdate, + ) -> CustomResult { + 
self.diesel_store + .update_user_role_by_user_id_merchant_id(user_id, merchant_id, update) + .await + } + async fn delete_user_role(&self, user_id: &str) -> CustomResult { + self.diesel_store.delete_user_role(user_id).await + } + async fn list_user_roles_by_user_id( + &self, + user_id: &str, + ) -> CustomResult, errors::StorageError> { + self.diesel_store.list_user_roles_by_user_id(user_id).await + } +} diff --git a/crates/router/src/events.rs b/crates/router/src/events.rs index 39a8543a68c4..8f980fee504a 100644 --- a/crates/router/src/events.rs +++ b/crates/router/src/events.rs @@ -1,15 +1,21 @@ -use serde::Serialize; +use data_models::errors::{StorageError, StorageResult}; +use error_stack::ResultExt; +use serde::{Deserialize, Serialize}; +use storage_impl::errors::ApplicationError; + +use crate::{db::KafkaProducer, services::kafka::KafkaSettings}; pub mod api_logs; pub mod event_logger; +pub mod kafka_handler; -pub trait EventHandler: Sync + Send + dyn_clone::DynClone { +pub(super) trait EventHandler: Sync + Send + dyn_clone::DynClone { fn log_event(&self, event: RawEvent); } dyn_clone::clone_trait_object!(EventHandler); -#[derive(Debug)] +#[derive(Debug, Serialize)] pub struct RawEvent { pub event_type: EventType, pub key: String, @@ -24,3 +30,55 @@ pub enum EventType { Refund, ApiLogs, } + +#[derive(Debug, Default, Deserialize, Clone)] +#[serde(tag = "source")] +#[serde(rename_all = "lowercase")] +pub enum EventsConfig { + Kafka { + kafka: KafkaSettings, + }, + #[default] + Logs, +} + +#[derive(Debug, Clone)] +pub enum EventsHandler { + Kafka(KafkaProducer), + Logs(event_logger::EventLogger), +} + +impl Default for EventsHandler { + fn default() -> Self { + Self::Logs(event_logger::EventLogger {}) + } +} + +impl EventsConfig { + pub async fn get_event_handler(&self) -> StorageResult { + Ok(match self { + Self::Kafka { kafka } => EventsHandler::Kafka( + KafkaProducer::create(kafka) + .await + .change_context(StorageError::InitializationError)?, + ), + Self::Logs => EventsHandler::Logs(event_logger::EventLogger::default()), + }) + } + + pub fn validate(&self) -> Result<(), ApplicationError> { + match self { + Self::Kafka { kafka } => kafka.validate(), + Self::Logs => Ok(()), + } + } +} + +impl EventsHandler { + pub fn log_event(&self, event: RawEvent) { + match self { + Self::Kafka(kafka) => kafka.log_event(event), + Self::Logs(logger) => logger.log_event(event), + } + } +} diff --git a/crates/router/src/events/api_logs.rs b/crates/router/src/events/api_logs.rs index 3f598e88394b..bfc10f722c1f 100644 --- a/crates/router/src/events/api_logs.rs +++ b/crates/router/src/events/api_logs.rs @@ -24,6 +24,7 @@ use crate::{ #[derive(Clone, Debug, Eq, PartialEq, Serialize)] #[serde(rename_all = "snake_case")] pub struct ApiEvent { + merchant_id: Option, api_flow: String, created_at_timestamp: i128, request_id: String, @@ -40,11 +41,13 @@ pub struct ApiEvent { #[serde(flatten)] event_type: ApiEventsType, hs_latency: Option, + http_method: Option, } impl ApiEvent { #[allow(clippy::too_many_arguments)] pub fn new( + merchant_id: Option, api_flow: &impl FlowMetric, request_id: &RequestId, latency: u128, @@ -56,8 +59,10 @@ impl ApiEvent { error: Option, event_type: ApiEventsType, http_req: &HttpRequest, + http_method: Option, ) -> Self { Self { + merchant_id, api_flow: api_flow.to_string(), created_at_timestamp: OffsetDateTime::now_utc().unix_timestamp_nanos() / 1_000_000, request_id: request_id.as_hyphenated().to_string(), @@ -78,6 +83,7 @@ impl ApiEvent { url_path: http_req.path().to_string(), 
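Since `EventsConfig` is an internally tagged enum (tag `source`, lowercased variant names) with `Logs` as the `#[default]` variant, moving a deployment onto Kafka is a configuration-only change. A hedged sketch of what parsing such a section could look like; the broker and topic values are invented, and the `toml` crate is assumed purely for illustration:

```rust
// Hypothetical, inside this crate where EventsConfig is in scope.
fn parse_events_config() -> EventsConfig {
    let raw = r#"
source = "kafka"

[kafka]
brokers = ["localhost:9092"]
intent_analytics_topic = "hyperswitch-payment-intent-events"
attempt_analytics_topic = "hyperswitch-payment-attempt-events"
refund_analytics_topic = "hyperswitch-refund-events"
api_logs_topic = "hyperswitch-api-log-events"
"#;
    // `source = "logs"` would select the plain EventLogger instead;
    // EventsConfig::default() also falls back to Logs.
    toml::from_str(raw).expect("events config should parse")
}
```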
event_type, hs_latency, + http_method, } } } diff --git a/crates/router/src/events/event_logger.rs b/crates/router/src/events/event_logger.rs index fda9b1a036ae..1bd75341be4a 100644 --- a/crates/router/src/events/event_logger.rs +++ b/crates/router/src/events/event_logger.rs @@ -7,6 +7,6 @@ pub struct EventLogger {} impl EventHandler for EventLogger { #[track_caller] fn log_event(&self, event: RawEvent) { - logger::info!(event = ?serde_json::to_string(&event.payload).unwrap_or(r#"{ "error": "Serialization failed" }"#.to_string()), event_type =? event.event_type, event_id =? event.key, log_type = "event"); + logger::info!(event = ?event.payload.to_string(), event_type =? event.event_type, event_id =? event.key, log_type = "event"); } } diff --git a/crates/router/src/events/kafka_handler.rs b/crates/router/src/events/kafka_handler.rs new file mode 100644 index 000000000000..d55847e6e8e7 --- /dev/null +++ b/crates/router/src/events/kafka_handler.rs @@ -0,0 +1,29 @@ +use error_stack::{IntoReport, ResultExt}; +use router_env::tracing; + +use super::{EventHandler, RawEvent}; +use crate::{ + db::MQResult, + services::kafka::{KafkaError, KafkaMessage, KafkaProducer}, +}; +impl EventHandler for KafkaProducer { + fn log_event(&self, event: RawEvent) { + let topic = self.get_topic(event.event_type); + if let Err(er) = self.log_kafka_event(topic, &event) { + tracing::error!("Failed to log event to kafka: {:?}", er); + } + } +} + +impl KafkaMessage for RawEvent { + fn key(&self) -> String { + self.key.clone() + } + + fn value(&self) -> MQResult> { + // Add better error logging here + serde_json::to_vec(&self.payload) + .into_report() + .change_context(KafkaError::GenericError) + } +} diff --git a/crates/router/src/lib.rs b/crates/router/src/lib.rs index 2b1f9c692d86..035314f71dfb 100644 --- a/crates/router/src/lib.rs +++ b/crates/router/src/lib.rs @@ -1,8 +1,6 @@ #![forbid(unsafe_code)] #![recursion_limit = "256"] -#[cfg(feature = "olap")] -pub mod analytics; #[cfg(feature = "stripe")] pub mod compatibility; pub mod configs; @@ -17,6 +15,8 @@ pub(crate) mod macros; pub mod routes; pub mod workflows; +#[cfg(feature = "olap")] +pub mod analytics; pub mod events; pub mod middleware; pub mod openapi; @@ -35,10 +35,7 @@ use storage_impl::errors::ApplicationResult; use tokio::sync::{mpsc, oneshot}; pub use self::env::logger; -use crate::{ - configs::settings, - core::errors::{self}, -}; +use crate::{configs::settings, core::errors}; #[cfg(feature = "mimalloc")] #[global_allocator] diff --git a/crates/router/src/routes/app.rs b/crates/router/src/routes/app.rs index 1a6f36363d1d..80993429c4e2 100644 --- a/crates/router/src/routes/app.rs +++ b/crates/router/src/routes/app.rs @@ -33,7 +33,7 @@ use super::{ephemeral_key::*, payment_methods::*, webhooks::*}; pub use crate::{ configs::settings, db::{StorageImpl, StorageInterface}, - events::{event_logger::EventLogger, EventHandler}, + events::EventsHandler, routes::cards_info::card_iin_info, services::get_store, }; @@ -43,7 +43,7 @@ pub struct AppState { pub flow_name: String, pub store: Box, pub conf: Arc, - pub event_handler: Box, + pub event_handler: EventsHandler, #[cfg(feature = "email")] pub email_client: Arc, #[cfg(feature = "kms")] @@ -62,7 +62,7 @@ impl scheduler::SchedulerAppState for AppState { pub trait AppStateInfo { fn conf(&self) -> settings::Settings; fn store(&self) -> Box; - fn event_handler(&self) -> Box; + fn event_handler(&self) -> EventsHandler; #[cfg(feature = "email")] fn email_client(&self) -> Arc; fn add_request_id(&mut self, request_id: 
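Note that the `KafkaMessage` impl for `RawEvent` above serializes only `event.payload`, not the whole envelope, so the Kafka record body is exactly the event payload. A hypothetical producer-side call (this assumes `payload` is a `serde_json::Value`, which the struct suggests but the hunk does not show):

```rust
// Hypothetical usage of the configured events backend.
fn emit_example(handler: &EventsHandler) {
    handler.log_event(RawEvent {
        event_type: EventType::ApiLogs,
        key: "merchant_abc_req_123".to_string(),
        payload: serde_json::json!({ "api_flow": "PaymentsCreate", "status_code": 200 }),
    });
}
```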
RequestId); @@ -82,8 +82,8 @@ impl AppStateInfo for AppState { fn email_client(&self) -> Arc { self.email_client.to_owned() } - fn event_handler(&self) -> Box { - self.event_handler.to_owned() + fn event_handler(&self) -> EventsHandler { + self.event_handler.clone() } fn add_request_id(&mut self, request_id: RequestId) { self.api_client.add_request_id(request_id); @@ -130,13 +130,31 @@ impl AppState { #[cfg(feature = "kms")] let kms_client = kms::get_kms_client(&conf.kms).await; let testable = storage_impl == StorageImpl::PostgresqlTest; + #[allow(clippy::expect_used)] + let event_handler = conf + .events + .get_event_handler() + .await + .expect("Failed to create event handler"); let store: Box = match storage_impl { - StorageImpl::Postgresql | StorageImpl::PostgresqlTest => Box::new( - #[allow(clippy::expect_used)] - get_store(&conf, shut_down_signal, testable) - .await - .expect("Failed to create store"), - ), + StorageImpl::Postgresql | StorageImpl::PostgresqlTest => match &event_handler { + EventsHandler::Kafka(kafka_client) => Box::new( + crate::db::KafkaStore::new( + #[allow(clippy::expect_used)] + get_store(&conf.clone(), shut_down_signal, testable) + .await + .expect("Failed to create store"), + kafka_client.clone(), + ) + .await, + ), + EventsHandler::Logs(_) => Box::new( + #[allow(clippy::expect_used)] + get_store(&conf, shut_down_signal, testable) + .await + .expect("Failed to create store"), + ), + }, #[allow(clippy::expect_used)] StorageImpl::Mock => Box::new( MockDb::new(&conf.redis) @@ -146,12 +164,7 @@ impl AppState { }; #[cfg(feature = "olap")] - let pool = crate::analytics::AnalyticsProvider::from_conf( - &conf.analytics, - #[cfg(feature = "kms")] - kms_client, - ) - .await; + let pool = crate::analytics::AnalyticsProvider::from_conf(&conf.analytics).await; #[cfg(feature = "kms")] #[allow(clippy::expect_used)] @@ -174,7 +187,7 @@ impl AppState { #[cfg(feature = "kms")] kms_secrets: Arc::new(kms_secrets), api_client, - event_handler: Box::::default(), + event_handler, #[cfg(feature = "olap")] pool, } diff --git a/crates/router/src/services.rs b/crates/router/src/services.rs index faea707f2a14..e46612b95dfc 100644 --- a/crates/router/src/services.rs +++ b/crates/router/src/services.rs @@ -4,6 +4,7 @@ pub mod authorization; pub mod encryption; #[cfg(feature = "olap")] pub mod jwt; +pub mod kafka; pub mod logger; #[cfg(feature = "email")] diff --git a/crates/router/src/services/api.rs b/crates/router/src/services/api.rs index 5481d5c5cf9d..1ff46474db59 100644 --- a/crates/router/src/services/api.rs +++ b/crates/router/src/services/api.rs @@ -873,6 +873,7 @@ where }; let api_event = ApiEvent::new( + Some(merchant_id.clone()), flow, &request_id, request_duration, @@ -884,6 +885,7 @@ where error, event_type.unwrap_or(ApiEventsType::Miscellaneous), request, + Some(request.method().to_string()), ); match api_event.clone().try_into() { Ok(event) => { diff --git a/crates/router/src/services/kafka.rs b/crates/router/src/services/kafka.rs new file mode 100644 index 000000000000..497ac16721b5 --- /dev/null +++ b/crates/router/src/services/kafka.rs @@ -0,0 +1,314 @@ +use std::sync::Arc; + +use common_utils::errors::CustomResult; +use error_stack::{report, IntoReport, ResultExt}; +use rdkafka::{ + config::FromClientConfig, + producer::{BaseRecord, DefaultProducerContext, Producer, ThreadedProducer}, +}; + +use crate::events::EventType; +mod api_event; +pub mod outgoing_request; +mod payment_attempt; +mod payment_intent; +mod refund; +pub use api_event::{ApiCallEventType, ApiEvents, 
ApiEventsType}; +use data_models::payments::{payment_attempt::PaymentAttempt, PaymentIntent}; +use diesel_models::refund::Refund; +use serde::Serialize; +use time::OffsetDateTime; + +use self::{ + payment_attempt::KafkaPaymentAttempt, payment_intent::KafkaPaymentIntent, refund::KafkaRefund, +}; +// Using message queue result here to avoid confusion with Kafka result provided by library +pub type MQResult = CustomResult; + +pub trait KafkaMessage +where + Self: Serialize, +{ + fn value(&self) -> MQResult> { + // Add better error logging here + serde_json::to_vec(&self) + .into_report() + .change_context(KafkaError::GenericError) + } + + fn key(&self) -> String; + + fn creation_timestamp(&self) -> Option { + None + } +} + +#[derive(serde::Serialize, Debug)] +struct KafkaEvent<'a, T: KafkaMessage> { + #[serde(flatten)] + event: &'a T, + sign_flag: i32, +} + +impl<'a, T: KafkaMessage> KafkaEvent<'a, T> { + fn new(event: &'a T) -> Self { + Self { + event, + sign_flag: 1, + } + } + fn old(event: &'a T) -> Self { + Self { + event, + sign_flag: -1, + } + } +} + +impl<'a, T: KafkaMessage> KafkaMessage for KafkaEvent<'a, T> { + fn key(&self) -> String { + self.event.key() + } + + fn creation_timestamp(&self) -> Option { + self.event.creation_timestamp() + } +} + +#[derive(Debug, serde::Deserialize, Clone, Default)] +#[serde(default)] +pub struct KafkaSettings { + brokers: Vec, + intent_analytics_topic: String, + attempt_analytics_topic: String, + refund_analytics_topic: String, + api_logs_topic: String, +} + +impl KafkaSettings { + pub fn validate(&self) -> Result<(), crate::core::errors::ApplicationError> { + use common_utils::ext_traits::ConfigExt; + + use crate::core::errors::ApplicationError; + + common_utils::fp_utils::when(self.brokers.is_empty(), || { + Err(ApplicationError::InvalidConfigurationValueError( + "Kafka brokers must not be empty".into(), + )) + })?; + + common_utils::fp_utils::when(self.intent_analytics_topic.is_default_or_empty(), || { + Err(ApplicationError::InvalidConfigurationValueError( + "Kafka Intent Analytics topic must not be empty".into(), + )) + })?; + + common_utils::fp_utils::when(self.attempt_analytics_topic.is_default_or_empty(), || { + Err(ApplicationError::InvalidConfigurationValueError( + "Kafka Attempt Analytics topic must not be empty".into(), + )) + })?; + + common_utils::fp_utils::when(self.refund_analytics_topic.is_default_or_empty(), || { + Err(ApplicationError::InvalidConfigurationValueError( + "Kafka Refund Analytics topic must not be empty".into(), + )) + })?; + + common_utils::fp_utils::when(self.api_logs_topic.is_default_or_empty(), || { + Err(ApplicationError::InvalidConfigurationValueError( + "Kafka API event Analytics topic must not be empty".into(), + )) + }) + } +} + +#[derive(Clone, Debug)] +pub struct KafkaProducer { + producer: Arc, + intent_analytics_topic: String, + attempt_analytics_topic: String, + refund_analytics_topic: String, + api_logs_topic: String, +} + +struct RdKafkaProducer(ThreadedProducer); + +impl std::fmt::Debug for RdKafkaProducer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("RdKafkaProducer") + } +} + +#[derive(Debug, Clone, thiserror::Error)] +pub enum KafkaError { + #[error("Generic Kafka Error")] + GenericError, + #[error("Kafka not implemented")] + NotImplemented, + #[error("Kafka Initialization Error")] + InitializationError, +} + +#[allow(unused)] +impl KafkaProducer { + pub async fn create(conf: &KafkaSettings) -> MQResult { + Ok(Self { + producer: Arc::new(RdKafkaProducer( + 
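`KafkaEvent` wraps every message with a `sign_flag`: `new` carries `+1` and `old` carries `-1`. This mirrors the collapsing-table convention on the ClickHouse side, where an update is ingested as a cancel row for the previous state plus a state row for the new one. A toy illustration of what the envelope serializes to, using stand-in types rather than the real event structs:

```rust
// Stand-in types only: shows the +1/-1 envelope on the wire.
use serde::Serialize;

#[derive(Serialize)]
struct Row<'a, T: Serialize> {
    #[serde(flatten)]
    event: &'a T,
    sign_flag: i32,
}

fn main() {
    let old_state = serde_json::json!({ "payment_id": "pay_1", "status": "pending" });
    let new_state = serde_json::json!({ "payment_id": "pay_1", "status": "succeeded" });
    // An update emits the cancel row first, then the new state:
    for row in [
        Row { event: &old_state, sign_flag: -1 },
        Row { event: &new_state, sign_flag: 1 },
    ] {
        println!("{}", serde_json::to_string(&row).expect("serializable"));
    }
}
```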
ThreadedProducer::from_config( + rdkafka::ClientConfig::new().set("bootstrap.servers", conf.brokers.join(",")), + ) + .into_report() + .change_context(KafkaError::InitializationError)?, + )), + + intent_analytics_topic: conf.intent_analytics_topic.clone(), + attempt_analytics_topic: conf.attempt_analytics_topic.clone(), + refund_analytics_topic: conf.refund_analytics_topic.clone(), + api_logs_topic: conf.api_logs_topic.clone(), + }) + } + + pub fn log_kafka_event( + &self, + topic: &str, + event: &T, + ) -> MQResult<()> { + router_env::logger::debug!("Logging Kafka Event {event:?}"); + self.producer + .0 + .send( + BaseRecord::to(topic) + .key(&event.key()) + .payload(&event.value()?) + .timestamp( + event + .creation_timestamp() + .unwrap_or_else(|| OffsetDateTime::now_utc().unix_timestamp()), + ), + ) + .map_err(|(error, record)| report!(error).attach_printable(format!("{record:?}"))) + .change_context(KafkaError::GenericError) + } + + pub async fn log_payment_attempt( + &self, + attempt: &PaymentAttempt, + old_attempt: Option, + ) -> MQResult<()> { + if let Some(negative_event) = old_attempt { + self.log_kafka_event( + &self.attempt_analytics_topic, + &KafkaEvent::old(&KafkaPaymentAttempt::from_storage(&negative_event)), + ) + .attach_printable_lazy(|| { + format!("Failed to add negative attempt event {negative_event:?}") + })?; + }; + self.log_kafka_event( + &self.attempt_analytics_topic, + &KafkaEvent::new(&KafkaPaymentAttempt::from_storage(attempt)), + ) + .attach_printable_lazy(|| format!("Failed to add positive attempt event {attempt:?}")) + } + + pub async fn log_payment_attempt_delete( + &self, + delete_old_attempt: &PaymentAttempt, + ) -> MQResult<()> { + self.log_kafka_event( + &self.attempt_analytics_topic, + &KafkaEvent::old(&KafkaPaymentAttempt::from_storage(delete_old_attempt)), + ) + .attach_printable_lazy(|| { + format!("Failed to add negative attempt event {delete_old_attempt:?}") + }) + } + + pub async fn log_payment_intent( + &self, + intent: &PaymentIntent, + old_intent: Option, + ) -> MQResult<()> { + if let Some(negative_event) = old_intent { + self.log_kafka_event( + &self.intent_analytics_topic, + &KafkaEvent::old(&KafkaPaymentIntent::from_storage(&negative_event)), + ) + .attach_printable_lazy(|| { + format!("Failed to add negative intent event {negative_event:?}") + })?; + }; + self.log_kafka_event( + &self.intent_analytics_topic, + &KafkaEvent::new(&KafkaPaymentIntent::from_storage(intent)), + ) + .attach_printable_lazy(|| format!("Failed to add positive intent event {intent:?}")) + } + + pub async fn log_payment_intent_delete( + &self, + delete_old_intent: &PaymentIntent, + ) -> MQResult<()> { + self.log_kafka_event( + &self.intent_analytics_topic, + &KafkaEvent::old(&KafkaPaymentIntent::from_storage(delete_old_intent)), + ) + .attach_printable_lazy(|| { + format!("Failed to add negative intent event {delete_old_intent:?}") + }) + } + + pub async fn log_refund(&self, refund: &Refund, old_refund: Option) -> MQResult<()> { + if let Some(negative_event) = old_refund { + self.log_kafka_event( + &self.refund_analytics_topic, + &KafkaEvent::old(&KafkaRefund::from_storage(&negative_event)), + ) + .attach_printable_lazy(|| { + format!("Failed to add negative refund event {negative_event:?}") + })?; + }; + self.log_kafka_event( + &self.refund_analytics_topic, + &KafkaEvent::new(&KafkaRefund::from_storage(refund)), + ) + .attach_printable_lazy(|| format!("Failed to add positive refund event {refund:?}")) + } + + pub async fn log_refund_delete(&self, delete_old_refund: 
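Taken together, a single update call produces an ordered pair of messages on the same topic. A call-site sketch as it would look inside this module, where `KafkaProducer`, `PaymentAttempt`, and `MQResult` are already in scope (the function and variable names are hypothetical, and error context is elided):

```rust
// Hypothetical call-site inside this module.
async fn example_relog(
    producer: &KafkaProducer,
    previous: PaymentAttempt,
    updated: &PaymentAttempt,
) -> MQResult<()> {
    // Emits KafkaEvent::old(previous) with sign_flag = -1, then
    // KafkaEvent::new(updated) with sign_flag = +1, both on the
    // attempt analytics topic.
    producer.log_payment_attempt(updated, Some(previous)).await
}
```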
&Refund) -> MQResult<()> { + self.log_kafka_event( + &self.refund_analytics_topic, + &KafkaEvent::old(&KafkaRefund::from_storage(delete_old_refund)), + ) + .attach_printable_lazy(|| { + format!("Failed to add negative refund event {delete_old_refund:?}") + }) + } + + pub async fn log_api_event(&self, event: &ApiEvents) -> MQResult<()> { + self.log_kafka_event(&self.api_logs_topic, event) + .attach_printable_lazy(|| format!("Failed to add api log event {event:?}")) + } + + pub fn get_topic(&self, event: EventType) -> &str { + match event { + EventType::ApiLogs => &self.api_logs_topic, + EventType::PaymentAttempt => &self.attempt_analytics_topic, + EventType::PaymentIntent => &self.intent_analytics_topic, + EventType::Refund => &self.refund_analytics_topic, + } + } +} + +impl Drop for RdKafkaProducer { + fn drop(&mut self) { + // Flush the producer to send any pending messages + match self.0.flush(rdkafka::util::Timeout::After( + std::time::Duration::from_secs(5), + )) { + Ok(_) => router_env::logger::info!("Kafka events flush Successful"), + Err(error) => router_env::logger::error!("Failed to flush Kafka Events {error:?}"), + } + } +} diff --git a/crates/router/src/services/kafka/api_event.rs b/crates/router/src/services/kafka/api_event.rs new file mode 100644 index 000000000000..7de271915927 --- /dev/null +++ b/crates/router/src/services/kafka/api_event.rs @@ -0,0 +1,108 @@ +use api_models::enums as api_enums; +use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(tag = "flow_type")] +pub enum ApiEventsType { + Payment { + payment_id: String, + }, + Refund { + payment_id: String, + refund_id: String, + }, + Default, + PaymentMethod { + payment_method_id: String, + payment_method: Option, + payment_method_type: Option, + }, + Customer { + customer_id: String, + }, + User { + // the merchant_id specified here overrides the globally defined one + merchant_id: String, + user_id: String, + }, + Webhooks { + connector: String, + payment_id: Option, + }, + OutgoingEvent, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct ApiEvents { + pub api_name: String, + pub request_id: Option, + // required to resolve the ambiguity when event_type is User + #[serde(skip_serializing_if = "Option::is_none")] + pub merchant_id: Option, + pub request: String, + pub response: String, + pub status_code: u16, + #[serde(with = "time::serde::timestamp")] + pub created_at: OffsetDateTime, + pub latency: u128, + // for conflicting fields, the flattened enum's fields are used + #[serde(flatten)] + pub event_type: ApiEventsType, + pub user_agent: Option, + pub ip_addr: Option, + pub url_path: Option, + pub api_event_type: Option, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub enum ApiCallEventType { + IncomingApiEvent, + OutgoingApiEvent, +} + +impl super::KafkaMessage for ApiEvents { + fn key(&self) -> String { + match &self.event_type { + ApiEventsType::Payment { payment_id } => format!( + "{}_{}", + self.merchant_id + .as_ref() + .unwrap_or(&"default_merchant_id".to_string()), + payment_id + ), + ApiEventsType::Refund { + payment_id, + refund_id, + } => format!("{payment_id}_{refund_id}"), + ApiEventsType::Default => "key".to_string(), + ApiEventsType::PaymentMethod { + payment_method_id, + payment_method, + payment_method_type, + } => format!( + "{:?}_{:?}_{:?}", + payment_method_id.clone(), + payment_method.clone(), + payment_method_type.clone(), + ), + ApiEventsType::Customer { customer_id } => 
customer_id.to_string(), + ApiEventsType::User { + merchant_id, + user_id, + } => format!("{}_{}", merchant_id, user_id), + ApiEventsType::Webhooks { + connector, + payment_id, + } => format!( + "webhook_{}_{connector}", + payment_id.clone().unwrap_or_default() + ), + ApiEventsType::OutgoingEvent => "outgoing_event".to_string(), + } + } + + fn creation_timestamp(&self) -> Option { + Some(self.created_at.unix_timestamp()) + } +} diff --git a/crates/router/src/services/kafka/outgoing_request.rs b/crates/router/src/services/kafka/outgoing_request.rs new file mode 100644 index 000000000000..bb09fe91fe6d --- /dev/null +++ b/crates/router/src/services/kafka/outgoing_request.rs @@ -0,0 +1,19 @@ +use reqwest::Url; + +pub struct OutgoingRequest { + pub url: Url, + pub latency: u128, +} + +// impl super::KafkaMessage for OutgoingRequest { +// fn key(&self) -> String { +// format!( +// "{}_{}", + +// ) +// } + +// fn creation_timestamp(&self) -> Option { +// Some(self.created_at.unix_timestamp()) +// } +// } diff --git a/crates/router/src/services/kafka/payment_attempt.rs b/crates/router/src/services/kafka/payment_attempt.rs new file mode 100644 index 000000000000..ea0721f418e5 --- /dev/null +++ b/crates/router/src/services/kafka/payment_attempt.rs @@ -0,0 +1,92 @@ +use data_models::payments::payment_attempt::PaymentAttempt; +use diesel_models::enums as storage_enums; +use time::OffsetDateTime; + +#[derive(serde::Serialize, Debug)] +pub struct KafkaPaymentAttempt<'a> { + pub payment_id: &'a String, + pub merchant_id: &'a String, + pub attempt_id: &'a String, + pub status: storage_enums::AttemptStatus, + pub amount: i64, + pub currency: Option, + pub save_to_locker: Option, + pub connector: Option<&'a String>, + pub error_message: Option<&'a String>, + pub offer_amount: Option, + pub surcharge_amount: Option, + pub tax_amount: Option, + pub payment_method_id: Option<&'a String>, + pub payment_method: Option, + pub connector_transaction_id: Option<&'a String>, + pub capture_method: Option, + #[serde(default, with = "time::serde::timestamp::option")] + pub capture_on: Option, + pub confirm: bool, + pub authentication_type: Option, + #[serde(with = "time::serde::timestamp")] + pub created_at: OffsetDateTime, + #[serde(with = "time::serde::timestamp")] + pub modified_at: OffsetDateTime, + #[serde(default, with = "time::serde::timestamp::option")] + pub last_synced: Option, + pub cancellation_reason: Option<&'a String>, + pub amount_to_capture: Option, + pub mandate_id: Option<&'a String>, + pub browser_info: Option, + pub error_code: Option<&'a String>, + pub connector_metadata: Option, + // TODO: These types should implement copy ideally + pub payment_experience: Option<&'a storage_enums::PaymentExperience>, + pub payment_method_type: Option<&'a storage_enums::PaymentMethodType>, +} + +impl<'a> KafkaPaymentAttempt<'a> { + pub fn from_storage(attempt: &'a PaymentAttempt) -> Self { + Self { + payment_id: &attempt.payment_id, + merchant_id: &attempt.merchant_id, + attempt_id: &attempt.attempt_id, + status: attempt.status, + amount: attempt.amount, + currency: attempt.currency, + save_to_locker: attempt.save_to_locker, + connector: attempt.connector.as_ref(), + error_message: attempt.error_message.as_ref(), + offer_amount: attempt.offer_amount, + surcharge_amount: attempt.surcharge_amount, + tax_amount: attempt.tax_amount, + payment_method_id: attempt.payment_method_id.as_ref(), + payment_method: attempt.payment_method, + connector_transaction_id: attempt.connector_transaction_id.as_ref(), + capture_method: 
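`KafkaPaymentAttempt` borrows nearly every field from the storage model, so building an event allocates nothing: serde serializes straight through the references, and the struct's lifetime pins it to the attempt it mirrors. The same pattern in miniature, with stand-in types:

```rust
// Stand-in types: the borrow-to-serialize pattern used by the Kafka* structs.
use serde::Serialize;

struct StoredThing {
    id: String,
    amount: i64,
    note: Option<String>,
}

#[derive(Serialize)]
struct ThingEvent<'a> {
    id: &'a String,
    amount: i64,              // Copy fields are copied...
    note: Option<&'a String>, // ...everything else is borrowed via as_ref()
}

impl<'a> ThingEvent<'a> {
    fn from_storage(t: &'a StoredThing) -> Self {
        Self {
            id: &t.id,
            amount: t.amount,
            note: t.note.as_ref(),
        }
    }
}
```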
diff --git a/crates/router/src/services/kafka/payment_attempt.rs b/crates/router/src/services/kafka/payment_attempt.rs
new file mode 100644
index 000000000000..ea0721f418e5
--- /dev/null
+++ b/crates/router/src/services/kafka/payment_attempt.rs
@@ -0,0 +1,92 @@
+use data_models::payments::payment_attempt::PaymentAttempt;
+use diesel_models::enums as storage_enums;
+use time::OffsetDateTime;
+
+#[derive(serde::Serialize, Debug)]
+pub struct KafkaPaymentAttempt<'a> {
+    pub payment_id: &'a String,
+    pub merchant_id: &'a String,
+    pub attempt_id: &'a String,
+    pub status: storage_enums::AttemptStatus,
+    pub amount: i64,
+    pub currency: Option<storage_enums::Currency>,
+    pub save_to_locker: Option<bool>,
+    pub connector: Option<&'a String>,
+    pub error_message: Option<&'a String>,
+    pub offer_amount: Option<i64>,
+    pub surcharge_amount: Option<i64>,
+    pub tax_amount: Option<i64>,
+    pub payment_method_id: Option<&'a String>,
+    pub payment_method: Option<storage_enums::PaymentMethod>,
+    pub connector_transaction_id: Option<&'a String>,
+    pub capture_method: Option<storage_enums::CaptureMethod>,
+    #[serde(default, with = "time::serde::timestamp::option")]
+    pub capture_on: Option<OffsetDateTime>,
+    pub confirm: bool,
+    pub authentication_type: Option<storage_enums::AuthenticationType>,
+    #[serde(with = "time::serde::timestamp")]
+    pub created_at: OffsetDateTime,
+    #[serde(with = "time::serde::timestamp")]
+    pub modified_at: OffsetDateTime,
+    #[serde(default, with = "time::serde::timestamp::option")]
+    pub last_synced: Option<OffsetDateTime>,
+    pub cancellation_reason: Option<&'a String>,
+    pub amount_to_capture: Option<i64>,
+    pub mandate_id: Option<&'a String>,
+    pub browser_info: Option<String>,
+    pub error_code: Option<&'a String>,
+    pub connector_metadata: Option<String>,
+    // TODO: These types should implement copy ideally
+    pub payment_experience: Option<&'a storage_enums::PaymentExperience>,
+    pub payment_method_type: Option<&'a storage_enums::PaymentMethodType>,
+}
+
+impl<'a> KafkaPaymentAttempt<'a> {
+    pub fn from_storage(attempt: &'a PaymentAttempt) -> Self {
+        Self {
+            payment_id: &attempt.payment_id,
+            merchant_id: &attempt.merchant_id,
+            attempt_id: &attempt.attempt_id,
+            status: attempt.status,
+            amount: attempt.amount,
+            currency: attempt.currency,
+            save_to_locker: attempt.save_to_locker,
+            connector: attempt.connector.as_ref(),
+            error_message: attempt.error_message.as_ref(),
+            offer_amount: attempt.offer_amount,
+            surcharge_amount: attempt.surcharge_amount,
+            tax_amount: attempt.tax_amount,
+            payment_method_id: attempt.payment_method_id.as_ref(),
+            payment_method: attempt.payment_method,
+            connector_transaction_id: attempt.connector_transaction_id.as_ref(),
+            capture_method: attempt.capture_method,
+            capture_on: attempt.capture_on.map(|i| i.assume_utc()),
+            confirm: attempt.confirm,
+            authentication_type: attempt.authentication_type,
+            created_at: attempt.created_at.assume_utc(),
+            modified_at: attempt.modified_at.assume_utc(),
+            last_synced: attempt.last_synced.map(|i| i.assume_utc()),
+            cancellation_reason: attempt.cancellation_reason.as_ref(),
+            amount_to_capture: attempt.amount_to_capture,
+            mandate_id: attempt.mandate_id.as_ref(),
+            browser_info: attempt.browser_info.as_ref().map(|v| v.to_string()),
+            error_code: attempt.error_code.as_ref(),
+            connector_metadata: attempt.connector_metadata.as_ref().map(|v| v.to_string()),
+            payment_experience: attempt.payment_experience.as_ref(),
+            payment_method_type: attempt.payment_method_type.as_ref(),
+        }
+    }
+}
+
+impl<'a> super::KafkaMessage for KafkaPaymentAttempt<'a> {
+    fn key(&self) -> String {
+        format!(
+            "{}_{}_{}",
+            self.merchant_id, self.payment_id, self.attempt_id
+        )
+    }
+
+    fn creation_timestamp(&self) -> Option<i64> {
+        Some(self.modified_at.unix_timestamp())
+    }
+}
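
The `key()` chosen here, merchant, payment, and attempt ids concatenated, is what Kafka uses for partition assignment, so every event for a given attempt lands in the same partition and replays in order; `creation_timestamp()` reuses `modified_at` so downstream consumers can keep the newest version of a row. A hypothetical producer-side sketch of how such a message could be published with rdkafka (the topic name and wiring are assumptions, not this patch's actual services/kafka.rs code):

    // Hypothetical sketch, not part of this patch: publishing one
    // serialized event with rdkafka.
    use std::time::Duration;

    use rdkafka::producer::{FutureProducer, FutureRecord};

    async fn publish_event(
        producer: &FutureProducer,
        topic: &str,
        key: &str,            // e.g. "{merchant_id}_{payment_id}_{attempt_id}"
        payload: &[u8],       // serde_json-serialized KafkaPaymentAttempt
        ts_secs: Option<i64>, // KafkaMessage::creation_timestamp()
    ) -> Result<(), rdkafka::error::KafkaError> {
        let mut record = FutureRecord::to(topic).key(key).payload(payload);
        if let Some(ts) = ts_secs {
            // rdkafka timestamps are expressed in milliseconds.
            record = record.timestamp(ts * 1000);
        }
        producer
            .send(record, Duration::from_secs(0))
            .await
            .map(|_| ())
            .map_err(|(err, _msg)| err)
    }
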
diff --git a/crates/router/src/services/kafka/payment_intent.rs b/crates/router/src/services/kafka/payment_intent.rs
new file mode 100644
index 000000000000..70980a6e8652
--- /dev/null
+++ b/crates/router/src/services/kafka/payment_intent.rs
@@ -0,0 +1,71 @@
+use data_models::payments::PaymentIntent;
+use diesel_models::enums as storage_enums;
+use time::OffsetDateTime;
+
+#[derive(serde::Serialize, Debug)]
+pub struct KafkaPaymentIntent<'a> {
+    pub payment_id: &'a String,
+    pub merchant_id: &'a String,
+    pub status: storage_enums::IntentStatus,
+    pub amount: i64,
+    pub currency: Option<storage_enums::Currency>,
+    pub amount_captured: Option<i64>,
+    pub customer_id: Option<&'a String>,
+    pub description: Option<&'a String>,
+    pub return_url: Option<&'a String>,
+    pub connector_id: Option<&'a String>,
+    pub statement_descriptor_name: Option<&'a String>,
+    pub statement_descriptor_suffix: Option<&'a String>,
+    #[serde(with = "time::serde::timestamp")]
+    pub created_at: OffsetDateTime,
+    #[serde(with = "time::serde::timestamp")]
+    pub modified_at: OffsetDateTime,
+    #[serde(default, with = "time::serde::timestamp::option")]
+    pub last_synced: Option<OffsetDateTime>,
+    pub setup_future_usage: Option<storage_enums::FutureUsage>,
+    pub off_session: Option<bool>,
+    pub client_secret: Option<&'a String>,
+    pub active_attempt_id: String,
+    pub business_country: Option<storage_enums::CountryAlpha2>,
+    pub business_label: Option<&'a String>,
+    pub attempt_count: i16,
+}
+
+impl<'a> KafkaPaymentIntent<'a> {
+    pub fn from_storage(intent: &'a PaymentIntent) -> Self {
+        Self {
+            payment_id: &intent.payment_id,
+            merchant_id: &intent.merchant_id,
+            status: intent.status,
+            amount: intent.amount,
+            currency: intent.currency,
+            amount_captured: intent.amount_captured,
+            customer_id: intent.customer_id.as_ref(),
+            description: intent.description.as_ref(),
+            return_url: intent.return_url.as_ref(),
+            connector_id: intent.connector_id.as_ref(),
+            statement_descriptor_name: intent.statement_descriptor_name.as_ref(),
+            statement_descriptor_suffix: intent.statement_descriptor_suffix.as_ref(),
+            created_at: intent.created_at.assume_utc(),
+            modified_at: intent.modified_at.assume_utc(),
+            last_synced: intent.last_synced.map(|i| i.assume_utc()),
+            setup_future_usage: intent.setup_future_usage,
+            off_session: intent.off_session,
+            client_secret: intent.client_secret.as_ref(),
+            active_attempt_id: intent.active_attempt.get_id(),
+            business_country: intent.business_country,
+            business_label: intent.business_label.as_ref(),
+            attempt_count: intent.attempt_count,
+        }
+    }
+}
+
+impl<'a> super::KafkaMessage for KafkaPaymentIntent<'a> {
+    fn key(&self) -> String {
+        format!("{}_{}", self.merchant_id, self.payment_id)
+    }
+
+    fn creation_timestamp(&self) -> Option<i64> {
+        Some(self.modified_at.unix_timestamp())
+    }
+}
diff --git a/crates/router/src/services/kafka/refund.rs b/crates/router/src/services/kafka/refund.rs
new file mode 100644
index 000000000000..0cc4865e7512
--- /dev/null
+++ b/crates/router/src/services/kafka/refund.rs
@@ -0,0 +1,68 @@
+use diesel_models::{enums as storage_enums, refund::Refund};
+use time::OffsetDateTime;
+
+#[derive(serde::Serialize, Debug)]
+pub struct KafkaRefund<'a> {
+    pub internal_reference_id: &'a String,
+    pub refund_id: &'a String, //merchant_reference id
+    pub payment_id: &'a String,
+    pub merchant_id: &'a String,
+    pub connector_transaction_id: &'a String,
+    pub connector: &'a String,
+    pub connector_refund_id: Option<&'a String>,
+    pub external_reference_id: Option<&'a String>,
+    pub refund_type: &'a storage_enums::RefundType,
+    pub total_amount: &'a i64,
+    pub currency: &'a storage_enums::Currency,
+    pub refund_amount: &'a i64,
+    pub refund_status: &'a storage_enums::RefundStatus,
+    pub sent_to_gateway: &'a bool,
+    pub refund_error_message: Option<&'a String>,
+    pub refund_arn: Option<&'a String>,
+    #[serde(default, with = "time::serde::timestamp")]
+    pub created_at: OffsetDateTime,
+    #[serde(default, with = "time::serde::timestamp")]
+    pub modified_at: OffsetDateTime,
+    pub description: Option<&'a String>,
+    pub attempt_id: &'a String,
+    pub refund_reason: Option<&'a String>,
+    pub refund_error_code: Option<&'a String>,
+}
+
+impl<'a> KafkaRefund<'a> {
+    pub fn from_storage(refund: &'a Refund) -> Self {
+        Self {
+            internal_reference_id: &refund.internal_reference_id,
+            refund_id: &refund.refund_id,
+            payment_id: &refund.payment_id,
+            merchant_id: &refund.merchant_id,
+            connector_transaction_id: &refund.connector_transaction_id,
+            connector: &refund.connector,
+            connector_refund_id: refund.connector_refund_id.as_ref(),
+            external_reference_id: refund.external_reference_id.as_ref(),
+            refund_type: &refund.refund_type,
+            total_amount: &refund.total_amount,
+            currency: &refund.currency,
+            refund_amount: &refund.refund_amount,
+            refund_status: &refund.refund_status,
+            sent_to_gateway: &refund.sent_to_gateway,
+            refund_error_message: refund.refund_error_message.as_ref(),
+            refund_arn: refund.refund_arn.as_ref(),
+            created_at: refund.created_at.assume_utc(),
+            modified_at: refund.updated_at.assume_utc(),
+            description: refund.description.as_ref(),
+            attempt_id: &refund.attempt_id,
+            refund_reason: refund.refund_reason.as_ref(),
+            refund_error_code: refund.refund_error_code.as_ref(),
+        }
+    }
+}
+
+impl<'a> super::KafkaMessage for KafkaRefund<'a> {
+    fn key(&self) -> String {
+        format!(
+            "{}_{}_{}_{}",
+            self.merchant_id, self.payment_id, self.attempt_id, self.refund_id
+        )
+    }
+}
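
All of these message structs serialize their `OffsetDateTime` fields as unix-epoch integers via `time::serde::timestamp`, which is the shape the ClickHouse tables ingest. A minimal, self-contained sketch of that on-wire behavior (the `Event` struct here is illustrative, not from the patch):

    // Illustrative only: shows the JSON produced by `time::serde::timestamp`,
    // which the structs above rely on.
    use time::OffsetDateTime;

    #[derive(serde::Serialize)]
    struct Event {
        #[serde(with = "time::serde::timestamp")]
        created_at: OffsetDateTime,
        #[serde(default, with = "time::serde::timestamp::option")]
        last_synced: Option<OffsetDateTime>,
    }

    fn main() {
        let event = Event {
            created_at: OffsetDateTime::UNIX_EPOCH,
            last_synced: None,
        };
        // Timestamps serialize as integer seconds since the unix epoch:
        // {"created_at":0,"last_synced":null}
        println!("{}", serde_json::to_string(&event).unwrap());
    }
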
diff --git a/crates/router/src/types/storage/payment_attempt.rs b/crates/router/src/types/storage/payment_attempt.rs
index f94d06997ca9..13b9f3dd5d5c 100644
--- a/crates/router/src/types/storage/payment_attempt.rs
+++ b/crates/router/src/types/storage/payment_attempt.rs
@@ -7,7 +7,6 @@ use error_stack::ResultExt;
 use crate::{
     core::errors, errors::RouterResult, types::transformers::ForeignFrom, utils::OptionExt,
 };
-
 pub trait PaymentAttemptExt {
     fn make_new_capture(
         &self,
@@ -134,9 +133,7 @@ mod tests {
        use crate::configs::settings::Settings;
        let conf = Settings::new().expect("invalid settings");
        let tx: oneshot::Sender<()> = oneshot::channel().0;
-
        let api_client = Box::new(services::MockApiClient);
-
        let state =
            routes::AppState::with_storage(conf, StorageImpl::PostgresqlTest, tx, api_client).await;
@@ -187,7 +184,6 @@ mod tests {
        let tx: oneshot::Sender<()> = oneshot::channel().0;
        let api_client = Box::new(services::MockApiClient);
-
        let state =
            routes::AppState::with_storage(conf, StorageImpl::PostgresqlTest, tx, api_client).await;
        let current_time = common_utils::date_time::now();
diff --git a/crates/router/tests/connectors/aci.rs b/crates/router/tests/connectors/aci.rs
index c9ee3a34f2ef..e12e27708f87 100644
--- a/crates/router/tests/connectors/aci.rs
+++ b/crates/router/tests/connectors/aci.rs
@@ -160,6 +160,7 @@ fn construct_refund_router_data() -> types::RefundsRouterData {
 async fn payments_create_success() {
     let conf = Settings::new().unwrap();
     let tx: oneshot::Sender<()> = oneshot::channel().0;
+
     let state = routes::AppState::with_storage(
         conf,
         StorageImpl::PostgresqlTest,
@@ -204,6 +205,7 @@ async fn payments_create_failure() {
         let conf = Settings::new().unwrap();
         static CV: aci::Aci = aci::Aci;
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
         let state = routes::AppState::with_storage(
             conf,
             StorageImpl::PostgresqlTest,
@@ -265,6 +267,7 @@ async fn refund_for_successful_payments() {
         merchant_connector_id: None,
     };
     let tx: oneshot::Sender<()> = oneshot::channel().0;
+
    let state = routes::AppState::with_storage(
        conf,
        StorageImpl::PostgresqlTest,
@@ -333,6 +336,7 @@ async fn refunds_create_failure() {
         merchant_connector_id: None,
     };
     let tx: oneshot::Sender<()> = oneshot::channel().0;
+
    let state = routes::AppState::with_storage(
        conf,
        StorageImpl::PostgresqlTest,
diff --git a/crates/router/tests/connectors/utils.rs b/crates/router/tests/connectors/utils.rs
index 67a0625968fb..f325370e737f 100644
--- a/crates/router/tests/connectors/utils.rs
+++ b/crates/router/tests/connectors/utils.rs
@@ -96,6 +96,7 @@ pub trait ConnectorActions: Connector {
             payment_info,
         );
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -120,6 +121,7 @@ pub trait ConnectorActions: Connector {
             payment_info,
         );
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -148,6 +150,7 @@ pub trait ConnectorActions: Connector {
             payment_info,
         );
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -561,6 +564,7 @@ pub trait ConnectorActions: Connector {
             .get_connector_integration();
         let mut request = self.get_payout_request(None, payout_type, payment_info);
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -601,6 +605,7 @@ pub trait ConnectorActions: Connector {
             .get_connector_integration();
         let mut request = self.get_payout_request(connector_payout_id, payout_type, payment_info);
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -642,6 +647,7 @@ pub trait ConnectorActions: Connector {
         let mut request = self.get_payout_request(None, payout_type, payment_info);
         request.connector_customer = connector_customer;
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -683,6 +689,7 @@ pub trait ConnectorActions: Connector {
         let mut request =
             self.get_payout_request(Some(connector_payout_id), payout_type, payment_info);
         let tx: oneshot::Sender<()> = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -770,6 +777,7 @@ pub trait ConnectorActions: Connector {
             .get_connector_integration();
         let mut request = self.get_payout_request(None, payout_type, payment_info);
         let tx = oneshot::channel().0;
+
        let state = routes::AppState::with_storage(
            Settings::new().unwrap(),
            StorageImpl::PostgresqlTest,
@@ -802,6 +810,7 @@ async fn call_connector<
 ) -> Result<RouterData<T, Req, Resp>, Report<ConnectorError>> {
     let conf = Settings::new().unwrap();
     let tx: oneshot::Sender<()> = oneshot::channel().0;
+
    let state = routes::AppState::with_storage(
        conf,
        StorageImpl::PostgresqlTest,
diff --git a/crates/router/tests/payments2.rs b/crates/router/tests/payments2.rs
index 5d4ca844061f..42e5524a15d5 100644
--- a/crates/router/tests/payments2.rs
+++ b/crates/router/tests/payments2.rs
@@ -217,6 +217,7 @@ async fn payments_create_core_adyen_no_redirect() {
     use router::configs::settings::Settings;
     let conf = Settings::new().expect("invalid settings");
     let tx: oneshot::Sender<()> = oneshot::channel().0;
+
    let state = routes::AppState::with_storage(
        conf,
        StorageImpl::PostgresqlTest,
diff --git a/crates/router/tests/utils.rs b/crates/router/tests/utils.rs
index 6cddbc043662..339eca6fa0fb 100644
--- a/crates/router/tests/utils.rs
+++ b/crates/router/tests/utils.rs
@@ -48,6 +48,7 @@ pub async fn mk_service(
         conf.connectors.stripe.base_url = url;
     }
     let tx: oneshot::Sender<()> = oneshot::channel().0;
+
    let app_state = AppState::with_storage(
        conf,
        router::db::StorageImpl::Mock,
diff --git a/crates/router_env/src/lib.rs b/crates/router_env/src/lib.rs
index e75606aa1531..3c7ba8b93df7 100644
--- a/crates/router_env/src/lib.rs
+++ b/crates/router_env/src/lib.rs
@@ -39,10 +39,19 @@ use crate::types::FlowMetric;
 #[derive(Debug, Display, Clone, PartialEq, Eq)]
 pub enum AnalyticsFlow {
     GetInfo,
+    GetPaymentMetrics,
+    GetRefundsMetrics,
+    GetSdkMetrics,
     GetPaymentFilters,
     GetRefundFilters,
-    GetRefundsMetrics,
-    GetPaymentMetrics,
+    GetSdkEventFilters,
+    GetApiEvents,
+    GetSdkEvents,
+    GeneratePaymentReport,
+    GenerateDisputeReport,
+    GenerateRefundReport,
+    GetApiEventMetrics,
+    GetApiEventFilters,
 }
 
 impl FlowMetric for AnalyticsFlow {}
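
Because `AnalyticsFlow` derives `Display` and implements the `FlowMetric` marker trait, each variant name can serve directly as a metrics or log label. A tiny hypothetical usage sketch, assuming the derived `Display` renders the variant name (as strum's derive does); `record_flow` is illustrative, not an API from this patch:

    // Hypothetical sketch: feeding a Display-deriving flow enum into a label.
    use router_env::AnalyticsFlow;

    fn record_flow(flow: &AnalyticsFlow) {
        // Display yields the variant name, e.g. "GetPaymentMetrics",
        // which makes a stable dashboard label.
        let label = flow.to_string();
        println!("analytics_flow={label}");
    }

    fn main() {
        record_flow(&AnalyticsFlow::GetPaymentMetrics);
    }
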
diff --git a/crates/scheduler/Cargo.toml b/crates/scheduler/Cargo.toml
index e0b68c709e8d..5e8674ab3814 100644
--- a/crates/scheduler/Cargo.toml
+++ b/crates/scheduler/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 
 [features]
 default = ["kv_store", "olap"]
-olap = []
+olap = ["storage_impl/olap"]
 kv_store = []
 
 [dependencies]
diff --git a/crates/storage_impl/src/config.rs b/crates/storage_impl/src/config.rs
index f53507831b11..fd95a6d315d6 100644
--- a/crates/storage_impl/src/config.rs
+++ b/crates/storage_impl/src/config.rs
@@ -1,6 +1,6 @@
 use masking::Secret;
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, serde::Deserialize)]
 pub struct Database {
     pub username: String,
     pub password: Secret<String>,
@@ -9,7 +9,41 @@ pub struct Database {
     pub dbname: String,
     pub pool_size: u32,
     pub connection_timeout: u64,
-    pub queue_strategy: bb8::QueueStrategy,
+    pub queue_strategy: QueueStrategy,
     pub min_idle: Option<u32>,
     pub max_lifetime: Option<u64>,
 }
+
+#[derive(Debug, serde::Deserialize, Clone, Copy, Default)]
+#[serde(rename_all = "PascalCase")]
+pub enum QueueStrategy {
+    #[default]
+    Fifo,
+    Lifo,
+}
+
+impl From<QueueStrategy> for bb8::QueueStrategy {
+    fn from(value: QueueStrategy) -> Self {
+        match value {
+            QueueStrategy::Fifo => Self::Fifo,
+            QueueStrategy::Lifo => Self::Lifo,
+        }
+    }
+}
+
+impl Default for Database {
+    fn default() -> Self {
+        Self {
+            username: String::new(),
+            password: Secret::<String>::default(),
+            host: "localhost".into(),
+            port: 5432,
+            dbname: String::new(),
+            pool_size: 5,
+            connection_timeout: 10,
+            queue_strategy: QueueStrategy::default(),
+            min_idle: None,
+            max_lifetime: None,
+        }
+    }
+}
diff --git a/crates/storage_impl/src/database/store.rs b/crates/storage_impl/src/database/store.rs
index c36575e37c97..75c34af14ac1 100644
--- a/crates/storage_impl/src/database/store.rs
+++ b/crates/storage_impl/src/database/store.rs
@@ -89,7 +89,7 @@ pub async fn diesel_make_pg_pool(
     let mut pool = bb8::Pool::builder()
         .max_size(database.pool_size)
         .min_idle(database.min_idle)
-        .queue_strategy(database.queue_strategy)
+        .queue_strategy(database.queue_strategy.into())
         .connection_timeout(std::time::Duration::from_secs(database.connection_timeout))
         .max_lifetime(database.max_lifetime.map(std::time::Duration::from_secs));
diff --git a/docker-compose.yml b/docker-compose.yml
index fd18906143f5..f51a47aee940 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -273,3 +273,66 @@ services:
     - "8001:8001"
     volumes:
       - redisinsight_store:/db
+
+  kafka0:
+    image: confluentinc/cp-kafka:7.0.5
+    hostname: kafka0
+    networks:
+      - router_net
+    ports:
+      - 9092:9092
+      - 9093
+      - 9997
+      - 29092
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
+      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+      JMX_PORT: 9997
+      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+    profiles:
+      - analytics
+    volumes:
+      - ./monitoring/kafka-script.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+
+  # Kafka UI for debugging kafka queues
+  kafka-ui:
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8090:8080
+    networks:
+      - router_net
+    depends_on:
+      - kafka0
+    profiles:
+      - analytics
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+      KAFKA_CLUSTERS_0_JMXPORT: 9997
+
+  clickhouse-server:
+    image: clickhouse/clickhouse-server:23.5
+    networks:
+      - router_net
+    ports:
+      - "9000"
+      - "8123:8123"
+    profiles:
+      - analytics
+    ulimits:
+      nofile:
+        soft: 262144
+        hard: 262144
\ No newline at end of file
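
With the new services gated behind the `analytics` Compose profile, none of them start by default; a reviewer can bring the pipeline up locally with something like the following, assuming a Docker Compose version that supports profiles:

    docker compose --profile analytics up -d

Kafka is then reachable from the host at localhost:9092, the Kafka UI at http://localhost:8090, and ClickHouse over HTTP at http://localhost:8123.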