diff --git a/Cargo.lock b/Cargo.lock
index 96bdcff3f86e..417e6d85db6d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -332,6 +332,36 @@ version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+[[package]]
+name = "analytics"
+version = "0.1.0"
+dependencies = [
+ "actix-web",
+ "api_models",
+ "async-trait",
+ "aws-config",
+ "aws-sdk-lambda",
+ "aws-smithy-types",
+ "bigdecimal",
+ "common_utils",
+ "diesel_models",
+ "error-stack",
+ "external_services",
+ "futures 0.3.28",
+ "masking",
+ "once_cell",
+ "reqwest",
+ "router_env",
+ "serde",
+ "serde_json",
+ "sqlx",
+ "storage_impl",
+ "strum 0.25.0",
+ "thiserror",
+ "time",
+ "tokio 1.32.0",
+]
+
[[package]]
name = "android-tzdata"
version = "0.1.1"
@@ -729,6 +759,31 @@ dependencies = [
"tracing",
]
+[[package]]
+name = "aws-sdk-lambda"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3ad176ffaa3aafa532246eb6a9f18a7d68da19950704ecc95d33d9dc3c62a9b"
+dependencies = [
+ "aws-credential-types",
+ "aws-endpoint",
+ "aws-http",
+ "aws-sig-auth",
+ "aws-smithy-async",
+ "aws-smithy-client",
+ "aws-smithy-http",
+ "aws-smithy-http-tower",
+ "aws-smithy-json",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes 1.5.0",
+ "http",
+ "regex",
+ "tokio-stream",
+ "tower",
+ "tracing",
+]
+
[[package]]
name = "aws-sdk-s3"
version = "0.28.0"
@@ -1148,6 +1203,7 @@ dependencies = [
"num-bigint",
"num-integer",
"num-traits",
+ "serde",
]
[[package]]
@@ -1256,7 +1312,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3"
dependencies = [
"once_cell",
- "proc-macro-crate",
+ "proc-macro-crate 2.0.0",
"proc-macro2",
"quote",
"syn 2.0.38",
@@ -3862,6 +3918,27 @@ dependencies = [
"libc",
]
+[[package]]
+name = "num_enum"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
+dependencies = [
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
+dependencies = [
+ "proc-macro-crate 1.3.1",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
[[package]]
name = "object"
version = "0.32.1"
@@ -4395,6 +4472,16 @@ dependencies = [
"vcpkg",
]
+[[package]]
+name = "proc-macro-crate"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
+dependencies = [
+ "once_cell",
+ "toml_edit 0.19.10",
+]
+
[[package]]
name = "proc-macro-crate"
version = "2.0.0"
@@ -4688,6 +4775,36 @@ dependencies = [
"crossbeam-utils 0.8.16",
]
+[[package]]
+name = "rdkafka"
+version = "0.36.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d54f02a5a40220f8a2dfa47ddb38ba9064475a5807a69504b6f91711df2eea63"
+dependencies = [
+ "futures-channel",
+ "futures-util",
+ "libc",
+ "log",
+ "rdkafka-sys",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "slab",
+ "tokio 1.32.0",
+]
+
+[[package]]
+name = "rdkafka-sys"
+version = "4.7.0+2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55e0d2f9ba6253f6ec72385e453294f8618e9e15c2c6aba2a5c01ccf9622d615"
+dependencies = [
+ "libc",
+ "libz-sys",
+ "num_enum",
+ "pkg-config",
+]
+
[[package]]
name = "redis-protocol"
version = "4.1.0"
@@ -4939,6 +5056,7 @@ dependencies = [
"actix-multipart",
"actix-rt",
"actix-web",
+ "analytics",
"api_models",
"argon2",
"async-bb8-diesel",
@@ -4988,6 +5106,7 @@ dependencies = [
"qrcode",
"rand 0.8.5",
"rand_chacha 0.3.1",
+ "rdkafka",
"redis_interface",
"regex",
"reqwest",
diff --git a/Dockerfile b/Dockerfile
index 8eb321dd2afd..e9591e5e9f27 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM rust:slim-bookworm as builder
+FROM rust:bookworm as builder
ARG EXTRA_FEATURES=""
@@ -36,7 +36,7 @@ RUN cargo build --release --features release ${EXTRA_FEATURES}
-FROM debian:bookworm-slim
+FROM debian:bookworm
# Placing config and binary executable in different directories
ARG CONFIG_DIR=/local/config
diff --git a/config/development.toml b/config/development.toml
index f2620bd37135..fa5fddb0d60a 100644
--- a/config/development.toml
+++ b/config/development.toml
@@ -475,3 +475,33 @@ delay_between_retries_in_milliseconds = 500
[kv_config]
ttl = 900 # 15 * 60 seconds
+
+[events]
+source = "logs"
+
+[events.kafka]
+brokers = ["localhost:9092"]
+intent_analytics_topic = "hyperswitch-payment-intent-events"
+attempt_analytics_topic = "hyperswitch-payment-attempt-events"
+refund_analytics_topic = "hyperswitch-refund-events"
+api_logs_topic = "hyperswitch-api-log-events"
+connector_events_topic = "hyperswitch-connector-api-events"
+
+[analytics]
+source = "sqlx"
+
+[analytics.clickhouse]
+username = "default"
+# password = ""
+host = "http://localhost:8123"
+database_name = "default"
+
+[analytics.sqlx]
+username = "db_user"
+password = "db_pass"
+host = "localhost"
+port = 5432
+dbname = "hyperswitch_db"
+pool_size = 5
+connection_timeout = 10
+queue_strategy = "Fifo"
\ No newline at end of file
diff --git a/config/docker_compose.toml b/config/docker_compose.toml
index 445e1e856846..4d50600e1bf8 100644
--- a/config/docker_compose.toml
+++ b/config/docker_compose.toml
@@ -333,16 +333,32 @@ supported_connectors = "braintree"
redis_lock_expiry_seconds = 180 # 3 * 60 seconds
delay_between_retries_in_milliseconds = 500
+[events.kafka]
+brokers = ["localhost:9092"]
+intent_analytics_topic = "hyperswitch-payment-intent-events"
+attempt_analytics_topic = "hyperswitch-payment-attempt-events"
+refund_analytics_topic = "hyperswitch-refund-events"
+api_logs_topic = "hyperswitch-api-log-events"
+connector_events_topic = "hyperswitch-connector-api-events"
+
[analytics]
source = "sqlx"
+[analytics.clickhouse]
+username = "default"
+# password = ""
+host = "http://localhost:8123"
+database_name = "default"
+
[analytics.sqlx]
username = "db_user"
password = "db_pass"
-host = "pg"
+host = "localhost"
port = 5432
dbname = "hyperswitch_db"
pool_size = 5
+connection_timeout = 10
+queue_strategy = "Fifo"
[kv_config]
ttl = 900 # 15 * 60 seconds
diff --git a/crates/analytics/Cargo.toml b/crates/analytics/Cargo.toml
new file mode 100644
index 000000000000..f49fe322ae3b
--- /dev/null
+++ b/crates/analytics/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "analytics"
+version = "0.1.0"
+description = "Analytics / Reports related functionality"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+
+[dependencies]
+# First party crates
+api_models = { version = "0.1.0", path = "../api_models" , features = ["errors"]}
+storage_impl = { version = "0.1.0", path = "../storage_impl", default-features = false }
+common_utils = { version = "0.1.0", path = "../common_utils"}
+external_services = { version = "0.1.0", path = "../external_services", default-features = false}
+masking = { version = "0.1.0", path = "../masking" }
+router_env = { version = "0.1.0", path = "../router_env", features = ["log_extra_implicit_fields", "log_custom_entries_to_extra"] }
+diesel_models = { version = "0.1.0", path = "../diesel_models", features = ["kv_store"] }
+
+#Third Party dependencies
+actix-web = "4.3.1"
+async-trait = "0.1.68"
+aws-config = { version = "0.55.3" }
+aws-sdk-lambda = { version = "0.28.0" }
+aws-smithy-types = { version = "0.55.3" }
+bigdecimal = { version = "0.3.1", features = ["serde"] }
+error-stack = "0.3.1"
+futures = "0.3.28"
+once_cell = "1.18.0"
+reqwest = { version = "0.11.18", features = ["serde_json"] }
+serde = { version = "1.0.163", features = ["derive", "rc"] }
+serde_json = "1.0.96"
+sqlx = { version = "0.6.3", features = ["postgres", "runtime-actix", "runtime-actix-native-tls", "time", "bigdecimal"] }
+strum = { version = "0.25.0", features = ["derive"] }
+thiserror = "1.0.43"
+time = { version = "0.3.21", features = ["serde", "serde-well-known", "std"] }
+tokio = { version = "1.28.2", features = ["macros", "rt-multi-thread"] }
diff --git a/crates/analytics/docs/clickhouse/README.md b/crates/analytics/docs/clickhouse/README.md
new file mode 100644
index 000000000000..2fd48a30c29f
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/README.md
@@ -0,0 +1,45 @@
+#### Starting the containers
+
+In our use case we rely on Kafka for ingesting events, so we can use Docker Compose to start all the components:
+
+```
+docker compose up -d clickhouse-server kafka-ui
+```
+
+> kafka-ui is a visual tool for inspecting Kafka, available at localhost:8090
+
+#### Setting up Clickhouse
+
+Once ClickHouse is up and running, you need to create the required tables for it.
+
+You can either visit the URL (http://localhost:8123/play) on which the clickhouse-server is running to get a query playground,
+or you can bash into the ClickHouse container and execute the commands manually:
+```
+# On your local terminal
+docker compose exec clickhouse-server bash
+
+# Inside the clickhouse-server container shell
+clickhouse-client --user default
+
+# Inside the clickhouse-client shell
+SHOW TABLES;
+CREATE TABLE ......
+```
+
+The table creation scripts are provided [here](./scripts)
+
+#### Running/Debugging your application
+Once set up, you can run your application either via Docker Compose or directly via `cargo run`.
+
+Remember to enable the Kafka events via the `[events]` section in the development.toml / docker_compose.toml files.
+
+Inspect the [kafka-ui](http://localhost:8090) to check the messages being inserted into the queue.
+
+If the messages/topics are available, you can run select queries on your ClickHouse tables to ensure data is being populated.
+
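+For instance, a quick sanity check from the clickhouse-client shell (the table names below are only illustrative; use whichever tables your ./scripts created):
+
+```sql
+-- list the tables created by the setup scripts
+SHOW TABLES;
+
+-- then count the rows flowing into one of them, e.g. a payment attempts table
+SELECT count(*) FROM payment_attempt_dist;
+```
+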
+If the data is not being populated in ClickHouse, you can check the error logs of the ClickHouse server via:
+```
+# Inside the clickhouse-server container shell
+tail -f /var/log/clickhouse-server/clickhouse-server.err.log
+```
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/README.md b/crates/analytics/docs/clickhouse/cluster_setup/README.md
new file mode 100644
index 000000000000..cd5f2dfeb023
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/README.md
@@ -0,0 +1,347 @@
+# Tutorial for setting up a ClickHouse server
+
+
+## Single server with docker
+
+
+- Run server
+
+```
+docker run -d --name clickhouse-server -p 9000:9000 --ulimit nofile=262144:262144 yandex/clickhouse-server
+
+```
+
+- Run client
+
+```
+docker run -it --rm --link clickhouse-server:clickhouse-server yandex/clickhouse-client --host clickhouse-server
+```
+
+Now you can check whether the setup succeeded.
+
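+For example, any trivial query from the client shell confirms the server is reachable:
+
+```sql
+-- run inside the clickhouse-client shell
+SELECT version();
+```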
+
+## Setup Cluster
+
+
+In this part we will set up:
+
+- 1 cluster, with 3 shards
+- Each shard has 2 replica servers
+- ReplicatedMergeTree & Distributed tables to set up our tables.
+
+
+### Cluster
+
+Let's see our docker-compose.yml first.
+
+```
+version: '3'
+
+services:
+ clickhouse-zookeeper:
+ image: zookeeper
+ ports:
+ - "2181:2181"
+ - "2182:2182"
+ container_name: clickhouse-zookeeper
+ hostname: clickhouse-zookeeper
+
+ clickhouse-01:
+ image: yandex/clickhouse-server
+ hostname: clickhouse-01
+ container_name: clickhouse-01
+ ports:
+ - 9001:9000
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-01.xml:/etc/clickhouse-server/config.d/macros.xml
+ # - ./data/server-01:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-02:
+ image: yandex/clickhouse-server
+ hostname: clickhouse-02
+ container_name: clickhouse-02
+ ports:
+ - 9002:9000
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-02.xml:/etc/clickhouse-server/config.d/macros.xml
+ # - ./data/server-02:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-03:
+ image: yandex/clickhouse-server
+ hostname: clickhouse-03
+ container_name: clickhouse-03
+ ports:
+ - 9003:9000
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-03.xml:/etc/clickhouse-server/config.d/macros.xml
+ # - ./data/server-03:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-04:
+ image: yandex/clickhouse-server
+ hostname: clickhouse-04
+ container_name: clickhouse-04
+ ports:
+ - 9004:9000
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-04.xml:/etc/clickhouse-server/config.d/macros.xml
+ # - ./data/server-04:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-05:
+ image: yandex/clickhouse-server
+ hostname: clickhouse-05
+ container_name: clickhouse-05
+ ports:
+ - 9005:9000
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-05.xml:/etc/clickhouse-server/config.d/macros.xml
+ # - ./data/server-05:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-06:
+ image: yandex/clickhouse-server
+ hostname: clickhouse-06
+ container_name: clickhouse-06
+ ports:
+ - 9006:9000
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-06.xml:/etc/clickhouse-server/config.d/macros.xml
+ # - ./data/server-06:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+networks:
+ default:
+ external:
+ name: clickhouse-net
+```
+
+
+We have 6 ClickHouse server containers and one ZooKeeper container.
+
+
+**To enable replication ZooKeeper is required. ClickHouse will take care of data consistency on all replicas and run restore procedure after failure automatically. It's recommended to deploy ZooKeeper cluster to separate servers.**
+
+**ZooKeeper is not a requirement — in some simple cases you can duplicate the data by writing it into all the replicas from your application code. This approach is not recommended — in this case ClickHouse is not able to guarantee data consistency on all replicas. This remains the responsibility of your application.**
+
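+Once ZooKeeper and the ClickHouse nodes are up, you can check from any node that it can reach ZooKeeper by querying the `system.zookeeper` table (this table requires a path filter):
+
+```sql
+-- lists the root znodes if the ZooKeeper connection works
+SELECT name FROM system.zookeeper WHERE path = '/';
+```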
+
+Let's look at the config files.
+
+`./config/clickhouse_config.xml` is the default config file in Docker; we copy it out and add this line:
+
+```
+<include_from>/etc/clickhouse-server/metrika.xml</include_from>
+```
+
+
+So let's see `clickhouse_metrika.xml`:
+
+```
+<yandex>
+    <clickhouse_remote_servers>
+        <cluster_1>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-01</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-06</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-02</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-03</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-04</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-05</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster_1>
+    </clickhouse_remote_servers>
+
+    <zookeeper-servers>
+        <node index="1">
+            <host>clickhouse-zookeeper</host>
+            <port>2181</port>
+        </node>
+    </zookeeper-servers>
+
+    <networks>
+        <ip>::/0</ip>
+    </networks>
+
+    <clickhouse_compression>
+        <case>
+            <min_part_size>10000000000</min_part_size>
+            <min_part_size_ratio>0.01</min_part_size_ratio>
+            <method>lz4</method>
+        </case>
+    </clickhouse_compression>
+</yandex>
+```
+
+And macros.xml: each instance has its own macros settings, like server 1:
+
+```
+<yandex>
+    <macros>
+        <replica>clickhouse-01</replica>
+        <shard>01</shard>
+        <layer>01</layer>
+    </macros>
+</yandex>
+```
+
+
+**Make sure your macros settings match the remote server settings in metrika.xml**
+
+So now you can start the server.
+
+```
+docker network create clickhouse-net
+docker-compose up -d
+```
+
+Connect to the server and check whether the cluster settings are fine:
+
+```
+docker run -it --rm --network="clickhouse-net" --link clickhouse-01:clickhouse-server yandex/clickhouse-client --host clickhouse-server
+```
+
+```sql
+clickhouse-01 :) select * from system.clusters;
+
+SELECT *
+FROM system.clusters
+
+┌─cluster─────────────────────┬─shard_num─┬─shard_weight─┬─replica_num─┬─host_name─────┬─host_address─┬─port─┬─is_local─┬─user────┬─default_database─┐
+│ cluster_1 │ 1 │ 1 │ 1 │ clickhouse-01 │ 172.21.0.4 │ 9000 │ 1 │ default │ │
+│ cluster_1 │ 1 │ 1 │ 2 │ clickhouse-06 │ 172.21.0.5 │ 9000 │ 1 │ default │ │
+│ cluster_1 │ 2 │ 1 │ 1 │ clickhouse-02 │ 172.21.0.8 │ 9000 │ 0 │ default │ │
+│ cluster_1 │ 2 │ 1 │ 2 │ clickhouse-03 │ 172.21.0.6 │ 9000 │ 0 │ default │ │
+│ cluster_1 │ 3 │ 1 │ 1 │ clickhouse-04 │ 172.21.0.7 │ 9000 │ 0 │ default │ │
+│ cluster_1 │ 3 │ 1 │ 2 │ clickhouse-05 │ 172.21.0.3 │ 9000 │ 0 │ default │ │
+│ test_shard_localhost │ 1 │ 1 │ 1 │ localhost │ 127.0.0.1 │ 9000 │ 1 │ default │ │
+│ test_shard_localhost_secure │ 1 │ 1 │ 1 │ localhost │ 127.0.0.1 │ 9440 │ 0 │ default │ │
+└─────────────────────────────┴───────────┴──────────────┴─────────────┴───────────────┴──────────────┴──────┴──────────┴─────────┴──────────────────┘
+```
+
+If you see this, it means the cluster settings are in place (but it does not yet prove the connections between nodes work).
+
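+To actually exercise those connections, you can run a query through the cluster with the `cluster` table function; it reaches one replica per shard and fails if the nodes cannot talk to each other:
+
+```sql
+-- should return one hostname per shard of cluster_1
+SELECT hostName() FROM cluster('cluster_1', system.one);
+```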
+
+### Replica Table
+
+So now we have cluster and replica settings. For ClickHouse, we need to create a ReplicatedMergeTree table as a local table on every server.
+
+```sql
+CREATE TABLE ttt (id Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/ttt', '{replica}') PARTITION BY id ORDER BY id
+```
+
+and create a Distributed table that connects to the local tables:
+
+```sql
+CREATE TABLE ttt_all as ttt ENGINE = Distributed(cluster_1, default, ttt, rand());
+```
+
+
+### Insert and test
+
+Generate some data and test.
+
+
+```
+# docker exec into client server 1 and
+for ((idx=1;idx<=100;++idx)); do clickhouse-client --host clickhouse-server --query "Insert into default.ttt_all values ($idx)"; done;
+```
+
+For the Distributed table:
+
+```
+select count(*) from ttt_all;
+```
+
+For the local table:
+
+```
+select count(*) from ttt;
+```
+
+
+## Authentication
+
+Please see config/users.xml
+
+
+- Connect
+```bash
+docker run -it --rm --network="clickhouse-net" --link clickhouse-01:clickhouse-server yandex/clickhouse-client --host clickhouse-server -u user1 --password 123456
+```
+
+## Source
+
+- https://clickhouse.yandex/docs/en/operations/table_engines/replication/#creating-replicated-tables
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml
new file mode 100644
index 000000000000..94c854dc273a
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_config.xml
@@ -0,0 +1,370 @@
+
+
+
+
+ error
+ 1000M
+ 1
+ 10
+
+
+
+ 8123
+ 9000
+
+
+
+
+
+
+
+
+ /etc/clickhouse-server/server.crt
+ /etc/clickhouse-server/server.key
+
+ /etc/clickhouse-server/dhparam.pem
+ none
+ true
+ true
+ sslv2,sslv3
+ true
+
+
+
+ true
+ true
+ sslv2,sslv3
+ true
+
+
+
+ RejectCertificateHandler
+
+
+
+
+
+
+
+
+ 9009
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 4096
+ 3
+
+
+ 100
+
+
+
+
+
+ 8589934592
+
+
+ 5368709120
+
+
+
+ /var/lib/clickhouse/
+
+
+ /var/lib/clickhouse/tmp/
+
+
+ /var/lib/clickhouse/user_files/
+
+
+ users.xml
+
+
+ default
+
+
+
+
+
+ default
+
+
+
+
+
+
+
+
+
+
+
+
+
+ localhost
+ 9000
+
+
+
+
+
+
+ localhost
+ 9440
+ 1
+
+
+
+
+
+
+
+ /etc/clickhouse-server/metrika.xml
+
+
+
+
+
+
+
+
+ 3600
+
+
+
+ 3600
+
+
+ 60
+
+
+
+
+
+
+
+
+
+ system
+
+
+ toYYYYMM(event_date)
+
+ 7500
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ *_dictionary.xml
+
+
+
+
+
+
+
+
+
+ /clickhouse/task_queue/ddl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ click_cost
+ any
+
+ 0
+ 3600
+
+
+ 86400
+ 60
+
+
+
+ max
+
+ 0
+ 60
+
+
+ 3600
+ 300
+
+
+ 86400
+ 3600
+
+
+
+
+
+ /var/lib/clickhouse/format_schemas/
+
+
+
+
+
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml
new file mode 100644
index 000000000000..b58ffc34bc29
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/clickhouse_metrika.xml
@@ -0,0 +1,60 @@
+<yandex>
+    <clickhouse_remote_servers>
+        <cluster_1>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-01</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-06</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-02</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-03</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <weight>1</weight>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-04</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-05</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster_1>
+    </clickhouse_remote_servers>
+
+    <zookeeper-servers>
+        <node index="1">
+            <host>clickhouse-zookeeper</host>
+            <port>2181</port>
+        </node>
+    </zookeeper-servers>
+
+    <networks>
+        <ip>::/0</ip>
+    </networks>
+
+    <clickhouse_compression>
+        <case>
+            <min_part_size>10000000000</min_part_size>
+            <min_part_size_ratio>0.01</min_part_size_ratio>
+            <method>lz4</method>
+        </case>
+    </clickhouse_compression>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml
new file mode 100644
index 000000000000..75df1c5916e8
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-01.xml
@@ -0,0 +1,9 @@
+<yandex>
+    <macros>
+        <replica>clickhouse-01</replica>
+        <shard>01</shard>
+        <layer>01</layer>
+        <installation>data</installation>
+        <cluster>cluster_1</cluster>
+    </macros>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml
new file mode 100644
index 000000000000..67e4a545b30c
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-02.xml
@@ -0,0 +1,9 @@
+<yandex>
+    <macros>
+        <replica>clickhouse-02</replica>
+        <shard>02</shard>
+        <layer>01</layer>
+        <installation>data</installation>
+        <cluster>cluster_1</cluster>
+    </macros>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml
new file mode 100644
index 000000000000..e9278191b80f
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-03.xml
@@ -0,0 +1,9 @@
+<yandex>
+    <macros>
+        <replica>clickhouse-03</replica>
+        <shard>02</shard>
+        <layer>01</layer>
+        <installation>data</installation>
+        <cluster>cluster_1</cluster>
+    </macros>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml
new file mode 100644
index 000000000000..033c0ad1152e
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-04.xml
@@ -0,0 +1,9 @@
+<yandex>
+    <macros>
+        <replica>clickhouse-04</replica>
+        <shard>03</shard>
+        <layer>01</layer>
+        <installation>data</installation>
+        <cluster>cluster_1</cluster>
+    </macros>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml
new file mode 100644
index 000000000000..c63314c5acea
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-05.xml
@@ -0,0 +1,9 @@
+<yandex>
+    <macros>
+        <replica>clickhouse-05</replica>
+        <shard>03</shard>
+        <layer>01</layer>
+        <installation>data</installation>
+        <cluster>cluster_1</cluster>
+    </macros>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml
new file mode 100644
index 000000000000..4b01bda9948c
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/macros/macros-06.xml
@@ -0,0 +1,9 @@
+<yandex>
+    <macros>
+        <replica>clickhouse-06</replica>
+        <shard>01</shard>
+        <layer>01</layer>
+        <installation>data</installation>
+        <cluster>cluster_1</cluster>
+    </macros>
+</yandex>
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/config/users.xml b/crates/analytics/docs/clickhouse/cluster_setup/config/users.xml
new file mode 100644
index 000000000000..e1b8de78e37a
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/config/users.xml
@@ -0,0 +1,117 @@
+
+
+
+
+
+
+
+ 10000000000
+
+
+ 0
+
+
+ random
+
+
+
+
+ 1
+
+
+
+
+
+
+ 123456
+
+ ::/0
+
+ default
+ default
+
+
+
+
+
+
+
+
+ ::/0
+
+
+
+ default
+
+
+ default
+
+
+
+
+
+
+ ::1
+ 127.0.0.1
+
+ readonly
+ default
+
+
+
+
+
+
+
+
+
+
+ 3600
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml b/crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml
new file mode 100644
index 000000000000..96d7618b47e6
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/docker-compose.yml
@@ -0,0 +1,198 @@
+version: '3'
+
+networks:
+ ckh_net:
+
+services:
+ clickhouse-zookeeper:
+ image: zookeeper
+ ports:
+ - "2181:2181"
+ - "2182:2182"
+ container_name: clickhouse-zookeeper
+ hostname: clickhouse-zookeeper
+ networks:
+ - ckh_net
+
+ clickhouse-01:
+ image: clickhouse/clickhouse-server
+ hostname: clickhouse-01
+ container_name: clickhouse-01
+ networks:
+ - ckh_net
+ ports:
+ - 9001:9000
+ - 8124:8123
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-01.xml:/etc/clickhouse-server/config.d/macros.xml
+ - ./config/users.xml:/etc/clickhouse-server/users.xml
+ # - ./data/server-01:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-02:
+ image: clickhouse/clickhouse-server
+ hostname: clickhouse-02
+ container_name: clickhouse-02
+ networks:
+ - ckh_net
+ ports:
+ - 9002:9000
+ - 8125:8123
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-02.xml:/etc/clickhouse-server/config.d/macros.xml
+ - ./config/users.xml:/etc/clickhouse-server/users.xml
+ # - ./data/server-02:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-03:
+ image: clickhouse/clickhouse-server
+ hostname: clickhouse-03
+ container_name: clickhouse-03
+ networks:
+ - ckh_net
+ ports:
+ - 9003:9000
+ - 8126:8123
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-03.xml:/etc/clickhouse-server/config.d/macros.xml
+ - ./config/users.xml:/etc/clickhouse-server/users.xml
+ # - ./data/server-03:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-04:
+ image: clickhouse/clickhouse-server
+ hostname: clickhouse-04
+ container_name: clickhouse-04
+ networks:
+ - ckh_net
+ ports:
+ - 9004:9000
+ - 8127:8123
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-04.xml:/etc/clickhouse-server/config.d/macros.xml
+ - ./config/users.xml:/etc/clickhouse-server/users.xml
+ # - ./data/server-04:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-05:
+ image: clickhouse/clickhouse-server
+ hostname: clickhouse-05
+ container_name: clickhouse-05
+ networks:
+ - ckh_net
+ ports:
+ - 9005:9000
+ - 8128:8123
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-05.xml:/etc/clickhouse-server/config.d/macros.xml
+ - ./config/users.xml:/etc/clickhouse-server/users.xml
+ # - ./data/server-05:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ clickhouse-06:
+ image: clickhouse/clickhouse-server
+ hostname: clickhouse-06
+ container_name: clickhouse-06
+ networks:
+ - ckh_net
+ ports:
+ - 9006:9000
+ - 8129:8123
+ volumes:
+ - ./config/clickhouse_config.xml:/etc/clickhouse-server/config.xml
+ - ./config/clickhouse_metrika.xml:/etc/clickhouse-server/metrika.xml
+ - ./config/macros/macros-06.xml:/etc/clickhouse-server/config.d/macros.xml
+ - ./config/users.xml:/etc/clickhouse-server/users.xml
+ # - ./data/server-06:/var/lib/clickhouse
+ ulimits:
+ nofile:
+ soft: 262144
+ hard: 262144
+ depends_on:
+ - "clickhouse-zookeeper"
+
+ kafka0:
+ image: confluentinc/cp-kafka:7.0.5
+ hostname: kafka0
+ container_name: kafka0
+ ports:
+ - 9092:9092
+ - 9093
+ - 9997
+ - 29092
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+ JMX_PORT: 9997
+ KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+ volumes:
+ - ./kafka-script.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+ networks:
+ ckh_net:
+ aliases:
+ - hyper-c1-kafka-brokers.kafka-cluster.svc.cluster.local
+
+
+ # Kafka UI for debugging kafka queues
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:latest
+ ports:
+ - 8090:8080
+ depends_on:
+ - kafka0
+ networks:
+ - ckh_net
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+ KAFKA_CLUSTERS_0_JMXPORT: 9997
+
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh b/crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh
new file mode 100755
index 000000000000..023c832b4e1b
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/kafka-script.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# This script is required to run the Kafka cluster (without ZooKeeper)
+
+# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
+sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
+
+# Docker workaround: Ignore cub zk-ready
+sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
+
+# KRaft required step: Format the storage directory with a new cluster ID
+echo "kafka-storage format --ignore-formatted -t $(kafka-storage random-uuid) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql
new file mode 100644
index 000000000000..0fe194a0e676
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/api_event_logs.sql
@@ -0,0 +1,237 @@
+CREATE TABLE hyperswitch.api_events_queue on cluster '{cluster}' (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `request_id` String,
+ `flow_type` LowCardinality(String),
+ `api_name` LowCardinality(String),
+ `request` String,
+ `response` String,
+ `status_code` UInt32,
+ `url_path` LowCardinality(Nullable(String)),
+ `event_type` LowCardinality(Nullable(String)),
+ `created_at` DateTime CODEC(T64, LZ4),
+ `latency` Nullable(UInt128),
+ `user_agent` Nullable(String),
+ `ip_addr` Nullable(String)
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'hyper-c1-kafka-brokers.kafka-cluster.svc.cluster.local:9092',
+kafka_topic_list = 'hyperswitch-api-log-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+
+CREATE TABLE hyperswitch.api_events_clustered on cluster '{cluster}' (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `request_id` Nullable(String),
+ `flow_type` LowCardinality(String),
+ `api_name` LowCardinality(String),
+ `request` String,
+ `response` String,
+ `status_code` UInt32,
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `latency` Nullable(UInt128),
+ `user_agent` Nullable(String),
+ `ip_addr` Nullable(String),
+ INDEX flowIndex flow_type TYPE bloom_filter GRANULARITY 1,
+ INDEX apiIndex api_name TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex status_code TYPE bloom_filter GRANULARITY 1
+) ENGINE = ReplicatedMergeTree(
+ '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/api_events_clustered',
+ '{replica}'
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, flow_type, status_code, api_name)
+TTL created_at + toIntervalMonth(6)
+;
+
+
+CREATE TABLE hyperswitch.api_events_dist on cluster '{cluster}' (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `request_id` Nullable(String),
+ `flow_type` LowCardinality(String),
+ `api_name` LowCardinality(String),
+ `request` String,
+ `response` String,
+ `status_code` UInt32,
+ `url_path` LowCardinality(Nullable(String)),
+ `event_type` LowCardinality(Nullable(String)),
+ `inserted_at` DateTime64(3),
+ `created_at` DateTime64(3),
+ `latency` Nullable(UInt128),
+ `user_agent` Nullable(String),
+ `ip_addr` Nullable(String)
+) ENGINE = Distributed('{cluster}', 'hyperswitch', 'api_events_clustered', rand());
+
+CREATE MATERIALIZED VIEW hyperswitch.api_events_mv on cluster '{cluster}' TO hyperswitch.api_events_dist (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `request_id` Nullable(String),
+ `flow_type` LowCardinality(String),
+ `api_name` LowCardinality(String),
+ `request` String,
+ `response` String,
+ `status_code` UInt32,
+ `url_path` LowCardinality(Nullable(String)),
+ `event_type` LowCardinality(Nullable(String)),
+ `inserted_at` DateTime64(3),
+ `created_at` DateTime64(3),
+ `latency` Nullable(UInt128),
+ `user_agent` Nullable(String),
+ `ip_addr` Nullable(String)
+) AS
+SELECT
+ merchant_id,
+ payment_id,
+ refund_id,
+ payment_method_id,
+ payment_method,
+ payment_method_type,
+ customer_id,
+ user_id,
+ request_id,
+ flow_type,
+ api_name,
+ request,
+ response,
+ status_code,
+ url_path,
+ event_type,
+ now() as inserted_at,
+ created_at,
+ latency,
+ user_agent,
+ ip_addr
+FROM
+ hyperswitch.api_events_queue
+WHERE length(_error) = 0;
+
+
+CREATE MATERIALIZED VIEW hyperswitch.api_events_parse_errors on cluster '{cluster}'
+(
+ `topic` String,
+ `partition` Int64,
+ `offset` Int64,
+ `raw` String,
+ `error` String
+)
+ENGINE = MergeTree
+ORDER BY (topic, partition, offset)
+SETTINGS index_granularity = 8192 AS
+SELECT
+ _topic AS topic,
+ _partition AS partition,
+ _offset AS offset,
+ _raw_message AS raw,
+ _error AS error
+FROM hyperswitch.api_events_queue
+WHERE length(_error) > 0
+;
+
+
+ALTER TABLE hyperswitch.api_events_clustered on cluster '{cluster}' ADD COLUMN `url_path` LowCardinality(Nullable(String));
+ALTER TABLE hyperswitch.api_events_clustered on cluster '{cluster}' ADD COLUMN `event_type` LowCardinality(Nullable(String));
+
+
+CREATE TABLE hyperswitch.api_audit_log ON CLUSTER '{cluster}' (
+ `merchant_id` LowCardinality(String),
+ `payment_id` String,
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `user_id` Nullable(String),
+ `request_id` Nullable(String),
+ `flow_type` LowCardinality(String),
+ `api_name` LowCardinality(String),
+ `request` String,
+ `response` String,
+ `status_code` UInt32,
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `latency` Nullable(UInt128),
+ `user_agent` Nullable(String),
+ `ip_addr` Nullable(String),
+ `url_path` LowCardinality(Nullable(String)),
+ `event_type` LowCardinality(Nullable(String)),
+ `customer_id` LowCardinality(Nullable(String))
+) ENGINE = ReplicatedMergeTree( '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/api_audit_log', '{replica}' ) PARTITION BY merchant_id
+ORDER BY (merchant_id, payment_id)
+TTL created_at + toIntervalMonth(18)
+SETTINGS index_granularity = 8192;
+
+
+CREATE MATERIALIZED VIEW hyperswitch.api_audit_log_mv ON CLUSTER `{cluster}` TO hyperswitch.api_audit_log(
+ `merchant_id` LowCardinality(String),
+ `payment_id` String,
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `request_id` Nullable(String),
+ `flow_type` LowCardinality(String),
+ `api_name` LowCardinality(String),
+ `request` String,
+ `response` String,
+ `status_code` UInt32,
+ `url_path` LowCardinality(Nullable(String)),
+ `event_type` LowCardinality(Nullable(String)),
+ `inserted_at` DateTime64(3),
+ `created_at` DateTime64(3),
+ `latency` Nullable(UInt128),
+ `user_agent` Nullable(String),
+ `ip_addr` Nullable(String)
+) AS
+SELECT
+ merchant_id,
+ multiIf(payment_id IS NULL, '', payment_id) AS payment_id,
+ refund_id,
+ payment_method_id,
+ payment_method,
+ payment_method_type,
+ customer_id,
+ user_id,
+ request_id,
+ flow_type,
+ api_name,
+ request,
+ response,
+ status_code,
+ url_path,
+ event_type,
+ now() AS inserted_at,
+ created_at,
+ latency,
+ user_agent,
+ ip_addr
+FROM hyperswitch.api_events_queue
+WHERE length(_error) = 0
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql
new file mode 100644
index 000000000000..3a6281ae9050
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_attempts.sql
@@ -0,0 +1,217 @@
+CREATE TABLE hyperswitch.payment_attempt_queue on cluster '{cluster}' (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` LowCardinality(Nullable(String)),
+ `capture_on` Nullable(DateTime) CODEC(T64, LZ4),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `modified_at` DateTime CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-payment-attempt-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+
+CREATE TABLE hyperswitch.payment_attempt_dist on cluster '{cluster}' (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` Nullable(String),
+ `capture_on` Nullable(DateTime) CODEC(T64, LZ4),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Distributed('{cluster}', 'hyperswitch', 'payment_attempt_clustered', cityHash64(attempt_id));
+
+
+
+CREATE MATERIALIZED VIEW hyperswitch.payment_attempt_mv on cluster '{cluster}' TO hyperswitch.payment_attempt_dist (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` Nullable(String),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime64(3),
+ `capture_on` Nullable(DateTime64(3)),
+ `last_synced` Nullable(DateTime64(3)),
+ `modified_at` DateTime64(3),
+ `inserted_at` DateTime64(3),
+ `sign_flag` Int8
+) AS
+SELECT
+ payment_id,
+ merchant_id,
+ attempt_id,
+ status,
+ amount,
+ currency,
+ connector,
+ save_to_locker,
+ error_message,
+ offer_amount,
+ surcharge_amount,
+ tax_amount,
+ payment_method_id,
+ payment_method,
+ payment_method_type,
+ connector_transaction_id,
+ capture_method,
+ confirm,
+ authentication_type,
+ cancellation_reason,
+ amount_to_capture,
+ mandate_id,
+ browser_info,
+ error_code,
+ connector_metadata,
+ payment_experience,
+ created_at,
+ capture_on,
+ last_synced,
+ modified_at,
+ now() as inserted_at,
+ sign_flag
+FROM
+ hyperswitch.payment_attempt_queue
+WHERE length(_error) = 0;
+
+
+CREATE TABLE hyperswitch.payment_attempt_clustered on cluster '{cluster}' (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` Nullable(String),
+ `capture_on` Nullable(DateTime) CODEC(T64, LZ4),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8,
+ INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1,
+ INDEX paymentMethodIndex payment_method TYPE bloom_filter GRANULARITY 1,
+ INDEX authenticationTypeIndex authentication_type TYPE bloom_filter GRANULARITY 1,
+ INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex status TYPE bloom_filter GRANULARITY 1
+) ENGINE = ReplicatedCollapsingMergeTree(
+ '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/payment_attempt_clustered',
+ '{replica}',
+ sign_flag
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, attempt_id)
+TTL created_at + toIntervalMonth(6)
+;
+
+CREATE MATERIALIZED VIEW hyperswitch.payment_attempt_parse_errors on cluster '{cluster}'
+(
+ `topic` String,
+ `partition` Int64,
+ `offset` Int64,
+ `raw` String,
+ `error` String
+)
+ENGINE = MergeTree
+ORDER BY (topic, partition, offset)
+SETTINGS index_granularity = 8192 AS
+SELECT
+ _topic AS topic,
+ _partition AS partition,
+ _offset AS offset,
+ _raw_message AS raw,
+ _error AS error
+FROM hyperswitch.payment_attempt_queue
+WHERE length(_error) > 0
+;
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql
new file mode 100644
index 000000000000..eb2d83140e92
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/payment_intents.sql
@@ -0,0 +1,165 @@
+CREATE TABLE hyperswitch.payment_intents_queue on cluster '{cluster}' (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` String,
+ `business_label` String,
+ `modified_at` DateTime,
+ `created_at` DateTime,
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-payment-intent-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+CREATE TABLE hyperswitch.payment_intents_dist on cluster '{cluster}' (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` LowCardinality(String),
+ `business_label` String,
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Distributed('{cluster}', 'hyperswitch', 'payment_intents_clustered', cityHash64(payment_id));
+
+CREATE TABLE hyperswitch.payment_intents_clustered on cluster '{cluster}' (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` LowCardinality(String),
+ `business_label` String,
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8,
+ INDEX connectorIndex connector_id TYPE bloom_filter GRANULARITY 1,
+ INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex status TYPE bloom_filter GRANULARITY 1
+) ENGINE = ReplicatedCollapsingMergeTree(
+ '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/payment_intents_clustered',
+ '{replica}',
+ sign_flag
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, payment_id)
+TTL created_at + toIntervalMonth(6)
+;
+
+CREATE MATERIALIZED VIEW hyperswitch.payment_intent_mv on cluster '{cluster}' TO hyperswitch.payment_intents_dist (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` LowCardinality(String),
+ `business_label` String,
+ `modified_at` DateTime64(3),
+ `created_at` DateTime64(3),
+ `last_synced` Nullable(DateTime64(3)),
+ `inserted_at` DateTime64(3),
+ `sign_flag` Int8
+) AS
+SELECT
+ payment_id,
+ merchant_id,
+ status,
+ amount,
+ currency,
+ amount_captured,
+ customer_id,
+ description,
+ return_url,
+ connector_id,
+ statement_descriptor_name,
+ statement_descriptor_suffix,
+ setup_future_usage,
+ off_session,
+ client_secret,
+ active_attempt_id,
+ business_country,
+ business_label,
+ modified_at,
+ created_at,
+ last_synced,
+ now() as inserted_at,
+ sign_flag
+FROM hyperswitch.payment_intents_queue
+WHERE length(_error) = 0;
+
+CREATE MATERIALIZED VIEW hyperswitch.payment_intent_parse_errors on cluster '{cluster}'
+(
+ `topic` String,
+ `partition` Int64,
+ `offset` Int64,
+ `raw` String,
+ `error` String
+)
+ENGINE = MergeTree
+ORDER BY (topic, partition, offset)
+SETTINGS index_granularity = 8192 AS
+SELECT
+ _topic AS topic,
+ _partition AS partition,
+ _offset AS offset,
+ _raw_message AS raw,
+ _error AS error
+FROM hyperswitch.payment_intents_queue
+WHERE length(_error) > 0
+;
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql
new file mode 100644
index 000000000000..bf5f6e0e2405
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/refund_analytics.sql
@@ -0,0 +1,173 @@
+CREATE TABLE hyperswitch.refund_queue on cluster '{cluster}' (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime,
+ `modified_at` DateTime,
+ `sign_flag` Int8
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-refund-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+CREATE TABLE hyperswitch.refund_dist on cluster '{cluster}' (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Distributed('{cluster}', 'hyperswitch', 'refund_clustered', cityHash64(refund_id));
+
+
+
+CREATE TABLE hyperswitch.refund_clustered on cluster '{cluster}' (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8,
+ INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1,
+ INDEX refundTypeIndex refund_type TYPE bloom_filter GRANULARITY 1,
+ INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex refund_status TYPE bloom_filter GRANULARITY 1
+) ENGINE = ReplicatedCollapsingMergeTree(
+ '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/refund_clustered',
+ '{replica}',
+ sign_flag
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, refund_id)
+TTL created_at + toIntervalMonth(6)
+;
+
+CREATE MATERIALIZED VIEW hyperswitch.kafka_parse_refund on cluster '{cluster}' TO hyperswitch.refund_dist (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime64(3),
+ `modified_at` DateTime64(3),
+ `inserted_at` DateTime64(3),
+ `sign_flag` Int8
+) AS
+SELECT
+ internal_reference_id,
+ refund_id,
+ payment_id,
+ merchant_id,
+ connector_transaction_id,
+ connector,
+ connector_refund_id,
+ external_reference_id,
+ refund_type,
+ total_amount,
+ currency,
+ refund_amount,
+ refund_status,
+ sent_to_gateway,
+ refund_error_message,
+ refund_arn,
+ attempt_id,
+ description,
+ refund_reason,
+ refund_error_code,
+ created_at,
+ modified_at,
+ now() as inserted_at,
+ sign_flag
+FROM hyperswitch.refund_queue
+WHERE length(_error) = 0;
+
+CREATE MATERIALIZED VIEW hyperswitch.refund_parse_errors on cluster '{cluster}'
+(
+ `topic` String,
+ `partition` Int64,
+ `offset` Int64,
+ `raw` String,
+ `error` String
+)
+ENGINE = MergeTree
+ORDER BY (topic, partition, offset)
+SETTINGS index_granularity = 8192 AS
+SELECT
+ _topic AS topic,
+ _partition AS partition,
+ _offset AS offset,
+ _raw_message AS raw,
+ _error AS error
+FROM hyperswitch.refund_queue
+WHERE length(_error) > 0
+;
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql
new file mode 100644
index 000000000000..37766392bc70
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/sdk_events.sql
@@ -0,0 +1,156 @@
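+-- Ingestion pipeline for SDK events (cluster setup): a Kafka engine table consumes the topic,
+-- a materialized view copies parsed rows into the Distributed table backed by a replicated
+-- MergeTree, and a second materialized view captures malformed messages via the
+-- _error/_raw_message virtual columns exposed by kafka_handle_error_mode = 'stream'.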
+CREATE TABLE hyperswitch.sdk_events_queue on cluster '{cluster}' (
+ `payment_id` Nullable(String),
+ `merchant_id` String,
+ `remote_ip` Nullable(String),
+ `log_type` LowCardinality(Nullable(String)),
+ `event_name` LowCardinality(Nullable(String)),
+ `first_event` LowCardinality(Nullable(String)),
+ `latency` Nullable(UInt32),
+ `timestamp` String,
+ `browser_name` LowCardinality(Nullable(String)),
+ `browser_version` Nullable(String),
+ `platform` LowCardinality(Nullable(String)),
+ `source` LowCardinality(Nullable(String)),
+ `category` LowCardinality(Nullable(String)),
+ `version` LowCardinality(Nullable(String)),
+ `value` Nullable(String),
+ `component` LowCardinality(Nullable(String)),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_experience` LowCardinality(Nullable(String))
+) ENGINE = Kafka SETTINGS
+ kafka_broker_list = 'hyper-c1-kafka-brokers.kafka-cluster.svc.cluster.local:9092',
+ kafka_topic_list = 'hyper-sdk-logs',
+ kafka_group_name = 'hyper-c1',
+ kafka_format = 'JSONEachRow',
+ kafka_handle_error_mode = 'stream';
+
+CREATE TABLE hyperswitch.sdk_events_clustered on cluster '{cluster}' (
+ `payment_id` Nullable(String),
+ `merchant_id` String,
+ `remote_ip` Nullable(String),
+ `log_type` LowCardinality(Nullable(String)),
+ `event_name` LowCardinality(Nullable(String)),
+ `first_event` Bool DEFAULT 1,
+ `browser_name` LowCardinality(Nullable(String)),
+ `browser_version` Nullable(String),
+ `platform` LowCardinality(Nullable(String)),
+ `source` LowCardinality(Nullable(String)),
+ `category` LowCardinality(Nullable(String)),
+ `version` LowCardinality(Nullable(String)),
+ `value` Nullable(String),
+ `component` LowCardinality(Nullable(String)),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_experience` LowCardinality(Nullable(String)) DEFAULT '',
+ `created_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4),
+ `inserted_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4),
+ `latency` Nullable(UInt32) DEFAULT 0,
+ INDEX paymentMethodIndex payment_method TYPE bloom_filter GRANULARITY 1,
+ INDEX eventIndex event_name TYPE bloom_filter GRANULARITY 1,
+ INDEX platformIndex platform TYPE bloom_filter GRANULARITY 1,
+ INDEX logTypeIndex log_type TYPE bloom_filter GRANULARITY 1,
+ INDEX categoryIndex category TYPE bloom_filter GRANULARITY 1,
+ INDEX sourceIndex source TYPE bloom_filter GRANULARITY 1,
+ INDEX componentIndex component TYPE bloom_filter GRANULARITY 1,
+ INDEX firstEventIndex first_event TYPE bloom_filter GRANULARITY 1
+) ENGINE = ReplicatedMergeTree(
+ '/clickhouse/{installation}/{cluster}/tables/{shard}/hyperswitch/sdk_events_clustered', '{replica}'
+)
+PARTITION BY
+ toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id)
+TTL
+ toDateTime(created_at) + toIntervalMonth(6)
+SETTINGS
+ index_granularity = 8192
+;
+
+CREATE TABLE hyperswitch.sdk_events_dist on cluster '{cluster}' (
+ `payment_id` Nullable(String),
+ `merchant_id` String,
+ `remote_ip` Nullable(String),
+ `log_type` LowCardinality(Nullable(String)),
+ `event_name` LowCardinality(Nullable(String)),
+ `first_event` Bool DEFAULT 1,
+ `browser_name` LowCardinality(Nullable(String)),
+ `browser_version` Nullable(String),
+ `platform` LowCardinality(Nullable(String)),
+ `source` LowCardinality(Nullable(String)),
+ `category` LowCardinality(Nullable(String)),
+ `version` LowCardinality(Nullable(String)),
+ `value` Nullable(String),
+ `component` LowCardinality(Nullable(String)),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_experience` LowCardinality(Nullable(String)) DEFAULT '',
+ `created_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4),
+ `inserted_at` DateTime64(3) DEFAULT now64() CODEC(T64, LZ4),
+ `latency` Nullable(UInt32) DEFAULT 0
+) ENGINE = Distributed(
+ '{cluster}', 'hyperswitch', 'sdk_events_clustered', rand()
+);
+
+CREATE MATERIALIZED VIEW hyperswitch.sdk_events_mv on cluster '{cluster}' TO hyperswitch.sdk_events_dist (
+ `payment_id` Nullable(String),
+ `merchant_id` String,
+ `remote_ip` Nullable(String),
+ `log_type` LowCardinality(Nullable(String)),
+ `event_name` LowCardinality(Nullable(String)),
+ `first_event` Bool,
+ `latency` Nullable(UInt32),
+ `browser_name` LowCardinality(Nullable(String)),
+ `browser_version` Nullable(String),
+ `platform` LowCardinality(Nullable(String)),
+ `source` LowCardinality(Nullable(String)),
+ `category` LowCardinality(Nullable(String)),
+ `version` LowCardinality(Nullable(String)),
+ `value` Nullable(String),
+ `component` LowCardinality(Nullable(String)),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_experience` LowCardinality(Nullable(String)),
+ `created_at` DateTime64(3)
+) AS
+SELECT
+ payment_id,
+ merchant_id,
+ remote_ip,
+ log_type,
+ event_name,
+ multiIf(first_event = 'true', 1, 0) AS first_event,
+ latency,
+ browser_name,
+ browser_version,
+ platform,
+ source,
+ category,
+ version,
+ value,
+ component,
+ payment_method,
+ payment_experience,
+ toDateTime64(timestamp, 3) AS created_at
+FROM
+ hyperswitch.sdk_events_queue
+WHERE length(_error) = 0
+;
+
+CREATE MATERIALIZED VIEW hyperswitch.sdk_parse_errors on cluster '{cluster}' (
+ `topic` String,
+ `partition` Int64,
+ `offset` Int64,
+ `raw` String,
+ `error` String
+) ENGINE = MergeTree
+ ORDER BY (topic, partition, offset)
+SETTINGS
+ index_granularity = 8192 AS
+SELECT
+ _topic AS topic,
+ _partition AS partition,
+ _offset AS offset,
+ _raw_message AS raw,
+ _error AS error
+FROM
+ hyperswitch.sdk_events_queue
+WHERE
+ length(_error) > 0
+;
diff --git a/crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql b/crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql
new file mode 100644
index 000000000000..202b94ac6040
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/cluster_setup/scripts/seed_scripts.sql
@@ -0,0 +1 @@
+create database hyperswitch on cluster '{cluster}';
\ No newline at end of file
diff --git a/crates/analytics/docs/clickhouse/scripts/api_events_v2.sql b/crates/analytics/docs/clickhouse/scripts/api_events_v2.sql
new file mode 100644
index 000000000000..b41a75fe67e5
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/scripts/api_events_v2.sql
@@ -0,0 +1,134 @@
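+-- Single-node variant of the API events pipeline: Kafka engine table -> materialized view
+-- -> MergeTree table, plus a parse-errors view fed by the Kafka virtual columns.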
+CREATE TABLE api_events_v2_queue (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `connector` Nullable(String),
+ `request_id` String,
+ `flow_type` LowCardinality(String),
+ `api_flow` LowCardinality(String),
+ `api_auth_type` LowCardinality(String),
+ `request` String,
+ `response` Nullable(String),
+ `authentication_data` Nullable(String),
+ `status_code` UInt32,
+ `created_at` DateTime CODEC(T64, LZ4),
+ `latency` UInt128,
+ `user_agent` String,
+ `ip_addr` String,
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-api-log-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+
+CREATE TABLE api_events_v2_dist (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `connector` Nullable(String),
+ `request_id` String,
+ `flow_type` LowCardinality(String),
+ `api_flow` LowCardinality(String),
+ `api_auth_type` LowCardinality(String),
+ `request` String,
+ `response` Nullable(String),
+ `authentication_data` Nullable(String),
+ `status_code` UInt32,
+ `created_at` DateTime CODEC(T64, LZ4),
+ `inserted_at` DateTime CODEC(T64, LZ4),
+ `latency` UInt128,
+ `user_agent` String,
+ `ip_addr` String,
+ INDEX flowIndex flow_type TYPE bloom_filter GRANULARITY 1,
+ INDEX apiIndex api_flow TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex status_code TYPE bloom_filter GRANULARITY 1
+) ENGINE = MergeTree
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, flow_type, status_code, api_flow)
+TTL created_at + toIntervalMonth(6)
+;
+
+CREATE MATERIALIZED VIEW api_events_v2_mv TO api_events_v2_dist (
+ `merchant_id` String,
+ `payment_id` Nullable(String),
+ `refund_id` Nullable(String),
+ `payment_method_id` Nullable(String),
+ `payment_method` Nullable(String),
+ `payment_method_type` Nullable(String),
+ `customer_id` Nullable(String),
+ `user_id` Nullable(String),
+ `connector` Nullable(String),
+ `request_id` String,
+ `flow_type` LowCardinality(String),
+ `api_flow` LowCardinality(String),
+ `api_auth_type` LowCardinality(String),
+ `request` String,
+ `response` Nullable(String),
+ `authentication_data` Nullable(String),
+ `status_code` UInt32,
+ `created_at` DateTime CODEC(T64, LZ4),
+ `inserted_at` DateTime CODEC(T64, LZ4),
+ `latency` UInt128,
+ `user_agent` String,
+ `ip_addr` String
+) AS
+SELECT
+ merchant_id,
+ payment_id,
+ refund_id,
+ payment_method_id,
+ payment_method,
+ payment_method_type,
+ customer_id,
+ user_id,
+ connector,
+ request_id,
+ flow_type,
+ api_flow,
+ api_auth_type,
+ request,
+ response,
+ authentication_data,
+ status_code,
+ created_at,
+ now() as inserted_at,
+ latency,
+ user_agent,
+ ip_addr
+FROM
+ api_events_v2_queue
+where length(_error) = 0;
+
+
+CREATE MATERIALIZED VIEW api_events_parse_errors
+(
+ `topic` String,
+ `partition` Int64,
+ `offset` Int64,
+ `raw` String,
+ `error` String
+)
+ENGINE = MergeTree
+ORDER BY (topic, partition, offset)
+SETTINGS index_granularity = 8192 AS
+SELECT
+ _topic AS topic,
+ _partition AS partition,
+ _offset AS offset,
+ _raw_message AS raw,
+ _error AS error
+FROM api_events_v2_queue
+WHERE length(_error) > 0
+;
diff --git a/crates/analytics/docs/clickhouse/scripts/payment_attempts.sql b/crates/analytics/docs/clickhouse/scripts/payment_attempts.sql
new file mode 100644
index 000000000000..276e311e57a9
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/scripts/payment_attempts.sql
@@ -0,0 +1,156 @@
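+-- Payment attempts use a CollapsingMergeTree keyed on sign_flag: an update is written as a
+-- cancel row (-1) for the old state plus an insert row (+1) for the new state, so reads
+-- aggregate with sum(sign_flag) rather than count(*).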
+CREATE TABLE payment_attempts_queue (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` LowCardinality(Nullable(String)),
+ `capture_on` Nullable(DateTime) CODEC(T64, LZ4),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `modified_at` DateTime CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-payment-attempt-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+CREATE TABLE payment_attempt_dist (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` Nullable(String),
+ `capture_on` Nullable(DateTime) CODEC(T64, LZ4),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8,
+ INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1,
+ INDEX paymentMethodIndex payment_method TYPE bloom_filter GRANULARITY 1,
+ INDEX authenticationTypeIndex authentication_type TYPE bloom_filter GRANULARITY 1,
+ INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex status TYPE bloom_filter GRANULARITY 1
+) ENGINE = CollapsingMergeTree(
+ sign_flag
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, attempt_id)
+TTL created_at + toIntervalMonth(6)
+;
+
+
+CREATE MATERIALIZED VIEW kafka_parse_pa TO payment_attempt_dist (
+ `payment_id` String,
+ `merchant_id` String,
+ `attempt_id` String,
+ `status` LowCardinality(String),
+ `amount` Nullable(UInt32),
+ `currency` LowCardinality(Nullable(String)),
+ `connector` LowCardinality(Nullable(String)),
+ `save_to_locker` Nullable(Bool),
+ `error_message` Nullable(String),
+ `offer_amount` Nullable(UInt32),
+ `surcharge_amount` Nullable(UInt32),
+ `tax_amount` Nullable(UInt32),
+ `payment_method_id` Nullable(String),
+ `payment_method` LowCardinality(Nullable(String)),
+ `payment_method_type` LowCardinality(Nullable(String)),
+ `connector_transaction_id` Nullable(String),
+ `capture_method` Nullable(String),
+ `confirm` Bool,
+ `authentication_type` LowCardinality(Nullable(String)),
+ `cancellation_reason` Nullable(String),
+ `amount_to_capture` Nullable(UInt32),
+ `mandate_id` Nullable(String),
+ `browser_info` Nullable(String),
+ `error_code` Nullable(String),
+ `connector_metadata` Nullable(String),
+ `payment_experience` Nullable(String),
+ `created_at` DateTime64(3),
+ `capture_on` Nullable(DateTime64(3)),
+ `last_synced` Nullable(DateTime64(3)),
+ `modified_at` DateTime64(3),
+ `inserted_at` DateTime64(3),
+ `sign_flag` Int8
+) AS
+SELECT
+ payment_id,
+ merchant_id,
+ attempt_id,
+ status,
+ amount,
+ currency,
+ connector,
+ save_to_locker,
+ error_message,
+ offer_amount,
+ surcharge_amount,
+ tax_amount,
+ payment_method_id,
+ payment_method,
+ payment_method_type,
+ connector_transaction_id,
+ capture_method,
+ confirm,
+ authentication_type,
+ cancellation_reason,
+ amount_to_capture,
+ mandate_id,
+ browser_info,
+ error_code,
+ connector_metadata,
+ payment_experience,
+ created_at,
+ capture_on,
+ last_synced,
+ modified_at,
+ now() as inserted_at,
+ sign_flag
+FROM
+ payment_attempts_queue;
+
diff --git a/crates/analytics/docs/clickhouse/scripts/payment_intents.sql b/crates/analytics/docs/clickhouse/scripts/payment_intents.sql
new file mode 100644
index 000000000000..8cd487f364b4
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/scripts/payment_intents.sql
@@ -0,0 +1,116 @@
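+-- Same Kafka -> materialized view -> CollapsingMergeTree pattern as payment attempts,
+-- applied to payment intent events.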
+CREATE TABLE payment_intents_queue (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` String,
+ `business_label` String,
+ `modified_at` DateTime CODEC(T64, LZ4),
+ `created_at` DateTime CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-payment-intent-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+
+CREATE TABLE payment_intents_dist (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` LowCardinality(String),
+ `business_label` String,
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `last_synced` Nullable(DateTime) CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8,
+ INDEX connectorIndex connector_id TYPE bloom_filter GRANULARITY 1,
+ INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex status TYPE bloom_filter GRANULARITY 1
+) ENGINE = CollapsingMergeTree(
+ sign_flag
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, payment_id)
+TTL created_at + toIntervalMonth(6)
+;
+
+CREATE MATERIALIZED VIEW kafka_parse_payment_intent TO payment_intents_dist (
+ `payment_id` String,
+ `merchant_id` String,
+ `status` LowCardinality(String),
+ `amount` UInt32,
+ `currency` LowCardinality(Nullable(String)),
+ `amount_captured` Nullable(UInt32),
+ `customer_id` Nullable(String),
+ `description` Nullable(String),
+ `return_url` Nullable(String),
+ `connector_id` LowCardinality(Nullable(String)),
+ `statement_descriptor_name` Nullable(String),
+ `statement_descriptor_suffix` Nullable(String),
+ `setup_future_usage` LowCardinality(Nullable(String)),
+ `off_session` Nullable(Bool),
+ `client_secret` Nullable(String),
+ `active_attempt_id` String,
+ `business_country` LowCardinality(String),
+ `business_label` String,
+ `modified_at` DateTime64(3),
+ `created_at` DateTime64(3),
+ `last_synced` Nullable(DateTime64(3)),
+ `inserted_at` DateTime64(3),
+ `sign_flag` Int8
+) AS
+SELECT
+ payment_id,
+ merchant_id,
+ status,
+ amount,
+ currency,
+ amount_captured,
+ customer_id,
+ description,
+ return_url,
+ connector_id,
+ statement_descriptor_name,
+ statement_descriptor_suffix,
+ setup_future_usage,
+ off_session,
+ client_secret,
+ active_attempt_id,
+ business_country,
+ business_label,
+ modified_at,
+ created_at,
+ last_synced,
+ now() as inserted_at,
+ sign_flag
+FROM payment_intents_queue;
diff --git a/crates/analytics/docs/clickhouse/scripts/refunds.sql b/crates/analytics/docs/clickhouse/scripts/refunds.sql
new file mode 100644
index 000000000000..a131270c1326
--- /dev/null
+++ b/crates/analytics/docs/clickhouse/scripts/refunds.sql
@@ -0,0 +1,121 @@
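+-- Refund events follow the same collapsing pattern; the materialized view stamps
+-- inserted_at with now() at consumption time.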
+CREATE TABLE refund_queue (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime CODEC(T64, LZ4),
+ `modified_at` DateTime CODEC(T64, LZ4),
+ `sign_flag` Int8
+) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka0:29092',
+kafka_topic_list = 'hyperswitch-refund-events',
+kafka_group_name = 'hyper-c1',
+kafka_format = 'JSONEachRow',
+kafka_handle_error_mode = 'stream';
+
+
+CREATE TABLE refund_dist (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `modified_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `inserted_at` DateTime DEFAULT now() CODEC(T64, LZ4),
+ `sign_flag` Int8,
+ INDEX connectorIndex connector TYPE bloom_filter GRANULARITY 1,
+ INDEX refundTypeIndex refund_type TYPE bloom_filter GRANULARITY 1,
+ INDEX currencyIndex currency TYPE bloom_filter GRANULARITY 1,
+ INDEX statusIndex refund_status TYPE bloom_filter GRANULARITY 1
+) ENGINE = CollapsingMergeTree(
+ sign_flag
+)
+PARTITION BY toStartOfDay(created_at)
+ORDER BY
+ (created_at, merchant_id, refund_id)
+TTL created_at + toIntervalMonth(6)
+;
+
+CREATE MATERIALIZED VIEW kafka_parse_refund TO refund_dist (
+ `internal_reference_id` String,
+ `refund_id` String,
+ `payment_id` String,
+ `merchant_id` String,
+ `connector_transaction_id` String,
+ `connector` LowCardinality(Nullable(String)),
+ `connector_refund_id` Nullable(String),
+ `external_reference_id` Nullable(String),
+ `refund_type` LowCardinality(String),
+ `total_amount` Nullable(UInt32),
+ `currency` LowCardinality(String),
+ `refund_amount` Nullable(UInt32),
+ `refund_status` LowCardinality(String),
+ `sent_to_gateway` Bool,
+ `refund_error_message` Nullable(String),
+ `refund_arn` Nullable(String),
+ `attempt_id` String,
+ `description` Nullable(String),
+ `refund_reason` Nullable(String),
+ `refund_error_code` Nullable(String),
+ `created_at` DateTime64(3),
+ `modified_at` DateTime64(3),
+ `inserted_at` DateTime64(3),
+ `sign_flag` Int8
+) AS
+SELECT
+ internal_reference_id,
+ refund_id,
+ payment_id,
+ merchant_id,
+ connector_transaction_id,
+ connector,
+ connector_refund_id,
+ external_reference_id,
+ refund_type,
+ total_amount,
+ currency,
+ refund_amount,
+ refund_status,
+ sent_to_gateway,
+ refund_error_message,
+ refund_arn,
+ attempt_id,
+ description,
+ refund_reason,
+ refund_error_code,
+ created_at,
+ modified_at,
+ now() as inserted_at,
+ sign_flag
+FROM refund_queue;
diff --git a/crates/analytics/src/api_event.rs b/crates/analytics/src/api_event.rs
new file mode 100644
index 000000000000..113344d47254
--- /dev/null
+++ b/crates/analytics/src/api_event.rs
@@ -0,0 +1,9 @@
+mod core;
+pub mod events;
+pub mod filters;
+pub mod metrics;
+pub mod types;
+
+pub trait APIEventAnalytics: events::ApiLogsFilterAnalytics {}
+
+pub use self::core::{api_events_core, get_api_event_metrics, get_filters};
diff --git a/crates/analytics/src/api_event/core.rs b/crates/analytics/src/api_event/core.rs
new file mode 100644
index 000000000000..b368d6374f75
--- /dev/null
+++ b/crates/analytics/src/api_event/core.rs
@@ -0,0 +1,176 @@
+use std::collections::HashMap;
+
+use api_models::analytics::{
+ api_event::{
+ ApiEventMetricsBucketIdentifier, ApiEventMetricsBucketValue, ApiLogsRequest,
+ ApiMetricsBucketResponse,
+ },
+ AnalyticsMetadata, ApiEventFiltersResponse, GetApiEventFiltersRequest,
+ GetApiEventMetricRequest, MetricsResponse,
+};
+use error_stack::{IntoReport, ResultExt};
+use router_env::{
+ instrument, logger,
+ tracing::{self, Instrument},
+};
+
+use super::{
+ events::{get_api_event, ApiLogsResult},
+ metrics::ApiEventMetricRow,
+};
+use crate::{
+ errors::{AnalyticsError, AnalyticsResult},
+ metrics,
+ types::FiltersError,
+ AnalyticsProvider,
+};
+
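+/// Fetches raw API log events for a merchant from ClickHouse; the SQLx provider is not
+/// supported for this domain, so combined providers always route to the ClickHouse pool.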
+#[instrument(skip_all)]
+pub async fn api_events_core(
+ pool: &AnalyticsProvider,
+ req: ApiLogsRequest,
+ merchant_id: String,
+) -> AnalyticsResult<Vec<ApiLogsResult>> {
+ let data = match pool {
+ AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented)
+ .into_report()
+ .attach_printable("SQL Analytics is not implemented for API Events"),
+ AnalyticsProvider::Clickhouse(pool) => get_api_event(&merchant_id, req, pool).await,
+ AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
+ | AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
+ get_api_event(&merchant_id, req, ckh_pool).await
+ }
+ }
+ .change_context(AnalyticsError::UnknownError)?;
+ Ok(data)
+}
+
+pub async fn get_filters(
+ pool: &AnalyticsProvider,
+ req: GetApiEventFiltersRequest,
+ merchant_id: String,
+) -> AnalyticsResult<ApiEventFiltersResponse> {
+ use api_models::analytics::{api_event::ApiEventDimensions, ApiEventFilterValue};
+
+ use super::filters::get_api_event_filter_for_dimension;
+ use crate::api_event::filters::ApiEventFilter;
+
+ let mut res = ApiEventFiltersResponse::default();
+ for dim in req.group_by_names {
+ let values = match pool {
+ AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented)
+ .into_report()
+ .attach_printable("SQL Analytics is not implemented for API Events"),
+ AnalyticsProvider::Clickhouse(ckh_pool)
+ | AnalyticsProvider::CombinedSqlx(_, ckh_pool)
+ | AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
+ get_api_event_filter_for_dimension(dim, &merchant_id, &req.time_range, ckh_pool)
+ .await
+ }
+ }
+ .change_context(AnalyticsError::UnknownError)?
+ .into_iter()
+ .filter_map(|fil: ApiEventFilter| match dim {
+ ApiEventDimensions::StatusCode => fil.status_code.map(|i| i.to_string()),
+ ApiEventDimensions::FlowType => fil.flow_type,
+ ApiEventDimensions::ApiFlow => fil.api_flow,
+ })
+ .collect::<Vec<String>>();
+ res.query_data.push(ApiEventFilterValue {
+ dimension: dim,
+ values,
+ })
+ }
+
+ Ok(res)
+}
+
+#[instrument(skip_all)]
+pub async fn get_api_event_metrics(
+ pool: &AnalyticsProvider,
+ merchant_id: &str,
+ req: GetApiEventMetricRequest,
+) -> AnalyticsResult<MetricsResponse<ApiMetricsBucketResponse>> {
+ let mut metrics_accumulator: HashMap<ApiEventMetricsBucketIdentifier, ApiEventMetricRow> =
+ HashMap::new();
+
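+ // Fan out one ClickHouse query per requested metric and merge the rows per bucket below.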
+ let mut set = tokio::task::JoinSet::new();
+ for metric_type in req.metrics.iter().cloned() {
+ let req = req.clone();
+ let pool = pool.clone();
+ let task_span = tracing::debug_span!(
+ "analytics_api_metrics_query",
+ api_event_metric = metric_type.as_ref()
+ );
+
+ // TODO: lifetime issues with joinset,
+ // can be optimized away if joinset lifetime requirements are relaxed
+ let merchant_id_scoped = merchant_id.to_owned();
+ set.spawn(
+ async move {
+ let data = pool
+ .get_api_event_metrics(
+ &metric_type,
+ &req.group_by_names.clone(),
+ &merchant_id_scoped,
+ &req.filters,
+ &req.time_series.map(|t| t.granularity),
+ &req.time_range,
+ )
+ .await
+ .change_context(AnalyticsError::UnknownError);
+ (metric_type, data)
+ }
+ .instrument(task_span),
+ );
+ }
+
+ while let Some((metric, data)) = set
+ .join_next()
+ .await
+ .transpose()
+ .into_report()
+ .change_context(AnalyticsError::UnknownError)?
+ {
+ let data = data?;
+ let attributes = &[
+ metrics::request::add_attributes("metric_type", metric.to_string()),
+ metrics::request::add_attributes("source", pool.to_string()),
+ ];
+
+ let value = u64::try_from(data.len());
+ if let Ok(val) = value {
+ metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes);
+ logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
+ }
+ for (id, value) in data {
+ metrics_accumulator
+ .entry(id)
+ .and_modify(|data| {
+ data.api_count = data.api_count.or(value.api_count);
+ data.status_code_count = data.status_code_count.or(value.status_code_count);
+ data.latency = data.latency.or(value.latency);
+ })
+ .or_insert(value);
+ }
+ }
+
+ let query_data: Vec<ApiMetricsBucketResponse> = metrics_accumulator
+ .into_iter()
+ .map(|(id, val)| ApiMetricsBucketResponse {
+ values: ApiEventMetricsBucketValue {
+ latency: val.latency,
+ api_count: val.api_count,
+ status_code_count: val.status_code_count,
+ },
+ dimensions: id,
+ })
+ .collect();
+
+ Ok(MetricsResponse {
+ query_data,
+ meta_data: [AnalyticsMetadata {
+ current_time_range: req.time_range,
+ }],
+ })
+}
diff --git a/crates/analytics/src/api_event/events.rs b/crates/analytics/src/api_event/events.rs
new file mode 100644
index 000000000000..73b3fb9cbad2
--- /dev/null
+++ b/crates/analytics/src/api_event/events.rs
@@ -0,0 +1,105 @@
+use api_models::analytics::{
+ api_event::{ApiLogsRequest, QueryType},
+ Granularity,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use router_env::Flow;
+use time::PrimitiveDateTime;
+
+use crate::{
+ query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window},
+ types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
+};
+pub trait ApiLogsFilterAnalytics: LoadRow<ApiLogsResult> {}
+
+pub async fn get_api_event(
+ merchant_id: &String,
+ query_param: ApiLogsRequest,
+ pool: &T,
+) -> FiltersResult<Vec<ApiLogsResult>>
+where
+ T: AnalyticsDataSource + ApiLogsFilterAnalytics,
+ PrimitiveDateTime: ToSql<T>,
+ AnalyticsCollection: ToSql<T>,
+ Granularity: GroupByClause<T>,
+ Aggregate<&'static str>: ToSql<T>,
+ Window<&'static str>: ToSql<T>,
+{
+ let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+ query_builder.add_select_column("*").switch()?;
+
+ query_builder
+ .add_filter_clause("merchant_id", merchant_id)
+ .switch()?;
+ match query_param.query_param {
+ QueryType::Payment { payment_id } => query_builder
+ .add_filter_clause("payment_id", payment_id)
+ .switch()?,
+ QueryType::Refund {
+ payment_id,
+ refund_id,
+ } => {
+ query_builder
+ .add_filter_clause("payment_id", payment_id)
+ .switch()?;
+ query_builder
+ .add_filter_clause("refund_id", refund_id)
+ .switch()?;
+ }
+ }
+ if let Some(list_api_name) = query_param.api_name_filter {
+ query_builder
+ .add_filter_in_range_clause("api_flow", &list_api_name)
+ .switch()?;
+ } else {
+ query_builder
+ .add_filter_in_range_clause(
+ "api_flow",
+ &[
+ Flow::PaymentsCancel,
+ Flow::PaymentsCapture,
+ Flow::PaymentsConfirm,
+ Flow::PaymentsCreate,
+ Flow::PaymentsStart,
+ Flow::PaymentsUpdate,
+ Flow::RefundsCreate,
+ Flow::IncomingWebhookReceive,
+ ],
+ )
+ .switch()?;
+ }
+ //TODO!: update the execute_query function to return reports instead of plain errors...
+ query_builder
+ .execute_query::<ApiLogsResult, _>(pool)
+ .await
+ .change_context(FiltersError::QueryBuildingError)?
+ .change_context(FiltersError::QueryExecutionFailure)
+}
+#[derive(Debug, serde::Serialize, serde::Deserialize)]
+pub struct ApiLogsResult {
+ pub merchant_id: String,
+ pub payment_id: Option<String>,
+ pub refund_id: Option<String>,
+ pub payment_method_id: Option<String>,
+ pub payment_method: Option<String>,
+ pub payment_method_type: Option<String>,
+ pub customer_id: Option<String>,
+ pub user_id: Option<String>,
+ pub connector: Option<String>,
+ pub request_id: Option<String>,
+ pub flow_type: String,
+ pub api_flow: String,
+ pub api_auth_type: Option<String>,
+ pub request: String,
+ pub response: Option<String>,
+ pub error: Option<String>,
+ pub authentication_data: Option<String>,
+ pub status_code: u16,
+ pub latency: Option<u128>,
+ pub user_agent: Option<String>,
+ pub hs_latency: Option<u128>,
+ pub ip_addr: Option<String>,
+ #[serde(with = "common_utils::custom_serde::iso8601")]
+ pub created_at: PrimitiveDateTime,
+}
diff --git a/crates/analytics/src/api_event/filters.rs b/crates/analytics/src/api_event/filters.rs
new file mode 100644
index 000000000000..87414ebad4ba
--- /dev/null
+++ b/crates/analytics/src/api_event/filters.rs
@@ -0,0 +1,53 @@
+use api_models::analytics::{api_event::ApiEventDimensions, Granularity, TimeRange};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use crate::{
+ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
+ types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
+};
+
+pub trait ApiEventFilterAnalytics: LoadRow<ApiEventFilter> {}
+
+pub async fn get_api_event_filter_for_dimension(
+ dimension: ApiEventDimensions,
+ merchant_id: &String,
+ time_range: &TimeRange,
+ pool: &T,
+) -> FiltersResult<Vec<ApiEventFilter>>
+where
+ T: AnalyticsDataSource + ApiEventFilterAnalytics,
+ PrimitiveDateTime: ToSql<T>,
+ AnalyticsCollection: ToSql<T>,
+ Granularity: GroupByClause<T>,
+ Aggregate<&'static str>: ToSql<T>,
+ Window<&'static str>: ToSql<T>,
+{
+ let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+ query_builder.add_select_column(dimension).switch()?;
+ time_range
+ .set_filter_clause(&mut query_builder)
+ .attach_printable("Error filtering time range")
+ .switch()?;
+
+ query_builder
+ .add_filter_clause("merchant_id", merchant_id)
+ .switch()?;
+
+ query_builder.set_distinct();
+
+ query_builder
+ .execute_query::<ApiEventFilter, _>(pool)
+ .await
+ .change_context(FiltersError::QueryBuildingError)?
+ .change_context(FiltersError::QueryExecutionFailure)
+}
+
+#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
+pub struct ApiEventFilter {
+ pub status_code: Option<u64>,
+ pub flow_type: Option,
+ pub api_flow: Option,
+}
diff --git a/crates/analytics/src/api_event/metrics.rs b/crates/analytics/src/api_event/metrics.rs
new file mode 100644
index 000000000000..16f2d7a2f5ab
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics.rs
@@ -0,0 +1,110 @@
+use api_models::analytics::{
+ api_event::{
+ ApiEventDimensions, ApiEventFilters, ApiEventMetrics, ApiEventMetricsBucketIdentifier,
+ },
+ Granularity, TimeRange,
+};
+use time::PrimitiveDateTime;
+
+use crate::{
+ query::{Aggregate, GroupByClause, ToSql, Window},
+ types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult},
+};
+
+mod api_count;
+pub mod latency;
+mod status_code_count;
+use api_count::ApiCount;
+use latency::MaxLatency;
+use status_code_count::StatusCodeCount;
+
+use self::latency::LatencyAvg;
+
+#[derive(Debug, PartialEq, Eq, serde::Deserialize)]
+pub struct ApiEventMetricRow {
+ pub latency: Option<u64>,
+ pub api_count: Option<u64>,
+ pub status_code_count: Option<u64>,
+ #[serde(with = "common_utils::custom_serde::iso8601::option")]
+ pub start_bucket: Option<PrimitiveDateTime>,
+ #[serde(with = "common_utils::custom_serde::iso8601::option")]
+ pub end_bucket: Option<PrimitiveDateTime>,
+}
+
+pub trait ApiEventMetricAnalytics: LoadRow<ApiEventMetricRow> + LoadRow<LatencyAvg> {}
+
+#[async_trait::async_trait]
+pub trait ApiEventMetric<T>
+where
+ T: AnalyticsDataSource + ApiEventMetricAnalytics,
+{
+ async fn load_metrics(
+ &self,
+ dimensions: &[ApiEventDimensions],
+ merchant_id: &str,
+ filters: &ApiEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ pool: &T,
+ ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>>;
+}
+
+#[async_trait::async_trait]
+impl<T> ApiEventMetric<T> for ApiEventMetrics
+where
+ T: AnalyticsDataSource + ApiEventMetricAnalytics,
+ PrimitiveDateTime: ToSql<T>,
+ AnalyticsCollection: ToSql<T>,
+ Granularity: GroupByClause<T>,
+ Aggregate<&'static str>: ToSql<T>,
+ Window<&'static str>: ToSql<T>,
+{
+ async fn load_metrics(
+ &self,
+ dimensions: &[ApiEventDimensions],
+ merchant_id: &str,
+ filters: &ApiEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ pool: &T,
+ ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+ match self {
+ Self::Latency => {
+ MaxLatency
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::ApiCount => {
+ ApiCount
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::StatusCodeCount => {
+ StatusCodeCount
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ }
+ }
+}
diff --git a/crates/analytics/src/api_event/metrics/api_count.rs b/crates/analytics/src/api_event/metrics/api_count.rs
new file mode 100644
index 000000000000..7f5f291aa53e
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics/api_count.rs
@@ -0,0 +1,106 @@
+use api_models::analytics::{
+ api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
+ Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::ApiEventMetricRow;
+use crate::{
+ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
+ types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct ApiCount;
+
+#[async_trait::async_trait]
+impl<T> super::ApiEventMetric<T> for ApiCount
+where
+ T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
+ PrimitiveDateTime: ToSql<T>,
+ AnalyticsCollection: ToSql<T>,
+ Granularity: GroupByClause<T>,
+ Aggregate<&'static str>: ToSql<T>,
+ Window<&'static str>: ToSql<T>,
+{
+ async fn load_metrics(
+ &self,
+ _dimensions: &[ApiEventDimensions],
+ merchant_id: &str,
+ filters: &ApiEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ pool: &T,
+ ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+ let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+ query_builder
+ .add_select_column(Aggregate::Count {
+ field: None,
+ alias: Some("api_count"),
+ })
+ .switch()?;
+ if !filters.flow_type.is_empty() {
+ query_builder
+ .add_filter_in_range_clause(ApiEventDimensions::FlowType, &filters.flow_type)
+ .attach_printable("Error adding flow_type filter")
+ .switch()?;
+ }
+ query_builder
+ .add_select_column(Aggregate::Min {
+ field: "created_at",
+ alias: Some("start_bucket"),
+ })
+ .switch()?;
+ query_builder
+ .add_select_column(Aggregate::Max {
+ field: "created_at",
+ alias: Some("end_bucket"),
+ })
+ .switch()?;
+ if let Some(granularity) = granularity.as_ref() {
+ granularity
+ .set_group_by_clause(&mut query_builder)
+ .attach_printable("Error adding granularity")
+ .switch()?;
+ }
+
+ query_builder
+ .add_filter_clause("merchant_id", merchant_id)
+ .switch()?;
+
+ time_range
+ .set_filter_clause(&mut query_builder)
+ .attach_printable("Error filtering time range")
+ .switch()?;
+
+ query_builder
+ .execute_query::<ApiEventMetricRow, _>(pool)
+ .await
+ .change_context(MetricsError::QueryBuildingError)?
+ .change_context(MetricsError::QueryExecutionFailure)?
+ .into_iter()
+ .map(|i| {
+ Ok((
+ ApiEventMetricsBucketIdentifier::new(TimeRange {
+ start_time: match (granularity, i.start_bucket) {
+ (Some(g), Some(st)) => g.clip_to_start(st)?,
+ _ => time_range.start_time,
+ },
+ end_time: granularity.as_ref().map_or_else(
+ || Ok(time_range.end_time),
+ |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
+ )?,
+ }),
+ i,
+ ))
+ })
+ .collect::<error_stack::Result<Vec<_>,
+ crate::query::PostProcessingError,
+ >>()
+ .change_context(MetricsError::PostProcessingFailure)
+ }
+}
diff --git a/crates/analytics/src/api_event/metrics/latency.rs b/crates/analytics/src/api_event/metrics/latency.rs
new file mode 100644
index 000000000000..379b39fbeb9e
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics/latency.rs
@@ -0,0 +1,138 @@
+use api_models::analytics::{
+ api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
+ Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::ApiEventMetricRow;
+use crate::{
+ query::{
+ Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
+ Window,
+ },
+ types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct MaxLatency;
+
+#[async_trait::async_trait]
+impl<T> super::ApiEventMetric<T> for MaxLatency
+where
+ T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
+ PrimitiveDateTime: ToSql<T>,
+ AnalyticsCollection: ToSql<T>,
+ Granularity: GroupByClause<T>,
+ Aggregate<&'static str>: ToSql<T>,
+ Window<&'static str>: ToSql<T>,
+{
+ async fn load_metrics(
+ &self,
+ _dimensions: &[ApiEventDimensions],
+ merchant_id: &str,
+ filters: &ApiEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ pool: &T,
+ ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+ let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+ query_builder
+ .add_select_column(Aggregate::Sum {
+ field: "latency",
+ alias: Some("latency_sum"),
+ })
+ .switch()?;
+
+ query_builder
+ .add_select_column(Aggregate::Count {
+ field: Some("latency"),
+ alias: Some("latency_count"),
+ })
+ .switch()?;
+
+ query_builder
+ .add_select_column(Aggregate::Min {
+ field: "created_at",
+ alias: Some("start_bucket"),
+ })
+ .switch()?;
+ query_builder
+ .add_select_column(Aggregate::Max {
+ field: "created_at",
+ alias: Some("end_bucket"),
+ })
+ .switch()?;
+ if let Some(granularity) = granularity.as_ref() {
+ granularity
+ .set_group_by_clause(&mut query_builder)
+ .attach_printable("Error adding granularity")
+ .switch()?;
+ }
+
+ filters.set_filter_clause(&mut query_builder).switch()?;
+
+ query_builder
+ .add_filter_clause("merchant_id", merchant_id)
+ .switch()?;
+
+ time_range
+ .set_filter_clause(&mut query_builder)
+ .attach_printable("Error filtering time range")
+ .switch()?;
+
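+ // Exclude requests whose body references the internal card-locker address (assumed to be
+ // the IP below) so vault round-trips do not skew the latency aggregate.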
+ query_builder
+ .add_custom_filter_clause("request", "10.63.134.6", FilterTypes::NotLike)
+ .attach_printable("Error filtering out locker IP")
+ .switch()?;
+
+ query_builder
+ .execute_query::<LatencyAvg, _>(pool)
+ .await
+ .change_context(MetricsError::QueryBuildingError)?
+ .change_context(MetricsError::QueryExecutionFailure)?
+ .into_iter()
+ .map(|i| {
+ Ok((
+ ApiEventMetricsBucketIdentifier::new(TimeRange {
+ start_time: match (granularity, i.start_bucket) {
+ (Some(g), Some(st)) => g.clip_to_start(st)?,
+ _ => time_range.start_time,
+ },
+ end_time: granularity.as_ref().map_or_else(
+ || Ok(time_range.end_time),
+ |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
+ )?,
+ }),
+ ApiEventMetricRow {
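+ // Average latency per bucket, computed client-side as latency_sum / latency_count.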
+ latency: if i.latency_count != 0 {
+ Some(i.latency_sum.unwrap_or(0) / i.latency_count)
+ } else {
+ None
+ },
+ api_count: None,
+ status_code_count: None,
+ start_bucket: i.start_bucket,
+ end_bucket: i.end_bucket,
+ },
+ ))
+ })
+ .collect::<error_stack::Result<Vec<_>,
+ crate::query::PostProcessingError,
+ >>()
+ .change_context(MetricsError::PostProcessingFailure)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq, serde::Deserialize)]
+pub struct LatencyAvg {
+ latency_sum: Option<u64>,
+ latency_count: u64,
+ #[serde(with = "common_utils::custom_serde::iso8601::option")]
+ pub start_bucket: Option<PrimitiveDateTime>,
+ #[serde(with = "common_utils::custom_serde::iso8601::option")]
+ pub end_bucket: Option<PrimitiveDateTime>,
+}
diff --git a/crates/analytics/src/api_event/metrics/status_code_count.rs b/crates/analytics/src/api_event/metrics/status_code_count.rs
new file mode 100644
index 000000000000..5c652fd8e0c9
--- /dev/null
+++ b/crates/analytics/src/api_event/metrics/status_code_count.rs
@@ -0,0 +1,103 @@
+use api_models::analytics::{
+ api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
+ Granularity, TimeRange,
+};
+use common_utils::errors::ReportSwitchExt;
+use error_stack::ResultExt;
+use time::PrimitiveDateTime;
+
+use super::ApiEventMetricRow;
+use crate::{
+ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
+ types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
+};
+
+#[derive(Default)]
+pub(super) struct StatusCodeCount;
+
+#[async_trait::async_trait]
+impl<T> super::ApiEventMetric<T> for StatusCodeCount
+where
+ T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
+ PrimitiveDateTime: ToSql<T>,
+ AnalyticsCollection: ToSql<T>,
+ Granularity: GroupByClause<T>,
+ Aggregate<&'static str>: ToSql<T>,
+ Window<&'static str>: ToSql<T>,
+{
+ async fn load_metrics(
+ &self,
+ _dimensions: &[ApiEventDimensions],
+ merchant_id: &str,
+ filters: &ApiEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ pool: &T,
+ ) -> MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+ let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
+
+ query_builder
+ .add_select_column(Aggregate::Count {
+ field: Some("status_code"),
+ alias: Some("status_code_count"),
+ })
+ .switch()?;
+
+ filters.set_filter_clause(&mut query_builder).switch()?;
+
+ query_builder
+ .add_filter_clause("merchant_id", merchant_id)
+ .switch()?;
+
+ time_range
+ .set_filter_clause(&mut query_builder)
+ .attach_printable("Error filtering time range")
+ .switch()?;
+
+ query_builder
+ .add_select_column(Aggregate::Min {
+ field: "created_at",
+ alias: Some("start_bucket"),
+ })
+ .switch()?;
+ query_builder
+ .add_select_column(Aggregate::Max {
+ field: "created_at",
+ alias: Some("end_bucket"),
+ })
+ .switch()?;
+ if let Some(granularity) = granularity.as_ref() {
+ granularity
+ .set_group_by_clause(&mut query_builder)
+ .attach_printable("Error adding granularity")
+ .switch()?;
+ }
+
+ query_builder
+ .execute_query::<ApiEventMetricRow, _>(pool)
+ .await
+ .change_context(MetricsError::QueryBuildingError)?
+ .change_context(MetricsError::QueryExecutionFailure)?
+ .into_iter()
+ .map(|i| {
+ Ok((
+ ApiEventMetricsBucketIdentifier::new(TimeRange {
+ start_time: match (granularity, i.start_bucket) {
+ (Some(g), Some(st)) => g.clip_to_start(st)?,
+ _ => time_range.start_time,
+ },
+ end_time: granularity.as_ref().map_or_else(
+ || Ok(time_range.end_time),
+ |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
+ )?,
+ }),
+ i,
+ ))
+ })
+ .collect::<error_stack::Result<Vec<_>,
+ crate::query::PostProcessingError,
+ >>()
+ .change_context(MetricsError::PostProcessingFailure)
+ }
+}
diff --git a/crates/analytics/src/api_event/types.rs b/crates/analytics/src/api_event/types.rs
new file mode 100644
index 000000000000..72205fc72abf
--- /dev/null
+++ b/crates/analytics/src/api_event/types.rs
@@ -0,0 +1,33 @@
+use api_models::analytics::api_event::{ApiEventDimensions, ApiEventFilters};
+use error_stack::ResultExt;
+
+use crate::{
+ query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
+ types::{AnalyticsCollection, AnalyticsDataSource},
+};
+
+impl<T> QueryFilter<T> for ApiEventFilters
+where
+ T: AnalyticsDataSource,
+ AnalyticsCollection: ToSql<T>,
+{
+ fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
+ if !self.status_code.is_empty() {
+ builder
+ .add_filter_in_range_clause(ApiEventDimensions::StatusCode, &self.status_code)
+ .attach_printable("Error adding status_code filter")?;
+ }
+ if !self.flow_type.is_empty() {
+ builder
+ .add_filter_in_range_clause(ApiEventDimensions::FlowType, &self.flow_type)
+ .attach_printable("Error adding flow_type filter")?;
+ }
+ if !self.api_flow.is_empty() {
+ builder
+ .add_filter_in_range_clause(ApiEventDimensions::ApiFlow, &self.api_flow)
+ .attach_printable("Error adding api_name filter")?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/crates/analytics/src/clickhouse.rs b/crates/analytics/src/clickhouse.rs
new file mode 100644
index 000000000000..964486c93649
--- /dev/null
+++ b/crates/analytics/src/clickhouse.rs
@@ -0,0 +1,458 @@
+use std::sync::Arc;
+
+use actix_web::http::StatusCode;
+use common_utils::errors::ParsingError;
+use error_stack::{IntoReport, Report, ResultExt};
+use router_env::logger;
+use time::PrimitiveDateTime;
+
+use super::{
+ payments::{
+ distribution::PaymentDistributionRow, filters::FilterRow, metrics::PaymentMetricRow,
+ },
+ query::{Aggregate, ToSql, Window},
+ refunds::{filters::RefundFilterRow, metrics::RefundMetricRow},
+ sdk_events::{filters::SdkEventFilter, metrics::SdkEventMetricRow},
+ types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, QueryExecutionError},
+};
+use crate::{
+ api_event::{
+ events::ApiLogsResult,
+ filters::ApiEventFilter,
+ metrics::{latency::LatencyAvg, ApiEventMetricRow},
+ },
+ sdk_events::events::SdkEventsResult,
+ types::TableEngine,
+};
+
+pub type ClickhouseResult<T> = error_stack::Result<T, ClickhouseError>;
+
+#[derive(Clone, Debug)]
+pub struct ClickhouseClient {
+ pub config: Arc<ClickhouseConfig>,
+}
+
+#[derive(Clone, Debug, serde::Deserialize)]
+pub struct ClickhouseConfig {
+ username: String,
+ password: Option<String>,
+ host: String,
+ database_name: String,
+}
+
+impl Default for ClickhouseConfig {
+ fn default() -> Self {
+ Self {
+ username: "default".to_string(),
+ password: None,
+ host: "http://localhost:8123".to_string(),
+ database_name: "default".to_string(),
+ }
+ }
+}
+
+impl ClickhouseClient {
+ async fn execute_query(&self, query: &str) -> ClickhouseResult<Vec<serde_json::Value>> {
+ logger::debug!("Executing query: {query}");
+ let client = reqwest::Client::new();
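+ // Ask ClickHouse for ISO date-times and unquoted 64-bit integers so the JSON rows
+ // deserialize directly into the row structs via serde.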
+ let params = CkhQuery {
+ date_time_output_format: String::from("iso"),
+ output_format_json_quote_64bit_integers: 0,
+ database: self.config.database_name.clone(),
+ };
+ let response = client
+ .post(&self.config.host)
+ .query(&params)
+ .basic_auth(self.config.username.clone(), self.config.password.clone())
+ .body(format!("{query}\nFORMAT JSON"))
+ .send()
+ .await
+ .into_report()
+ .change_context(ClickhouseError::ConnectionError)?;
+
+ logger::debug!(clickhouse_response=?response, query=?query, "Clickhouse response");
+ if response.status() != StatusCode::OK {
+ response.text().await.map_or_else(
+ |er| {
+ Err(ClickhouseError::ResponseError)
+ .into_report()
+ .attach_printable_lazy(|| format!("Error: {er:?}"))
+ },
+ |t| Err(ClickhouseError::ResponseNotOK(t)).into_report(),
+ )
+ } else {
+ Ok(response
+ .json::<CkhOutput<serde_json::Value>>()
+ .await
+ .into_report()
+ .change_context(ClickhouseError::ResponseError)?
+ .data)
+ }
+ }
+}
+
+#[async_trait::async_trait]
+impl AnalyticsDataSource for ClickhouseClient {
+ type Row = serde_json::Value;
+
+ async fn load_results<T>(
+ &self,
+ query: &str,
+ ) -> common_utils::errors::CustomResult<Vec<T>, QueryExecutionError>
+ where
+ Self: LoadRow<T>,
+ {
+ self.execute_query(query)
+ .await
+ .change_context(QueryExecutionError::DatabaseError)?
+ .into_iter()
+ .map(Self::load_row)
+ .collect::<Result<Vec<_>, _>>()
+ .change_context(QueryExecutionError::RowExtractionFailure)
+ }
+
+ fn get_table_engine(table: AnalyticsCollection) -> TableEngine {
+ match table {
+ AnalyticsCollection::Payment
+ | AnalyticsCollection::Refund
+ | AnalyticsCollection::PaymentIntent => {
+ TableEngine::CollapsingMergeTree { sign: "sign_flag" }
+ }
+ AnalyticsCollection::SdkEvents => TableEngine::BasicTree,
+ AnalyticsCollection::ApiEvents => TableEngine::BasicTree,
+ }
+ }
+}
+
+impl<T> LoadRow<T> for ClickhouseClient
+where
+ Self::Row: TryInto<T, Error = Report<ParsingError>>,
+{
+ fn load_row(row: Self::Row) -> common_utils::errors::CustomResult<T, QueryExecutionError> {
+ row.try_into()
+ .change_context(QueryExecutionError::RowExtractionFailure)
+ }
+}
+
+impl super::payments::filters::PaymentFilterAnalytics for ClickhouseClient {}
+impl super::payments::metrics::PaymentMetricAnalytics for ClickhouseClient {}
+impl super::payments::distribution::PaymentDistributionAnalytics for ClickhouseClient {}
+impl super::refunds::metrics::RefundMetricAnalytics for ClickhouseClient {}
+impl super::refunds::filters::RefundFilterAnalytics for ClickhouseClient {}
+impl super::sdk_events::filters::SdkEventFilterAnalytics for ClickhouseClient {}
+impl super::sdk_events::metrics::SdkEventMetricAnalytics for ClickhouseClient {}
+impl super::sdk_events::events::SdkEventsFilterAnalytics for ClickhouseClient {}
+impl super::api_event::events::ApiLogsFilterAnalytics for ClickhouseClient {}
+impl super::api_event::filters::ApiEventFilterAnalytics for ClickhouseClient {}
+impl super::api_event::metrics::ApiEventMetricAnalytics for ClickhouseClient {}
+
+#[derive(Debug, serde::Serialize)]
+struct CkhQuery {
+ date_time_output_format: String,
+ output_format_json_quote_64bit_integers: u8,
+ database: String,
+}
+
+#[derive(Debug, serde::Deserialize)]
+struct CkhOutput<T> {
+ data: Vec<T>,
+}
+
+impl TryInto<ApiLogsResult> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<ApiLogsResult, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse ApiLogsResult in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<SdkEventsResult> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<SdkEventsResult, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse SdkEventsResult in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<PaymentMetricRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<PaymentMetricRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse PaymentMetricRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<PaymentDistributionRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<PaymentDistributionRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse PaymentDistributionRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<FilterRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<FilterRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse FilterRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<RefundMetricRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<RefundMetricRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse RefundMetricRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<RefundFilterRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<RefundFilterRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse RefundFilterRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<ApiEventMetricRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<ApiEventMetricRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse ApiEventMetricRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<LatencyAvg> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<LatencyAvg, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse LatencyAvg in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<SdkEventMetricRow> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<SdkEventMetricRow, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse SdkEventMetricRow in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<SdkEventFilter> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<SdkEventFilter, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse SdkEventFilter in clickhouse results",
+ ))
+ }
+}
+
+impl TryInto<ApiEventFilter> for serde_json::Value {
+ type Error = Report<ParsingError>;
+
+ fn try_into(self) -> Result<ApiEventFilter, Self::Error> {
+ serde_json::from_value(self)
+ .into_report()
+ .change_context(ParsingError::StructParseFailure(
+ "Failed to parse ApiEventFilter in clickhouse results",
+ ))
+ }
+}
+
+impl ToSql<ClickhouseClient> for PrimitiveDateTime {
+ fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result<String, ParsingError> {
+ let format =
+ time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second]")
+ .into_report()
+ .change_context(ParsingError::DateTimeParsingError)
+ .attach_printable("Failed to parse format description")?;
+ self.format(&format)
+ .into_report()
+ .change_context(ParsingError::EncodeError(
+ "failed to encode to clickhouse date-time format",
+ ))
+ .attach_printable("Failed to format date time")
+ }
+}
+
+impl ToSql<ClickhouseClient> for AnalyticsCollection {
+ fn to_sql(&self, _table_engine: &TableEngine) -> error_stack::Result<String, ParsingError> {
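+ // Map each logical collection to the physical ClickHouse table created by the setup scripts.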
+ match self {
+ Self::Payment => Ok("payment_attempt_dist".to_string()),
+ Self::Refund => Ok("refund_dist".to_string()),
+ Self::SdkEvents => Ok("sdk_events_dist".to_string()),
+ Self::ApiEvents => Ok("api_audit_log".to_string()),
+ Self::PaymentIntent => Ok("payment_intents_dist".to_string()),
+ }
+ }
+}
+
+impl<T> ToSql<ClickhouseClient> for Aggregate<T>
+where
+ T: ToSql<ClickhouseClient>,
+{
+ fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result<String, ParsingError> {
+ Ok(match self {
+ Self::Count { field: _, alias } => {
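+ // On collapsing tables a plain count(*) would also count cancel rows, so row
+ // totals are computed as sum(sign) instead.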
+ let query = match table_engine {
+ TableEngine::CollapsingMergeTree { sign } => format!("sum({sign})"),
+ TableEngine::BasicTree => "count(*)".to_string(),
+ };
+ format!(
+ "{query}{}",
+ alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias))
+ )
+ }
+ Self::Sum { field, alias } => {
+ let query = match table_engine {
+ TableEngine::CollapsingMergeTree { sign } => format!(
+ "sum({sign} * {})",
+ field
+ .to_sql(table_engine)
+ .attach_printable("Failed to sum aggregate")?
+ ),
+ TableEngine::BasicTree => format!(
+ "sum({})",
+ field
+ .to_sql(table_engine)
+ .attach_printable("Failed to sum aggregate")?
+ ),
+ };
+ format!(
+ "{query}{}",
+ alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias))
+ )
+ }
+ Self::Min { field, alias } => {
+ format!(
+ "min({}){}",
+ field
+ .to_sql(table_engine)
+ .attach_printable("Failed to min aggregate")?,
+ alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias))
+ )
+ }
+ Self::Max { field, alias } => {
+ format!(
+ "max({}){}",
+ field
+ .to_sql(table_engine)
+ .attach_printable("Failed to max aggregate")?,
+ alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias))
+ )
+ }
+ })
+ }
+}
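+
+// Rough examples of the SQL these variants produce on a CollapsingMergeTree table,
+// assuming the field renders to the column name `amount` and the sign column is
+// `sign` (the column names are illustrative, not taken from this change):
+//   Count { field: _, alias: Some("count") }     => sum(sign) as count
+//   Sum { field, alias: Some("total") }          => sum(sign * amount) as total
+//   Min { field, alias: Some("min_amount") }     => min(amount) as min_amount
+// Multiplying by `sign` lets cancelled rows (sign = -1) net out of counts and sums,
+// while min/max stay plain aggregates.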
+
+impl<T> ToSql<ClickhouseClient> for Window<T>
+where
+ T: ToSql<ClickhouseClient>,
+{
+ fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result<String, ParsingError> {
+ Ok(match self {
+ Self::Sum {
+ field,
+ partition_by,
+ order_by,
+ alias,
+ } => {
+ format!(
+ "sum({}) over ({}{}){}",
+ field
+ .to_sql(table_engine)
+ .attach_printable("Failed to sum window")?,
+ partition_by.as_ref().map_or_else(
+ || "".to_owned(),
+ |partition_by| format!("partition by {}", partition_by.to_owned())
+ ),
+ order_by.as_ref().map_or_else(
+ || "".to_owned(),
+ |(order_column, order)| format!(
+ " order by {} {}",
+ order_column.to_owned(),
+ order.to_string()
+ )
+ ),
+ alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias))
+ )
+ }
+ Self::RowNumber {
+ field: _,
+ partition_by,
+ order_by,
+ alias,
+ } => {
+ format!(
+ "row_number() over ({}{}){}",
+ partition_by.as_ref().map_or_else(
+ || "".to_owned(),
+ |partition_by| format!("partition by {}", partition_by.to_owned())
+ ),
+ order_by.as_ref().map_or_else(
+ || "".to_owned(),
+ |(order_column, order)| format!(
+ " order by {} {}",
+ order_column.to_owned(),
+ order.to_string()
+ )
+ ),
+ alias.map_or_else(|| "".to_owned(), |alias| format!(" as {}", alias))
+ )
+ }
+ })
+ }
+}
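+
+// Rough examples of the window SQL, under the same illustrative column names:
+//   Sum { field, partition_by: Some("payment_id"), order_by: None, alias: Some("total") }
+//     => sum(amount) over (partition by payment_id) as total
+//   RowNumber { field: _, partition_by: Some("payment_id"), order_by: None, alias: Some("row_num") }
+//     => row_number() over (partition by payment_id) as row_num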
+
+#[derive(Debug, thiserror::Error)]
+pub enum ClickhouseError {
+ #[error("Clickhouse connection error")]
+ ConnectionError,
+ #[error("Clickhouse NON-200 response content: '{0}'")]
+ ResponseNotOK(String),
+ #[error("Clickhouse response error")]
+ ResponseError,
+}
diff --git a/crates/analytics/src/core.rs b/crates/analytics/src/core.rs
new file mode 100644
index 000000000000..354e1e2f1766
--- /dev/null
+++ b/crates/analytics/src/core.rs
@@ -0,0 +1,31 @@
+use api_models::analytics::GetInfoResponse;
+
+use crate::{types::AnalyticsDomain, utils};
+
+pub async fn get_domain_info(
+ domain: AnalyticsDomain,
+) -> crate::errors::AnalyticsResult<GetInfoResponse> {
+ let info = match domain {
+ AnalyticsDomain::Payments => GetInfoResponse {
+ metrics: utils::get_payment_metrics_info(),
+ download_dimensions: None,
+ dimensions: utils::get_payment_dimensions(),
+ },
+ AnalyticsDomain::Refunds => GetInfoResponse {
+ metrics: utils::get_refund_metrics_info(),
+ download_dimensions: None,
+ dimensions: utils::get_refund_dimensions(),
+ },
+ AnalyticsDomain::SdkEvents => GetInfoResponse {
+ metrics: utils::get_sdk_event_metrics_info(),
+ download_dimensions: None,
+ dimensions: utils::get_sdk_event_dimensions(),
+ },
+ AnalyticsDomain::ApiEvents => GetInfoResponse {
+ metrics: utils::get_api_event_metrics_info(),
+ download_dimensions: None,
+ dimensions: utils::get_api_event_dimensions(),
+ },
+ };
+ Ok(info)
+}
diff --git a/crates/router/src/analytics/errors.rs b/crates/analytics/src/errors.rs
similarity index 100%
rename from crates/router/src/analytics/errors.rs
rename to crates/analytics/src/errors.rs
diff --git a/crates/analytics/src/lambda_utils.rs b/crates/analytics/src/lambda_utils.rs
new file mode 100644
index 000000000000..f9446a402b4e
--- /dev/null
+++ b/crates/analytics/src/lambda_utils.rs
@@ -0,0 +1,36 @@
+use aws_config::{self, meta::region::RegionProviderChain};
+use aws_sdk_lambda::{config::Region, types::InvocationType::Event, Client};
+use aws_smithy_types::Blob;
+use common_utils::errors::CustomResult;
+use error_stack::{IntoReport, ResultExt};
+
+use crate::errors::AnalyticsError;
+
+async fn get_aws_client(region: String) -> Client {
+ let region_provider = RegionProviderChain::first_try(Region::new(region));
+ let sdk_config = aws_config::from_env().region(region_provider).load().await;
+ Client::new(&sdk_config)
+}
+
+pub async fn invoke_lambda(
+ function_name: &str,
+ region: &str,
+ json_bytes: &[u8],
+) -> CustomResult<(), AnalyticsError> {
+ get_aws_client(region.to_string())
+ .await
+ .invoke()
+ .function_name(function_name)
+ .invocation_type(Event)
+ .payload(Blob::new(json_bytes.to_owned()))
+ .send()
+ .await
+ .into_report()
+ .map_err(|er| {
+ let er_rep = format!("{er:?}");
+ er.attach_printable(er_rep)
+ })
+ .change_context(AnalyticsError::UnknownError)
+ .attach_printable("Lambda invocation failed")?;
+ Ok(())
+}
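+
+// A minimal usage sketch (the function name, region, and payload variable here are
+// hypothetical, not taken from this change). Because the invocation type is `Event`,
+// the call is fire-and-forget: Lambda queues the payload and the function's own
+// result is never awaited.
+//
+//   let payload = serde_json::to_vec(&report_request).unwrap_or_default();
+//   invoke_lambda("payment-report-generator", "us-east-1", &payload).await?;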
diff --git a/crates/analytics/src/lib.rs b/crates/analytics/src/lib.rs
new file mode 100644
index 000000000000..24da77f84f2b
--- /dev/null
+++ b/crates/analytics/src/lib.rs
@@ -0,0 +1,509 @@
+mod clickhouse;
+pub mod core;
+pub mod errors;
+pub mod metrics;
+pub mod payments;
+mod query;
+pub mod refunds;
+
+pub mod api_event;
+pub mod sdk_events;
+mod sqlx;
+mod types;
+use api_event::metrics::{ApiEventMetric, ApiEventMetricRow};
+pub use types::AnalyticsDomain;
+pub mod lambda_utils;
+pub mod utils;
+
+use std::sync::Arc;
+
+use api_models::analytics::{
+ api_event::{
+ ApiEventDimensions, ApiEventFilters, ApiEventMetrics, ApiEventMetricsBucketIdentifier,
+ },
+ payments::{PaymentDimensions, PaymentFilters, PaymentMetrics, PaymentMetricsBucketIdentifier},
+ refunds::{RefundDimensions, RefundFilters, RefundMetrics, RefundMetricsBucketIdentifier},
+ sdk_events::{
+ SdkEventDimensions, SdkEventFilters, SdkEventMetrics, SdkEventMetricsBucketIdentifier,
+ },
+ Distribution, Granularity, TimeRange,
+};
+use clickhouse::ClickhouseClient;
+pub use clickhouse::ClickhouseConfig;
+use error_stack::IntoReport;
+use router_env::{
+ logger,
+ tracing::{self, instrument},
+};
+use storage_impl::config::Database;
+
+use self::{
+ payments::{
+ distribution::{PaymentDistribution, PaymentDistributionRow},
+ metrics::{PaymentMetric, PaymentMetricRow},
+ },
+ refunds::metrics::{RefundMetric, RefundMetricRow},
+ sdk_events::metrics::{SdkEventMetric, SdkEventMetricRow},
+ sqlx::SqlxClient,
+ types::MetricsError,
+};
+
+#[derive(Clone, Debug)]
+pub enum AnalyticsProvider {
+ Sqlx(SqlxClient),
+ Clickhouse(ClickhouseClient),
+ CombinedCkh(SqlxClient, ClickhouseClient),
+ CombinedSqlx(SqlxClient, ClickhouseClient),
+}
+
+impl Default for AnalyticsProvider {
+ fn default() -> Self {
+ Self::Sqlx(SqlxClient::default())
+ }
+}
+
+impl ToString for AnalyticsProvider {
+ fn to_string(&self) -> String {
+ String::from(match self {
+ Self::Clickhouse(_) => "Clickhouse",
+ Self::Sqlx(_) => "Sqlx",
+ Self::CombinedCkh(_, _) => "CombinedCkh",
+ Self::CombinedSqlx(_, _) => "CombinedSqlx",
+ })
+ }
+}
+
+impl AnalyticsProvider {
+ #[instrument(skip_all)]
+ pub async fn get_payment_metrics(
+ &self,
+ metric: &PaymentMetrics,
+ dimensions: &[PaymentDimensions],
+ merchant_id: &str,
+ filters: &PaymentFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ ) -> types::MetricsResult<Vec<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
+ // Record the fetch time for each payment metric
+ metrics::request::record_operation_time(
+ async {
+ match self {
+ Self::Sqlx(pool) => {
+ metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::Clickhouse(pool) => {
+ metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::CombinedCkh(sqlx_pool, ckh_pool) => {
+ let (ckh_result, sqlx_result) = tokio::join!(metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ ),
+ metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ sqlx_pool,
+ ));
+ match (&sqlx_result, &ckh_result) {
+ (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
+ router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics metrics")
+ },
+ _ => {}
+
+ };
+
+ ckh_result
+ }
+ Self::CombinedSqlx(sqlx_pool, ckh_pool) => {
+ let (ckh_result, sqlx_result) = tokio::join!(metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ ),
+ metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ sqlx_pool,
+ ));
+ match (&sqlx_result, &ckh_result) {
+ (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
+ router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics metrics")
+ },
+ _ => {}
+
+ };
+
+ sqlx_result
+ }
+ }
+ },
+ &metrics::METRIC_FETCH_TIME,
+ metric,
+ self,
+ )
+ .await
+ }
+
+ pub async fn get_payment_distribution(
+ &self,
+ distribution: &Distribution,
+ dimensions: &[PaymentDimensions],
+ merchant_id: &str,
+ filters: &PaymentFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ ) -> types::MetricsResult<Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>> {
+ // Record the fetch time for each payment distribution metric
+ metrics::request::record_operation_time(
+ async {
+ match self {
+ Self::Sqlx(pool) => {
+ distribution.distribution_for
+ .load_distribution(
+ distribution,
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::Clickhouse(pool) => {
+ distribution.distribution_for
+ .load_distribution(
+ distribution,
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::CombinedCkh(sqlx_pool, ckh_pool) => {
+ let (ckh_result, sqlx_result) = tokio::join!(distribution.distribution_for
+ .load_distribution(
+ distribution,
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ ),
+ distribution.distribution_for
+ .load_distribution(
+ distribution,
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ sqlx_pool,
+ ));
+ match (&sqlx_result, &ckh_result) {
+ (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
+ router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics distribution")
+ },
+ _ => {}
+
+ };
+
+ ckh_result
+ }
+ Self::CombinedSqlx(sqlx_pool, ckh_pool) => {
+ let (ckh_result, sqlx_result) = tokio::join!(distribution.distribution_for
+ .load_distribution(
+ distribution,
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ ),
+ distribution.distribution_for
+ .load_distribution(
+ distribution,
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ sqlx_pool,
+ ));
+ match (&sqlx_result, &ckh_result) {
+ (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
+ router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics distribution")
+ },
+ _ => {}
+
+ };
+
+ sqlx_result
+ }
+ }
+ },
+ &metrics::METRIC_FETCH_TIME,
+ &distribution.distribution_for,
+ self,
+ )
+ .await
+ }
+
+ pub async fn get_refund_metrics(
+ &self,
+ metric: &RefundMetrics,
+ dimensions: &[RefundDimensions],
+ merchant_id: &str,
+ filters: &RefundFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ ) -> types::MetricsResult<Vec<(RefundMetricsBucketIdentifier, RefundMetricRow)>> {
+ // Record the fetch time for each refund metric
+ metrics::request::record_operation_time(
+ async {
+ match self {
+ Self::Sqlx(pool) => {
+ metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::Clickhouse(pool) => {
+ metric
+ .load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ pool,
+ )
+ .await
+ }
+ Self::CombinedCkh(sqlx_pool, ckh_pool) => {
+ let (ckh_result, sqlx_result) = tokio::join!(
+ metric.load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ ),
+ metric.load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ sqlx_pool,
+ )
+ );
+ match (&sqlx_result, &ckh_result) {
+ (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
+ logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics metrics")
+ }
+ _ => {}
+ };
+ ckh_result
+ }
+ Self::CombinedSqlx(sqlx_pool, ckh_pool) => {
+ let (ckh_result, sqlx_result) = tokio::join!(
+ metric.load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ ),
+ metric.load_metrics(
+ dimensions,
+ merchant_id,
+ filters,
+ granularity,
+ time_range,
+ sqlx_pool,
+ )
+ );
+ match (&sqlx_result, &ckh_result) {
+ (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
+ logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics metrics")
+ }
+ _ => {}
+ };
+ sqlx_result
+ }
+ }
+ },
+ &metrics::METRIC_FETCH_TIME,
+ metric,
+ self,
+ )
+ .await
+ }
+
+ pub async fn get_sdk_event_metrics(
+ &self,
+ metric: &SdkEventMetrics,
+ dimensions: &[SdkEventDimensions],
+ pub_key: &str,
+ filters: &SdkEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ ) -> types::MetricsResult<Vec<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
+ match self {
+ Self::Sqlx(_pool) => Err(MetricsError::NotImplemented).into_report(),
+ Self::Clickhouse(pool) => {
+ metric
+ .load_metrics(dimensions, pub_key, filters, granularity, time_range, pool)
+ .await
+ }
+ Self::CombinedCkh(_sqlx_pool, ckh_pool) | Self::CombinedSqlx(_sqlx_pool, ckh_pool) => {
+ metric
+ .load_metrics(
+ dimensions,
+ pub_key,
+ filters,
+ granularity,
+ // Since SDK events are ClickHouse-only, always use the ckh pool here
+ time_range,
+ ckh_pool,
+ )
+ .await
+ }
+ }
+ }
+
+ pub async fn get_api_event_metrics(
+ &self,
+ metric: &ApiEventMetrics,
+ dimensions: &[ApiEventDimensions],
+ pub_key: &str,
+ filters: &ApiEventFilters,
+ granularity: &Option<Granularity>,
+ time_range: &TimeRange,
+ ) -> types::MetricsResult<Vec<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
+ match self {
+ Self::Sqlx(_pool) => Err(MetricsError::NotImplemented).into_report(),
+ Self::Clickhouse(ckh_pool)
+ | Self::CombinedCkh(_, ckh_pool)
+ | Self::CombinedSqlx(_, ckh_pool) => {
+ // Since API events are ClickHouse-only, always use the ckh pool here
+ metric
+ .load_metrics(
+ dimensions,
+ pub_key,
+ filters,
+ granularity,
+ time_range,
+ ckh_pool,
+ )
+ .await
+ }
+ }
+ }
+
+ pub async fn from_conf(config: &AnalyticsConfig) -> Self {
+ match config {
+ AnalyticsConfig::Sqlx { sqlx } => Self::Sqlx(SqlxClient::from_conf(sqlx).await),
+ AnalyticsConfig::Clickhouse { clickhouse } => Self::Clickhouse(ClickhouseClient {
+ config: Arc::new(clickhouse.clone()),
+ }),
+ AnalyticsConfig::CombinedCkh { sqlx, clickhouse } => Self::CombinedCkh(
+ SqlxClient::from_conf(sqlx).await,
+ ClickhouseClient {
+ config: Arc::new(clickhouse.clone()),
+ },
+ ),
+ AnalyticsConfig::CombinedSqlx { sqlx, clickhouse } => Self::CombinedSqlx(
+ SqlxClient::from_conf(sqlx).await,
+ ClickhouseClient {
+ config: Arc::new(clickhouse.clone()),
+ },
+ ),
+ }
+ }
+}
+
+#[derive(Clone, Debug, serde::Deserialize)]
+#[serde(tag = "source")]
+#[serde(rename_all = "lowercase")]
+pub enum AnalyticsConfig {
+ Sqlx {
+ sqlx: Database,
+ },
+ Clickhouse {
+ clickhouse: ClickhouseConfig,
+ },
+ CombinedCkh {
+ sqlx: Database,
+ clickhouse: ClickhouseConfig,
+ },
+ CombinedSqlx {
+ sqlx: Database,
+ clickhouse: ClickhouseConfig,
+ },
+}
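+
+// A hedged sketch of the configuration this enum deserializes from. The `[analytics]`
+// table name is an assumption about how the router wires this in; only the `source`
+// tag and the nested `sqlx` / `clickhouse` tables follow from the serde attributes
+// above (note `rename_all = "lowercase"`, so `CombinedCkh` becomes "combinedckh"):
+//
+//   [analytics]
+//   source = "combinedckh"
+//
+//   [analytics.sqlx]
+//   # fields of storage_impl::config::Database (host, port, username, ...)
+//
+//   [analytics.clickhouse]
+//   # fields of ClickhouseConfig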
+
+impl Default for AnalyticsConfig {
+ fn default() -> Self {
+ Self::Sqlx {
+ sqlx: Database::default(),
+ }
+ }
+}
+
+#[derive(Clone, Debug, serde::Deserialize, Default, serde::Serialize)]
+pub struct ReportConfig {
+ pub payment_function: String,
+ pub refund_function: String,
+ pub dispute_function: String,
+ pub region: String,
+}
diff --git a/crates/analytics/src/main.rs b/crates/analytics/src/main.rs
new file mode 100644
index 000000000000..5bf256ea9783
--- /dev/null
+++ b/crates/analytics/src/main.rs
@@ -0,0 +1,3 @@
+fn main() {
+ println!("Hello world");
+}
diff --git a/crates/router/src/analytics/metrics.rs b/crates/analytics/src/metrics.rs
similarity index 100%
rename from crates/router/src/analytics/metrics.rs
rename to crates/analytics/src/metrics.rs
diff --git a/crates/router/src/analytics/metrics/request.rs b/crates/analytics/src/metrics/request.rs
similarity index 51%
rename from crates/router/src/analytics/metrics/request.rs
rename to crates/analytics/src/metrics/request.rs
index b7c202f2db25..3d1a78808f34 100644
--- a/crates/router/src/analytics/metrics/request.rs
+++ b/crates/analytics/src/metrics/request.rs
@@ -6,24 +6,20 @@ pub fn add_attributes>(
}
#[inline]
-pub async fn record_operation_time<F, R>(
+pub async fn record_operation_time<F, R, T>(
future: F,
metric: &once_cell::sync::Lazy<router_env::opentelemetry::metrics::Histogram<f64>>,
- metric_name: &api_models::analytics::payments::PaymentMetrics,
- source: &crate::analytics::AnalyticsProvider,
+ metric_name: &T,
+ source: &crate::AnalyticsProvider,
) -> R
where
F: futures::Future<Output = R>,