-
Notifications
You must be signed in to change notification settings - Fork 0
/
kafka-and-data-proc.tf
235 lines (204 loc) · 8.01 KB
/
kafka-and-data-proc.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
# Infrastructure for setting up integration between the Yandex Data Processing and Managed Service for Apache Kafka® clusters
#
# RU: https://yandex.cloud/ru/docs/data-proc/tutorials/kafka
# EN: https://yandex.cloud/en/docs/data-proc/tutorials/kafka
#
# Set the configuration of the Yandex Data Processing and Managed Service for Apache Kafka® clusters
# Specify the following settings:
locals {
# Required user-supplied settings — fill these in before applying.
folder_id = "" # Your cloud folder ID, the same as for your provider
dp_ssh_key = "" # Absolute path to an SSH public key for the Yandex Data Processing cluster
# The following settings are predefined. Change them only if necessary.
network_name = "dataproc-network" # Name of the network
nat_name = "dataproc-nat" # Name of the NAT gateway
subnet_name = "dataproc-subnet-b" # Name of the subnet
sa_name = "dataproc-sa" # Name of the service account
bucket_name = "dataproc-bucket-8097865" # Name of the Object Storage bucket (must be globally unique)
dataproc_cluster_name = "dataproc-cluster" # Name of the Yandex Data Processing cluster
kafka_cluster_name = "dataproc-kafka" # Name of the Managed Service for Apache Kafka® cluster
kafka_username = "user1" # Apache Kafka® username
kafka_password = "password1" # Password of the Apache Kafka® user
topic_name = "dataproc-kafka-topic" # Name of the Apache Kafka® topic
}
# Single VPC network shared by both clusters defined below.
resource "yandex_vpc_network" "dataproc_network" {
description = "Network for Yandex Data Processing and Managed Service for Apache Kafka®"
name = local.network_name
}
# NAT gateway for Yandex Data Processing: gives cluster hosts outbound
# internet access via the route table below.
resource "yandex_vpc_gateway" "dataproc_nat" {
name = local.nat_name
shared_egress_gateway {} # use the shared egress (NAT) gateway type
}
# Routing table for Yandex Data Processing: sends all egress traffic
# (0.0.0.0/0) through the NAT gateway. Attached to the subnet below.
resource "yandex_vpc_route_table" "dataproc_rt" {
network_id = yandex_vpc_network.dataproc_network.id
static_route {
destination_prefix = "0.0.0.0/0"
gateway_id = yandex_vpc_gateway.dataproc_nat.id
}
}
# Subnet in ru-central1-b hosting both clusters; uses the NAT route table
# so cluster hosts can reach the internet.
resource "yandex_vpc_subnet" "dataproc_subnet_b" {
description = "Subnet for Yandex Data Processing and Managed Service for Apache Kafka®"
name = local.subnet_name
zone = "ru-central1-b"
network_id = yandex_vpc_network.dataproc_network.id
v4_cidr_blocks = ["10.140.0.0/24"]
route_table_id = yandex_vpc_route_table.dataproc_rt.id
}
# Shared security group. Both clusters attach it, so the "self_security_group"
# rules below allow unrestricted traffic between the Yandex Data Processing
# and Apache Kafka® hosts (including the Kafka broker ports).
resource "yandex_vpc_security_group" "dataproc_security_group" {
description = "Security group for the Yandex Data Processing and Managed Service for Apache Kafka® clusters"
network_id = yandex_vpc_network.dataproc_network.id
ingress {
description = "Allow any incoming traffic within the security group"
protocol = "ANY"
from_port = 0
to_port = 65535
predefined_target = "self_security_group"
}
ingress {
description = "Allow access to NTP servers for time syncing"
protocol = "UDP"
port = 123
v4_cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "Allow any outgoing traffic within the security group"
protocol = "ANY"
from_port = 0
to_port = 65535
predefined_target = "self_security_group"
}
egress {
description = "Allow connections to the HTTPS port from any IP address"
protocol = "TCP"
port = 443
v4_cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "Allow access to NTP servers for time syncing"
protocol = "UDP"
port = 123
v4_cidr_blocks = ["0.0.0.0/0"]
}
}
# Service account used by the Yandex Data Processing cluster; granted
# folder-level roles by the three IAM bindings below.
resource "yandex_iam_service_account" "dataproc_sa" {
description = "Service account to manage the Yandex Data Processing cluster"
name = local.sa_name
}
# Assign the storage.admin role to the Yandex Data Processing service account
# (needed to create and manage the Object Storage bucket below)
resource "yandex_resourcemanager_folder_iam_binding" "storage_admin" {
folder_id = local.folder_id
role = "storage.admin"
members = ["serviceAccount:${yandex_iam_service_account.dataproc_sa.id}"]
}
# Assign the dataproc.agent role to the Yandex Data Processing service account
resource "yandex_resourcemanager_folder_iam_binding" "dataproc_agent" {
folder_id = local.folder_id
role = "dataproc.agent"
members = ["serviceAccount:${yandex_iam_service_account.dataproc_sa.id}"]
}
# Assign the dataproc.user role to the Yandex Data Processing service account
# (the original comment said "storage.uploader", which did not match the role below)
resource "yandex_resourcemanager_folder_iam_binding" "dataproc_user" {
folder_id = local.folder_id
role = "dataproc.user"
members = ["serviceAccount:${yandex_iam_service_account.dataproc_sa.id}"]
}
# Static access key pair for the service account; used by the bucket
# resource below to authenticate against the Object Storage S3 API.
resource "yandex_iam_service_account_static_access_key" "sa_static_key" {
description = "Static access key for Object Storage"
service_account_id = yandex_iam_service_account.dataproc_sa.id
}
# Use the key to create a bucket and provide the service account with full control over the bucket.
# The Yandex Data Processing cluster uses this bucket for its job/data storage.
resource "yandex_storage_bucket" "dataproc_bucket" {
access_key = yandex_iam_service_account_static_access_key.sa_static_key.access_key
secret_key = yandex_iam_service_account_static_access_key.sa_static_key.secret_key
bucket = local.bucket_name
grant {
id = yandex_iam_service_account.dataproc_sa.id
type = "CanonicalUser"
permissions = ["FULL_CONTROL"]
}
}
# Yandex Data Processing cluster: one master, one data, and one compute
# subcluster, all in the NAT-routed subnet and the shared security group.
resource "yandex_dataproc_cluster" "dataproc_cluster" {
description = "Yandex Data Processing cluster"
# Explicit dependency: the cluster must not be created until the service
# account has all three folder roles, or provisioning can fail.
depends_on = [yandex_resourcemanager_folder_iam_binding.storage_admin, yandex_resourcemanager_folder_iam_binding.dataproc_agent, yandex_resourcemanager_folder_iam_binding.dataproc_user]
bucket = yandex_storage_bucket.dataproc_bucket.id
security_group_ids = [yandex_vpc_security_group.dataproc_security_group.id]
name = local.dataproc_cluster_name
service_account_id = yandex_iam_service_account.dataproc_sa.id
zone_id = "ru-central1-b"
ui_proxy = true # expose component web UIs (e.g. YARN) through the UI proxy
cluster_config {
version_id = "2.1"
hadoop {
services = ["HDFS", "LIVY", "SPARK", "TEZ", "YARN"]
# dp_ssh_key must point to an existing public key file, or `terraform plan` fails here.
ssh_public_keys = [file(local.dp_ssh_key)]
}
subcluster_spec {
name = "main"
role = "MASTERNODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet_b.id
hosts_count = 1
}
subcluster_spec {
name = "data"
role = "DATANODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet_b.id
hosts_count = 1
}
subcluster_spec {
name = "compute"
role = "COMPUTENODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet_b.id
hosts_count = 1
}
}
}
# Managed Service for Apache Kafka® cluster: single broker in ru-central1-b,
# same network and security group as the Data Processing cluster so the two
# can reach each other via the intra-group security rules.
resource "yandex_mdb_kafka_cluster" "kafka_cluster" {
description = "Managed Service for Apache Kafka® cluster"
environment = "PRODUCTION"
name = local.kafka_cluster_name
network_id = yandex_vpc_network.dataproc_network.id
security_group_ids = [yandex_vpc_security_group.dataproc_security_group.id]
config {
brokers_count = 1
version = "3.5" # Apache Kafka® version
kafka {
resources {
disk_size = 10 # GB
disk_type_id = "network-ssd"
resource_preset_id = "s2.micro"
}
}
zones = [
"ru-central1-b"
]
}
}
# Apache Kafka® user
# NOTE(review): credentials come from plaintext locals and end up in the
# Terraform state; consider a variable with `sensitive = true` or a secret store.
resource "yandex_mdb_kafka_user" "kafka_user" {
cluster_id = yandex_mdb_kafka_cluster.kafka_cluster.id
name = local.kafka_username
password = local.kafka_password
}
# Apache Kafka® topic
# Single partition, replication factor 1 — matches the single-broker cluster
# above (a higher replication factor would fail with one broker).
resource "yandex_mdb_kafka_topic" "dataproc_kafka_topic" {
cluster_id = yandex_mdb_kafka_cluster.kafka_cluster.id
name = local.topic_name
partitions = 1
replication_factor = 1
}