---
# Update this value if you have a custom Kubernetes cluster domain name
clusterDomain: cluster.local
# Number of nifi nodes
replicaCount: 1

## Set default image, imageTag, and imagePullPolicy.
## ref: https://hub.docker.com/r/dmitriidmnk/nifi
##
image:
  repository: dmitriidmnk/nifi
  tag: 1.18.0
  pullPolicy: IfNotPresent
  ## Optionally specify an imagePullSecret.
  ## Secret must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecret: myRegistryKeySecretName
securityContext:
  runAsUser: 1000
  fsGroup: 1000

## @param useHostNetwork - boolean - optional
## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might
## not be supported. The ports need to be available on all hosts. It can be
## used for custom metrics instead of a service endpoint.
##
## WARNING: Make sure that hosts using this are properly firewalled, otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
sts:
  # Parallel podManagementPolicy for faster bootstrap and teardown. Default is OrderedReady.
  podManagementPolicy: Parallel
  AntiAffinity: soft
  useHostNetwork: null
  hostPort: null
  pod:
    annotations:
      security.alpha.kubernetes.io/sysctls: net.ipv4.ip_local_port_range=10000 65000
      #prometheus.io/scrape: "true"
  serviceAccount:
    create: false
    #name: nifi
    annotations: {}
  hostAliases: []
  #  - ip: "1.2.3.4"
  #    hostnames:
  #      - example.com
  #      - example
  startupProbe:
    enabled: false
    failureThreshold: 60
    periodSeconds: 10
## Useful if using any custom secrets
## Pass in some secrets to use (if required)
# secrets:
#   - name: myNifiSecret
#     keys:
#       - key1
#       - key2
#     mountPath: /opt/nifi/secret

## Useful if using any custom configmaps
## Pass in some configmaps to use (if required)
# configmaps:
#   - name: myNifiConf
#     keys:
#       - myconf.conf
#     mountPath: /opt/nifi/custom-config
properties:
  # Must be an integer!
  # nifi.provenance.repository.debug.frequency
  provenanceRepositoryDebugFrequency: '1000000'
  # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#nifi_sensitive_props_key
  sensitiveKey: changeMechangeMe # Must have at least 12 characters. Can also be set via an environment variable.
  # NiFi assumes conf/nifi.properties is persistent but this helm chart
  # recreates it every time. Setting the Sensitive Properties Key
  # (nifi.sensitive.props.key) is supposed to happen at the same time
  # /opt/nifi/data/flow.xml.gz sensitive properties are encrypted. If that
  # doesn't happen then NiFi won't start because decryption fails.
  # So if sensitiveKeySetFile is configured but doesn't exist, assume
  # /opt/nifi/flow.xml.gz hasn't been encrypted and follow the procedure
  # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#updating-the-sensitive-properties-key
  # to simultaneously encrypt it and set nifi.sensitive.props.key.
  # sensitiveKeySetFile: /opt/nifi/data/sensitive-props-key-applied
  # If sensitiveKey was already set, then pass in sensitiveKeyPrior with the old key.
  # sensitiveKeyPrior: OldPasswordToChangeFrom
  algorithm: NIFI_PBKDF2_AES_GCM_256
  # use externalSecure when inbound SSL is provided by nginx-ingress or another external mechanism
  externalSecure: false
  isNode: false # set to true if replicaCount is greater than 1
  httpsPort: 8443
  webProxyHost: # 'nifi.default.svc.cluster.local:8443' - uncomment and edit if replicaCount is greater than 1 (see the commented cluster example below)
  clusterPort: 6007
  provenanceStorage: "8 GB"
  siteToSite:
    port: 10000
  # use properties.safetyValve to pass explicit 'key: value' pairs that overwrite other configuration
  safetyValve:
    #nifi.variable.registry.properties: "${NIFI_HOME}/example1.properties, ${NIFI_HOME}/example2.properties"
    nifi.web.http.network.interface.default: eth0
    # listen on the loopback interface so "kubectl port-forward ..." works
    nifi.web.http.network.interface.lo: lo
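
  ## Illustrative cluster example: the settings below pull together the isNode and webProxyHost
  ## hints from above for a multi-node deployment. The hostname reuses the value already suggested
  ## in the webProxyHost comment; adjust it for your release name and namespace.
  # isNode: true
  # webProxyHost: nifi.default.svc.cluster.local:8443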
  ## Include additional processors
  # customLibPath: "/opt/configuration_resources/custom_lib"

# configure log level
logbackXml:
  org.apache.nifi: INFO
  org.apache.nifi.cluster: INFO
  app_file: INFO

## Include additional libraries in the Nifi containers by using the postStart handler
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
# postStart: /opt/nifi/psql; wget -P /opt/nifi/psql https://jdbc.postgresql.org/download/postgresql-42.2.6.jar
# Nifi User Authentication
auth:
  # If set while LDAP is enabled, this value will be used for the initial admin and not the ldap bind dn / admin
  admin: CN=admin, OU=NIFI # Initial admin account username. Can also be set via an environment variable.
  SSL:
    keystorePasswd: changeMe
    truststorePasswd: changeMe

  # Automatically disabled if OIDC or LDAP is enabled
  singleUser:
    username: username
    password: changemechangeme # Must have at least 12 characters
  clientAuth:
    enabled: false
  ldap:
    enabled: false
    host: ldap://ldap-server.example.com:389
    userSearchBase: ou=People,dc=example,dc=com
    initialAdmin: nifiAdmin # LDAP account to initialize as the admin user
    userSearchFilter: (objectClass=*) # (memberOf=cn=nifi_users,ou=groups,dc=example,dc=com)
    userIdentityAttribute: # uid
    authenticationStrategy: SIMPLE # How the connection to the LDAP server is authenticated. Possible values are ANONYMOUS, SIMPLE, LDAPS, or START_TLS.
    identityStrategy: USE_USERNAME # USE_DN or USE_USERNAME
    authExpiration: 12 hours
    groupSearchBase: # ou=Groups,dc=example,dc=com
    groupObjectClass: group # groupOfNames
    referralStrategy: IGNORE # FOLLOW
    groupNameAttribute: # cn
    groupMemberAttribute: # member
    groupSearchScope: ONE_LEVEL # Search scope for searching groups (ONE_LEVEL, OBJECT, or SUBTREE). Required if searching groups.
    userSearchScope: SUBTREE # Search scope for searching users (ONE_LEVEL, OBJECT, or SUBTREE). Required if searching users.
    userGroupNameAttribute: # memberOf
    managerDN: uid=nifiAdmin,ou=People,dc=example,dc=com # Account DN used to connect to the LDAP server. Can also be set via an environment variable.
    managerPassword: changeMechangeMe # Account password used to connect to the LDAP server. Can also be set via an environment variable.
    providerUserSearchFilter: (uid={0})
    syncInterval: 60 secs # Minimum allowable value is 10 secs
  oidc:
    enabled: false
    discoveryUrl: #http://<oidc_provider_address>:<oidc_provider_port>/auth/realms/<client_realm>/.well-known/openid-configuration
    clientId: #<client_name_in_oidc_provider>
    clientSecret: #<client_secret_in_oidc_provider>
    claimIdentifyingUser: email
    admin: [email protected]
    ## Request additional scopes, for example profile
    additionalScopes:
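    ## Illustrative example: the discoveryUrl template above filled in with placeholder values,
    ## shown in a Keycloak-style layout. All values below are assumptions about your own OIDC
    ## provider, not chart defaults.
    # discoveryUrl: https://keycloak.example.com/auth/realms/nifi/.well-known/openid-configuration
    # clientId: nifi
    # clientSecret: <client_secret_in_oidc_provider>
    # additionalScopes: profile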
openldap:
  enabled: false
  persistence:
    enabled: true
  env:
    LDAP_ORGANISATION: # name of your organization e.g. "Example"
    LDAP_DOMAIN: # your domain e.g. "ldap.example.be"
    LDAP_BACKEND: "hdb"
    LDAP_TLS: "true"
    LDAP_TLS_ENFORCE: "false"
    LDAP_REMOVE_CONFIG_AFTER_SETUP: "false"
  adminPassword: #ChangeMe
  configPassword: #ChangeMe
  customLdifFiles:
    1-default-users.ldif: |-
      # You can find an example ldif file at https://github.com/cetic/fadi/blob/master/examples/basic/example.ldif
## Expose the nifi service to be accessed from outside the cluster (LoadBalancer service)
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
# headless service
headless:
  type: ClusterIP
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"

# ui service
service:
  type: ClusterIP
  httpsPort: 8443
  # nodePort: 30236
  annotations: {}
    # loadBalancerIP:
    ## Load Balancer sources
    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
    ##
    # loadBalancerSourceRanges:
    #   - 10.10.10.0/24
    ## OIDC authentication requires a "sticky" session on the LoadBalancer for JWT to work properly... but AWS doesn't like it on creation
    # sessionAffinity: ClientIP
    # sessionAffinityConfig:
    #   clientIP:
    #     timeoutSeconds: 10800
  # Enables additional port/ports on the nifi service for internal processors
  processors:
    enabled: false
    ports:
      - name: processor01
        port: 7001
        targetPort: 7001
        #nodePort: 30701
      - name: processor02
        port: 7002
        targetPort: 7002
        #nodePort: 30702
## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
  enabled: false
  # className: nginx
  annotations: {}
    # kubernetes.io/ingress.class: 'nginx'
    # cert-manager.io/cluster-issuer: your-issuer
    # nginx.ingress.kubernetes.io/affinity: cookie
    # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    # nginx.ingress.kubernetes.io/upstream-vhost: "localhost:8443"
    # nginx.ingress.kubernetes.io/proxy-redirect-from: "https://localhost:8443"
    # nginx.ingress.kubernetes.io/proxy-redirect-to: "nifi.example.com"
    # nginx.ingress.kubernetes.io/configuration-snippet: |
    #   proxy_set_header 'X-ProxyScheme' 'https';
    #   proxy_set_header 'X-ProxyPort' '443';
  tls: []
  # - secretName: nifi-ingress-tls
  #   hosts:
  #     - nifi.example.com
  hosts: []
  # - nifi.example.com
  path: /
  # If you want to change the default path, see this issue https://github.com/cetic/helm-nifi/issues/22
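  ## Illustrative example pulling the commented pieces above together for an nginx ingress.
  ## The hostname, TLS secret name and issuer are placeholders, not chart defaults.
  # enabled: true
  # className: nginx
  # annotations:
  #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
  # hosts:
  #   - nifi.example.com
  # tls:
  #   - secretName: nifi-ingress-tls
  #     hosts:
  #       - nifi.example.com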
# Amount of memory to give the NiFi java heap
jvmMemory: 2g

# Separate image for tailing each log separately and checking zookeeper connectivity
sidecar:
  image: busybox
  tag: "1.32.0"
  imagePullPolicy: "IfNotPresent"
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
  enabled: false

  # When creating persistent storage, the NiFi helm chart can either reference an already-defined
  # storage class by name, such as "standard", or can define a custom storage class by specifying
  # customStorageClass: true and providing the "storageClass", "storageProvisioner" and "storageType".
  # For example, to use SSD storage on Google Compute Engine see values-gcp.yaml
  #
  # To use a storage class that already exists on the Kubernetes cluster, simply reference it by name.
  # For example:
  # storageClass: standard
  #
  # The default storage class is used if this variable is not set.

  accessModes: [ReadWriteOnce]

  ## Storage capacities for persistent volumes
  configStorage:
    size: 100Mi
  authconfStorage:
    size: 100Mi
  # Storage capacity for the 'data' directory, which is used to hold things such as the flow.xml.gz, configuration, state, etc.
  dataStorage:
    size: 1Gi
  # Storage capacity for the FlowFile repository
  flowfileRepoStorage:
    size: 10Gi
  # Storage capacity for the Content repository
  contentRepoStorage:
    size: 10Gi
  # Storage capacity for the Provenance repository. When changing this, also change the properties.provenanceStorage value above.
  provenanceRepoStorage:
    size: 10Gi
  # Storage capacity for nifi logs
  logStorage:
    size: 5Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

logresources:
  requests:
    cpu: 10m
    memory: 10Mi
  limits:
    cpu: 50m
    memory: 50Mi
## Enables setting your own affinity. Mutually exclusive with sts.AntiAffinity;
## to use it, set sts.AntiAffinity to a value other than "soft" or "hard".
affinity: {}

nodeSelector: {}

tolerations: []

initContainers: {}
  # foo-init:  # <- will be used as container name
  #   image: "busybox:1.30.1"
  #   imagePullPolicy: "IfNotPresent"
  #   command: ['sh', '-c', 'echo this is an initContainer']
  #   volumeMounts:
  #     - mountPath: /tmp/foo
  #       name: foo
extraVolumeMounts: []

extraVolumes: []

## Extra containers
extraContainers: []

terminationGracePeriodSeconds: 30

## Extra environment variables that will be passed to the deployment pods
env: []

## Extra environment variables from secrets and config maps
envFrom: []
# envFrom:
#   - configMapRef:
#       name: config-name
#   - secretRef:
#       name: mysecret
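
## Illustrative example for env (the variable name and value are placeholders, not chart defaults):
# env:
#   - name: TZ
#     value: "UTC"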
## Openshift support
## Use the following variables in order to enable Route and Security Context Constraint creation
openshift:
  scc:
    enabled: false
  route:
    enabled: false
    #host: www.test.com
    #path: /nifi
# ca server details
# Setting this to true creates a nifi-toolkit based ca server.
# The ca server is used to generate the self-signed certificates required for setting up a secured cluster.
ca:
  ## If true, enable the nifi-toolkit certificate authority
  enabled: false
  persistence:
    enabled: true
  server: "" # For example "nifi-ca.default.svc.{{ .Values.clusterDomain }}"
  envFrom: []
  image:
    repository: apache/nifi-toolkit
  accessModes: [ReadWriteOnce]
  storageClass: ''
  initContainer:
    image: busybox:1.32.0
  service:
    port: 9090
  token: sixteenCharacters
  admin:
    cn: admin
  serviceAccount:
    create: false
    #name: nifi-ca
  openshift:
    scc:
      enabled: false
# cert-manager support
# Setting this to true will have cert-manager create a private CA for the cluster
# as well as the certificates for each cluster node.
certManager:
  enabled: false
  keystorePasswd: changeme
  truststorePasswd: changeme
  replaceDefaultTrustStore: false
  # For security reasons, you can define a secret as the source of the keystorePasswd and truststorePasswd environment variables
  envFrom: {}
  # - secretRef:
  #     name: test
  additionalDnsNames:
    - localhost
  refreshSeconds: 300
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 100m
      memory: 128Mi
  # cert-manager takes care of rotating the node certificates, so default
  # their lifetime to 90 days. But when the CA expires you may need to
  # 'helm delete' the cluster, delete all the node certificates and secrets,
  # and then 'helm install' the NiFi cluster again. If a site-to-site trusted
  # CA or a NiFi Registry CA certificate expires, you'll need to restart all
  # pods to pick up the new version of the CA certificate. So default the CA
  # lifetime to 10 years to avoid that happening very often.
  # c.f. https://github.com/cert-manager/cert-manager/issues/2478#issuecomment-1095545529
  certDuration: 2160h0m0s # the value must specify hours, minutes and seconds
  caDuration: 87660h0m0s # the value must specify hours, minutes and seconds
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
  ## If true, install the Zookeeper chart
  ## ref: https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
  enabled: true
  ## If the Zookeeper chart is disabled, a URL and port are required to connect to an external Zookeeper
  url: ""
  port: 2181
  replicaCount: 3
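  ## Illustrative example (the hostname is a placeholder): to point NiFi at an existing external
  ## Zookeeper ensemble instead of installing the bundled chart:
  # enabled: false
  # url: "zookeeper.example.com"
  # port: 2181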
# ------------------------------------------------------------------------------
# Nifi registry:
# ------------------------------------------------------------------------------
registry:
  ## If true, install the Nifi registry
  enabled: false
  url: ""
  port: 80
  ## Add values for the nifi-registry here
  ## ref: https://github.com/dysnix/charts/blob/master/nifi-registry/values.yaml
# Configure metrics
metrics:
  prometheus:
    # Enable Prometheus metrics
    enabled: false
    # Port used to expose Prometheus metrics
    port: 9092
    serviceMonitor:
      # Enable deployment of the Prometheus Operator ServiceMonitor resource
      enabled: false
      # namespace: monitoring
      # Additional labels for the ServiceMonitor
      labels: {}
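      ## Illustrative example: the label that lets a Prometheus Operator instance pick up this
      ## ServiceMonitor depends on how your operator is deployed; the value below is an assumption,
      ## not a chart default.
      # labels:
      #   release: kube-prometheus-stack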