# Label domain for implicit labels defined in this chart.
labelDomain: unikorn-cloud.org
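# For example, with the domain above, workload pool nodes receive the
# unikorn-cloud.org/node-pool label (see the workload pool labels comment
# further down).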
# Kubernetes version. This should match the version pre-installed on the
# base images, otherwise you'll pay a start-up penalty while kubeadm
# downloads the images.
version: v1.30.2
# OpenStack specific configuration.
# Contains credentials for the cloud, networking options and other settings.
# Values in this object are considered immutable.
openstack:
# Name of the cloud in clouds.yaml.
cloud: my-cloud
# clouds.yaml file, base64 encoded.
cloudsYAML: Y2hhbmdlIG1lIQ==
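# As a sketch (assuming GNU coreutils base64; the file location may
# differ), the value above can be generated with:
#   base64 -w0 ~/.config/openstack/clouds.yaml
# The placeholder Y2hhbmdlIG1lIQ== simply decodes to "change me!".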
# External (public) network ID.
# If one is not specified, CAPO will use the only network it finds, otherwise
# it will error.
# externalNetworkID: b19ce45a-3278-48e4-8a0e-83100af68778
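# (As a sketch, external networks can be listed with the OpenStack CLI,
# e.g. openstack network list --external.)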
# OpenStack endpoint CA, base64 encoded.
# ca: Y2hhbmdlIG1lIQ==
# SSH key.
#
# sshKeyName: foo
# Compute failure domain name. Workload pools can be distributed
# across multiple failure domains, but default to this one if none is
# specified. Control plane nodes are deployed in this failure domain.
computeFailureDomain: nova
# If volumes are defined for machine sets, this is the global default.
# It can be overridden on a per-workload pool/control plane basis.
# volumeFailureDomain: nova
# Cluster wide configuration.
#
# cluster:
# Applies taints to all nodes on creation. Once a node is provisioned you
# will need to manually update these.
# NOTE: the Cilium chart will automatically add the taints shown below.
# taints:
# - key: node.cilium.io/agent-not-ready
# effect: NoSchedule
# value: 'true'
#
# Applies the following key/value pairs to all server instances.
# serverMetadata:
# foo: bar
# Kubernetes API specific configuration.
#
# api:
# # Allow only the selected network address prefixes access to the API.
# allowList:
# - 192.168.0.0/24
#
# # Generate the API server certificate with a specific set of X.509
# # subject alternative names, "localhost" and "127.0.0.1" are required
# # by Kubernetes and added by default.
# # Modifications to this list will trigger a control plane upgrade.
# certificateSANs:
# - foo.acme.com
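# # As a sketch, the SANs on a running API server can be inspected with
# # OpenSSL (the hostname is a placeholder; -ext needs OpenSSL 1.1.1+):
# #   openssl s_client -connect foo.acme.com:6443 </dev/null 2>/dev/null \
# #     | openssl x509 -noout -ext subjectAltName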
# Control plane topology.
# Modifications to this object will trigger a control plane upgrade.
controlPlane:
# Number of control plane machines.
replicas: 3
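# kubeadm-based control planes typically run stacked etcd, so an odd
# replica count (1, 3, 5) is generally recommended to preserve etcd
# quorum; the stacked-etcd detail is an assumption about this deployment.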
# kubeadm uses phases to initialize a node:
# https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase
# Any phase can be skipped by passing --skip-phases to the kubeadm init
# command. To skip kubeadm phases in Unikorn, supply a list below.
# WARNING: skipping phases may cause cluster instability, so make sure you
# know what you're skipping before enabling this!
# List of phases: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
kubeadmSkipPhases: []
# - addon/kube-proxy
# - addon/coredns
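# As a sketch, the list above maps onto kubeadm's --skip-phases flag, so
# the commented example corresponds roughly to:
#   kubeadm init --skip-phases=addon/kube-proxy,addon/coredns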
# Defines the physical properties of a machine.
# Modifications to this object will trigger a control plane upgrade.
machine:
# OpenStack image ID.
imageID: f810a963-437d-4a24-a972-a0bff942542b
# Control plane machine type (This is the name of the flavor rather than the actual ID).
flavorID: some-flavor-name
# Ephemeral disk size in GiB. If specified this overrides the default
# size for the flavor.
# diskSize: 80
# Set the server group of the control plane (e.g. anti-affinity rules).
# serverGroupID: foo
# Any additional configuration to be passed to the kubelet can be specified here.
kubelet:
serializeImagePulls: false
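# As a sketch, keys here are expected to correspond to KubeletConfiguration
# fields (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/);
# for example, maxPods: 250 would raise the per-node pod limit. This
# mapping is an assumption about how the chart consumes these values.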
# Workload pools topology.
# workloadPools:
# # Pool name
# default:
# # Number of workload machines.
# replicas: 3
#
# # Defines the physical properties of a machine.
# # Modifications to these objects will trigger upgrades of the affected workload pools.
# machine:
# # OpenStack image ID.
# imageID: f810a963-437d-4a24-a972-a0bff942542b
#
# # Workload machine type (This is the name of the flavor rather than the actual ID).
# flavorID: some-flavor-name
#
# # Ephemeral disk size in GiB. If specified this overrides the default
# # size for the flavor.
# diskSize: 160
#
# # Failure domain to provision the pool in, defaults to openstack.computeFailureDomain.
# failureDomain: nova
#
# # Labels to apply to the pool nodes. The pool name will be applied
# # automatically with the $(labelDomain)/node-pool label. The failureDomain
# # will be automatically added as the well known "topology.kubernetes.io/zone"
# # label along with the "topology.kubernetes.io/region" label. This is only
# # intended for use on initial node bring up and will not trigger a rolling
# # upgrade.
# labels:
# group: foo
#
# # Files to include on the machine. These are currently limited to
# # base64 encoded content, owned and readable by root. Like node labels
# # above, this is only intended for use on initial node bring up and
# # will not trigger a rolling upgrade.
# files:
# - path: /etc/cat
# content: bWVvdw==
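# # For reference, content is the base64 of the raw file bytes; the
# # example above was produced with: echo -n meow | base64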
#
# # Additional configuration to be passed to the kubelet can be specified here.
# kubelet:
# # For example:
# serializeImagePulls: false
#
# # Enable or disable cluster autoscaling.
# # This object is considered immutable.
# autoscaling:
# # Set the scaling limits.
# # Limits are required by cluster-autoscaler.
# limits:
# # Minimum number of replicas.
# minReplicas: 0
#
# # Maximum number of replicas.
# maxReplicas: 10
#
# # Set the scaler scheduling hints.
# # These are only required when using scale-from-zero support
# # as cluster-autoscaler is unable to determine these values without
# # an active node.
# scheduler:
# # Number of CPU cores.
# cpu: 4
#
# # Amount of memory.
# memory: 32G
#
# # Whether a GPU is present or not.
# gpu:
# # The GPU type.
# type: nvidia.com/gpu
#
# # The number of GPUs.
# count: 1
# Kubernetes/OpenStack networking options.
# This object is considered immutable; changes will not result in any
# modification to the cluster.
network:
# Network prefix nodes will be allocated from.
nodeCIDR: 192.168.0.0/16
# Network prefixes service VIPs will be allocated from.
serviceCIDRs:
- 172.16.0.0/12
# Network prefixes pods will be allocated from.
podCIDRs:
- 10.0.0.0/8
# DNS nameservers to use.
dnsNameservers:
- 8.8.8.8
# If specified, defines an existing network to use; if not defined, a
# network will be created by CAPO. This allows you to use non-standard
# network types, e.g. a VLAN for baremetal nodes. The subnet must have a
# router attached.
# provider:
# networkID: 8f526b54-fab3-435d-b4b3-f65fd8474b8a
# subnetID: e3b15dd0-17e4-47c0-bc6c-1b8ea1f1018f
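# (As a sketch, these IDs can be looked up with the OpenStack CLI, e.g.
# openstack network list and openstack subnet list --network <network>.)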
# If specified these security group rules are added to all nodes.
# securityGroupRules:
# - name: ssh-ingress
# direction: ingress
# etherType: IPv4
# protocol: TCP
# portRangeMin: 22
# portRangeMax: 22