forked from rancher/terraform-provider-rke
-
Notifications
You must be signed in to change notification settings - Fork 0
/
example.tf
148 lines (145 loc) · 3.45 KB
/
example.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
# RKE cluster with one node holding all three roles (controlplane, worker,
# etcd), the nginx ingress controller with SSL passthrough enabled, and the
# Rancher ("cattle") server deployed through the addons manifests below.
resource "rke_cluster" "cluster" {
  nodes = [
    {
      address = "1.2.3.4"
      user    = "ubuntu"
      role    = ["controlplane", "worker", "etcd"]
      ssh_key = "${file("~/.ssh/id_rsa")}"
    },
  ]

  ingress = {
    provider = "nginx"
    extra_args = {
      # Flag-style nginx-ingress argument: its presence enables the feature,
      # so the value is intentionally the empty string.
      enable-ssl-passthrough = ""
    }
  }

  # Kubernetes manifests applied to the cluster after provisioning.
  # Replace the <BASE64_*> and <FQDN> placeholders before use.
  addons = <<EOL
---
kind: Namespace
apiVersion: v1
metadata:
  name: cattle-system
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: cattle-admin
  namespace: cattle-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cattle-crb
  namespace: cattle-system
subjects:
  - kind: ServiceAccount
    name: cattle-admin
    namespace: cattle-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Secret
metadata:
  name: cattle-keys-ingress
  namespace: cattle-system
type: Opaque
data:
  tls.crt: <BASE64_CRT>  # ssl cert for ingress. If selfsigned, must be signed by same CA as cattle server
  tls.key: <BASE64_KEY>  # ssl key for ingress. If selfsigned, must be signed by same CA as cattle server
---
apiVersion: v1
kind: Secret
metadata:
  name: cattle-keys-server
  namespace: cattle-system
type: Opaque
data:
  cert.pem: <BASE64_CRT>  # ssl cert for cattle server.
  key.pem: <BASE64_KEY>  # ssl key for cattle server.
  cacerts.pem: <BASE64_CA>  # CA cert used to sign cattle server cert and key
---
apiVersion: v1
kind: Service
metadata:
  namespace: cattle-system
  name: cattle-service
  labels:
    app: cattle
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
    - port: 443
      targetPort: 443
      protocol: TCP
      name: https
  selector:
    app: cattle
---
# NOTE(review): extensions/v1beta1 is the apiVersion this provider's examples
# target; on Kubernetes >= 1.19 Ingress moved to networking.k8s.io/v1 and
# Deployment to apps/v1 — confirm against your cluster version.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  namespace: cattle-system
  name: cattle-ingress-http
  annotations:
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "30"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"  # Max time in seconds for ws to remain shell window open
    nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"  # Max time in seconds for ws to remain shell window open
spec:
  rules:
    - host: <FQDN>  # FQDN to access cattle server
      http:
        paths:
          - backend:
              serviceName: cattle-service
              servicePort: 80
  tls:
    - secretName: cattle-keys-ingress
      hosts:
        - <FQDN>  # FQDN to access cattle server
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  namespace: cattle-system
  name: cattle
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: cattle
    spec:
      serviceAccountName: cattle-admin
      containers:
        - image: rancher/rancher:master
          imagePullPolicy: Always
          name: cattle-server
          ports:
            - containerPort: 80
              protocol: TCP
            - containerPort: 443
              protocol: TCP
          volumeMounts:
            - mountPath: /etc/rancher/ssl
              name: cattle-keys-volume
              readOnly: true
      volumes:
        - name: cattle-keys-volume
          secret:
            defaultMode: 420
            secretName: cattle-keys-server
EOL
}
###############################################################################
# If you need kube_config_cluster.yml for use with kubectl, uncomment the following.
###############################################################################
#resource "local_file" "kube_cluster_yaml" {
# filename = "${path.root}/kube_config_cluster.yml"
# content = "${rke_cluster.cluster.kube_config_yaml}"
#}