#!/bin/bash
# Enter your email (used below for the cluster-admin role binding); if unset,
# fall back to the active gcloud account
EMAIL=${EMAIL:-$(gcloud config get-value account)}
# Enter your domain
DOMAIN=textxd.org
# Enter a region & zone
#
# use the us-central1-a zone to take full advantage of all ML GPU types,
# which are not available in all regions:
# https://cloud.google.com/ml-engine/docs/tensorflow/regions#region_considerations
REGION=us-central1
ZONE=${REGION}-a
TYPE=n1-standard-2
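# other machine types available in the zone can be listed with, e.g.:
#   gcloud compute machine-types list --filter="zone:( ${ZONE} )"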
gcloud config set compute/zone $ZONE
# Enable Kubernetes Engine API
gcloud services enable container.googleapis.com
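# optional sanity check that the API is now enabled:
#   gcloud services list --enabled --filter="name:container.googleapis.com"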
# install kubectl
gcloud components install kubectl
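# NOTE: on distro-managed Cloud SDK installs, `gcloud components` is disabled
# and kubectl must come from the system package manager instead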
gcloud compute addresses create jupyter --region $REGION
IPADDRESS=$(gcloud compute addresses describe jupyter --region $REGION --format="get(address)")
IPADDRESS=${IPADDRESS:-UNCONFIGURED}
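# surface the reserved address (presumably consumed by config.yaml.template)
echo "Reserved static IP: $IPADDRESS"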
HUBTOKEN=$(openssl rand -hex 32)
PROXYTOKEN=$(openssl rand -hex 32)
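# NOTE: with a zero-to-jupyterhub style template, these tokens would
# typically map to secrets such as proxy.secretToken in config.yaml.template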
# Enter a name for your cluster
CLUSTERNAME=shared
cat >config.yaml <<EOF
# Dynamic config.yaml file generated by $0
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
EOF
# expand config.yaml.template with the variables defined above and append the
# result to config.yaml
# NOTE: the eval/heredoc pattern below is a bash-ism and may not work in
# non-bash shells
eval "cat <<EOF
$(<config.yaml.template)
EOF
" | tee -a config.yaml
# create an autoscaling cluster (1-5 nodes) sized for up to ~26 simultaneous users
gcloud beta container clusters create $CLUSTERNAME \
--cluster-version latest \
--node-labels hub.jupyter.org/node-purpose=core \
--num-nodes=2 \
--machine-type=$TYPE \
--zone=$ZONE \
--enable-autorepair \
--enable-autoupgrade \
--enable-autoscaling --min-nodes 1 --max-nodes 5
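# sanity check: the core nodes should register and report Ready shortly
kubectl get nodes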
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole=cluster-admin \
--user=$EMAIL
# get and init helm
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
kubectl --namespace kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
helm version
kubectl patch deployment tiller-deploy --namespace=kube-system --type=json --patch='[{"op": "add", "path": "/spec/template/spec/containers/0/command", "value": ["/tiller", "--listen=localhost:44134"]}]'
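# the patch above makes tiller listen on localhost only, so it is reachable
# through kubectl port-forwarding but not from other pods; this is the
# hardening step recommended by the zero-to-jupyterhub docs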
# add jhub helm charts
helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
helm repo update
# retry until tiller is active and the repo update succeeds
while [ $? -ne 0 ]; do
echo "Retrying..."
sleep 5
helm repo update
done
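# optional: confirm the tiller pod is running before installing the chart
#   kubectl --namespace kube-system get pods -l name=tiller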
# Suggested values: advanced users of Kubernetes and Helm should feel
# free to use different values.
RELEASE=jhub
NAMESPACE=jhub
# install hub
helm upgrade --install $RELEASE jupyterhub/jupyterhub \
--namespace $NAMESPACE \
--version 0.7.0 \
--values config.yaml
# retry until the Docker image is fully pulled and the install succeeds
while [ $? -ne 0 ]; do
sleep 5
echo "Retrying..."
helm upgrade --install $RELEASE jupyterhub/jupyterhub \
--namespace $NAMESPACE \
--version 0.7.0 \
--values config.yaml
done
# print the hub's pods and services
kubectl get pod --namespace $NAMESPACE
kubectl get service --namespace $NAMESPACE
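# optional: follow the pods until the hub and proxy report Running
#   kubectl get pod --namespace $NAMESPACE --watch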
# wait until the external IP address is established
kubectl get service --namespace $NAMESPACE | grep pending
while [ $? -ne 1 ]; do
echo "IP Pending..."
sleep 5
kubectl get service --namespace $NAMESPACE | grep pending
done
kubectl get service --namespace $NAMESPACE
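# the hub should now be reachable on the reserved static IP (plain http on
# port 80, assuming the chart's default proxy-public LoadBalancer service)
echo "JupyterHub should be reachable at http://$IPADDRESS/"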