diff --git a/cicd/k3s-sharding/EPconfig.txt b/cicd/k3s-sharding/EPconfig.txt
new file mode 100644
index 000000000..5521b1d64
--- /dev/null
+++ b/cicd/k3s-sharding/EPconfig.txt
@@ -0,0 +1,34 @@
+{
+   "Attr":[
+      {
+         "hostName":"192.168.80.10",
+         "name":"192.168.80.10_tcp_6443",
+         "inactiveReTries":2,
+         "probeType":"tcp",
+         "probeReq":"",
+         "probeResp":"",
+         "probeDuration":10,
+         "probePort":6443
+      },
+      {
+         "hostName":"192.168.80.11",
+         "name":"192.168.80.11_tcp_6443",
+         "inactiveReTries":2,
+         "probeType":"tcp",
+         "probeReq":"",
+         "probeResp":"",
+         "probeDuration":10,
+         "probePort":6443
+      },
+      {
+         "hostName":"192.168.80.12",
+         "name":"192.168.80.12_tcp_6443",
+         "inactiveReTries":2,
+         "probeType":"tcp",
+         "probeReq":"",
+         "probeResp":"",
+         "probeDuration":10,
+         "probePort":6443
+      }
+   ]
+}
diff --git a/cicd/k3s-sharding/Vagrantfile b/cicd/k3s-sharding/Vagrantfile
new file mode 100644
index 000000000..8d460f5e9
--- /dev/null
+++ b/cicd/k3s-sharding/Vagrantfile
@@ -0,0 +1,72 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+workers = (ENV['WORKERS'] || "2").to_i
+box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
+box_version = "0.7.1"
+Vagrant.configure("2") do |config|
+  config.vm.box = "#{box_name}"
+  config.vm.box_version = "#{box_version}"
+
+  if Vagrant.has_plugin?("vagrant-vbguest")
+    config.vbguest.auto_update = false
+  end
+
+  config.vm.define "host" do |host|
+    host.vm.hostname = 'host1'
+    host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
+    host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
+    host.vm.provision :shell, :path => "host.sh"
+    host.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 2048]
+      vbox.customize ["modifyvm", :id, "--cpus", 1]
+    end
+  end
+
+  config.vm.define "master1" do |master|
+    master.vm.hostname = 'master1'
+    master.vm.network :private_network, ip: "192.168.90.10", :netmask => "255.255.255.0"
+    master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0"
+    master.vm.provision :shell, :path => "master1.sh"
+    master.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 8192]
+      vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+  config.vm.define "master2" do |master|
+    master.vm.hostname = 'master2'
+    master.vm.network :private_network, ip: "192.168.90.11", :netmask => "255.255.255.0"
+    master.vm.network :private_network, ip: "192.168.80.11", :netmask => "255.255.255.0"
+    master.vm.provision :shell, :path => "master2.sh"
+    master.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 8192]
+      vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+  config.vm.define "master3" do |master|
+    master.vm.hostname = 'master3'
+    master.vm.network :private_network, ip: "192.168.90.12", :netmask => "255.255.255.0"
+    master.vm.network :private_network, ip: "192.168.80.12", :netmask => "255.255.255.0"
+    master.vm.provision :shell, :path => "master3.sh"
+    master.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 8192]
+      vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+
+  (1..workers).each do |node_number|
+    config.vm.define "worker#{node_number}" do |worker|
+      worker.vm.hostname = "worker#{node_number}"
+      ip = node_number + 100
+      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+      worker.vm.provision :shell, :path => "worker.sh"
+      worker.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 4096]
+        vbox.customize ["modifyvm", :id, "--cpus", 2]
+      end
+    end
+  end
+end
diff --git a/cicd/k3s-sharding/config.sh b/cicd/k3s-sharding/config.sh
new file mode 100755
index 000000000..b0cfb3651
--- /dev/null
+++ b/cicd/k3s-sharding/config.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f
+vagrant up
+#sudo ip route add 123.123.123.1 via 192.168.90.10 || true
+vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/tcp-onearm-ds.yml'
+vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/udp-onearm-ds.yml'
+vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/sctp-onearm-ds.yml'
diff --git a/cicd/k3s-sharding/host.sh b/cicd/k3s-sharding/host.sh
new file mode 100755
index 000000000..9eb72fda6
--- /dev/null
+++ b/cicd/k3s-sharding/host.sh
@@ -0,0 +1,6 @@
+sudo apt-get install -y lksctp-tools
+sudo ip route add 123.123.123.0/24 via 192.168.90.10
+sysctl net.ipv4.conf.eth1.arp_accept=1
+sysctl net.ipv4.conf.eth2.arp_accept=1
+sysctl net.ipv4.conf.default.arp_accept=1
+echo "Host is up"
diff --git a/cicd/k3s-sharding/host_validation.sh b/cicd/k3s-sharding/host_validation.sh
new file mode 100755
index 000000000..0ad637cda
--- /dev/null
+++ b/cicd/k3s-sharding/host_validation.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+extIP=$(cat /vagrant/extIP)
+extIP1=$(cat /vagrant/extIP1)
+extIP2=$(cat /vagrant/extIP2)
+
+mode="onearm"
+tcp_port=55001
+udp_port=55002
+sctp_port=55003
+
+code=0
+echo TCP Service IP: $extIP
+
+ip route list match $extIP | grep $extIP -A 2
+
+echo -e "\n*********************************************"
+echo "Testing Service"
+echo "*********************************************"
+for((i=0;i<20;i++))
+do
+
+out=$(curl -s --connect-timeout 10 http://$extIP:$tcp_port)
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+    echo -e "K3s-sharding TCP\t($mode)\t[OK]"
+else
+    echo -e "K3s-sharding TCP\t($mode)\t[FAILED]"
+    code=1
+fi
+
+echo UDP Service IP: $extIP1
+
+out=$(timeout 5 /vagrant/udp_client $extIP1 $udp_port)
+if [[ ${out} == *"Client"* ]]; then
+    echo -e "K3s-sharding UDP\t($mode)\t[OK]"
+else
+    echo -e "K3s-sharding UDP\t($mode)\t[FAILED]"
+    code=1
+fi
+
+echo SCTP Service IP: $extIP2
+
+sctp_darn -H 192.168.80.9 -h $extIP2 -p $sctp_port -s < /vagrant/input > output
+#sleep 2
+exp="New connection, peer addresses
+192.168.80.202:55003"
+
+res=`cat output | grep -A 1 "New connection, peer addresses"`
+sudo rm -rf output
+if [[ "$res" == "$exp" ]]; then
+    #echo $res
+    echo -e "K3s-sharding SCTP\t($mode)\t[OK]"
+else
+    echo -e "K3s-sharding SCTP\t($mode)\t[FAILED]"
+    code=1
+fi
+
+
+done
+exit $code
diff --git a/cicd/k3s-sharding/input b/cicd/k3s-sharding/input
new file mode 100644
index 000000000..6fb66a5e2
--- /dev/null
+++ b/cicd/k3s-sharding/input
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/cicd/k3s-sharding/kube-loxilb.yml b/cicd/k3s-sharding/kube-loxilb.yml
new file mode 100644
index 000000000..65816d1c6
--- /dev/null
+++ b/cicd/k3s-sharding/kube-loxilb.yml
@@ -0,0 +1,148 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-loxilb
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-loxilb
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+      - services
+      - namespaces
+      - services/status
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+      - update
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - get
+      - watch
+      - list
+  - apiGroups:
+      - authentication.k8s.io
+    resources:
+      - tokenreviews
+    verbs:
+      - create
+  - apiGroups:
+      - authorization.k8s.io
+    resources:
+      - subjectaccessreviews
+    verbs:
+      - create
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-loxilb
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-loxilb
+subjects:
+  - kind: ServiceAccount
+    name: kube-loxilb
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kube-loxilb
+  namespace: kube-system
+  labels:
+    app: loxilb
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loxilb
+  template:
+    metadata:
+      labels:
+        app: loxilb
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      tolerations:
+        - effect: NoSchedule
+          operator: Exists
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+        - key: "node-role.kubernetes.io/master"
+          operator: Exists
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: Exists
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: "node-role.kubernetes.io/master"
+                    operator: Exists
+                  - key: "node-role.kubernetes.io/control-plane"
+                    operator: Exists
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-loxilb
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: kube-loxilb
+          image: ghcr.io/loxilb-io/kube-loxilb:latest
+          imagePullPolicy: Always
+          command:
+            - /bin/kube-loxilb
+          args:
+            #- --loxiURL=http://192.168.80.10:11111
+            - --cidrPools=defaultPool=192.168.80.200/24
+            #- --setBGP=64512
+            - --setRoles=0.0.0.0
+            - --setUniqueIP
+            - --numZoneInstances=3
+            #- --monitor
+            #- --setBGP
+            #- --setLBMode=1
+            #- --config=/opt/loxilb/agent/kube-loxilb.conf
+          resources:
+            requests:
+              cpu: "100m"
+              memory: "50Mi"
+            limits:
+              cpu: "100m"
+              memory: "50Mi"
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["NET_ADMIN", "NET_RAW"]
diff --git a/cicd/k3s-sharding/lbconfig.txt b/cicd/k3s-sharding/lbconfig.txt
new file mode 100644
index 000000000..f96094054
--- /dev/null
+++ b/cicd/k3s-sharding/lbconfig.txt
@@ -0,0 +1,41 @@
+{
+   "lbAttr":[
+      {
+         "serviceArguments":{
+            "externalIP":"192.168.80.80",
+            "port":6443,
+            "protocol":"tcp",
+            "sel":0,
+            "mode":2,
+            "BGP":false,
+            "Monitor":true,
+            "inactiveTimeOut":240,
+            "block":0
+         },
+         "secondaryIPs":null,
+         "endpoints":[
+            {
+               "endpointIP":"192.168.80.10",
+               "targetPort":6443,
+               "weight":1,
+               "state":"active",
+               "counter":""
+            },
+            {
+               "endpointIP":"192.168.80.11",
+               "targetPort":6443,
+               "weight":1,
+               "state":"active",
+               "counter":""
+            },
+            {
+               "endpointIP":"192.168.80.12",
+               "targetPort":6443,
+               "weight":1,
+               "state":"active",
+               "counter":""
+            }
+         ]
+      }
+   ]
+}
diff --git a/cicd/k3s-sharding/loxilb.yml b/cicd/k3s-sharding/loxilb.yml
new file mode 100644
index 000000000..6869f969f
--- /dev/null
+++ b/cicd/k3s-sharding/loxilb.yml
@@ -0,0 +1,71 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: loxilb-lb
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: loxilb-app
+  template:
+    metadata:
+      name: loxilb-lb
+      labels:
+        app: loxilb-app
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      tolerations:
+        - key: "node-role.kubernetes.io/master"
+          operator: Exists
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: Exists
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: "node-role.kubernetes.io/master"
+                    operator: Exists
+                  - key: "node-role.kubernetes.io/control-plane"
+                    operator: Exists
+      volumes:
+        - name: hllb
+          hostPath:
+            path: /etc/loxilb
+            type: DirectoryOrCreate
+      containers:
+        - name: loxilb-app
+          image: "ghcr.io/loxilb-io/loxilb:latest"
+          imagePullPolicy: Always
+          command:
+            - /root/loxilb-io/loxilb/loxilb
+          args:
+            - --egr-hooks
+            - --blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico
+          volumeMounts:
+            - name: hllb
+              mountPath: /etc/loxilb
+          ports:
+            - containerPort: 11111
+            - containerPort: 179
+          securityContext:
+            privileged: true
+            capabilities:
+              add:
+                - SYS_ADMIN
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: loxilb-lb-service
+  namespace: kube-system
+spec:
+  clusterIP: None
+  selector:
+    app: loxilb-app
+  ports:
+    - name: loxilb-app
+      port: 11111
+      targetPort: 11111
+      protocol: TCP
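Note: loxilb.yml above runs loxilb as a hostNetwork DaemonSet pinned to the control-plane nodes and mounts /etc/loxilb, where the master scripts below copy lbconfig.txt and EPconfig.txt. Each loxilb instance therefore boots with the API-server rule already in place: VIP 192.168.80.80:6443 spread across the three masters ("mode":2 selects full-NAT), health-checked by the TCP probes defined in EPconfig.txt. For orientation only, a rough interactive equivalent with loxicmd is sketched here; the pod name is a placeholder and the flag spellings should be checked against the loxilb docs:

    # Hypothetical loxicmd equivalent of the rule persisted in lbconfig.txt
    kubectl -n kube-system exec <loxilb-pod> -- loxicmd create lb 192.168.80.80 \
        --tcp=6443:6443 --endpoints=192.168.80.10:1,192.168.80.11:1,192.168.80.12:1 \
        --mode=fullnat --monitor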
diff --git a/cicd/k3s-sharding/master1.sh b/cicd/k3s-sharding/master1.sh
new file mode 100755
index 000000000..a70eb09de
--- /dev/null
+++ b/cicd/k3s-sharding/master1.sh
@@ -0,0 +1,13 @@
+sudo su
+export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.90' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+curl -fL https://get.k3s.io | sh -s - server --node-ip=192.168.80.10 --disable servicelb --disable traefik --cluster-init external-hostname=192.168.80.10 --node-external-ip=192.168.80.80 --disable-cloud-controller --flannel-iface=eth1
+curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh -
+sleep 60
+echo $MASTER_IP > /vagrant/master-ip
+cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token
+cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml
+sed -i -e "s/127.0.0.1/192.168.80.80/g" /vagrant/k3s.yaml
+sudo mkdir -p /etc/loxilb
+sudo cp /vagrant/lbconfig.txt /etc/loxilb/
+sudo cp /vagrant/EPconfig.txt /etc/loxilb/
+/vagrant/wait_ready.sh
diff --git a/cicd/k3s-sharding/master2.sh b/cicd/k3s-sharding/master2.sh
new file mode 100755
index 000000000..354d86a52
--- /dev/null
+++ b/cicd/k3s-sharding/master2.sh
@@ -0,0 +1,13 @@
+sudo su
+export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+export MASTER_ADDR=$(cat /vagrant/master-ip)
+export NODE_TOKEN=$(cat /vagrant/node-token)
+sudo mkdir -p /etc/loxilb
+sudo cp /vagrant/lbconfig.txt /etc/loxilb/
+sudo cp /vagrant/EPconfig.txt /etc/loxilb/
+#curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 --disable-cloud-controller -t ${NODE_TOKEN}
+curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.80 -t ${NODE_TOKEN} --flannel-iface=eth1
+curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh -
+#sudo kubectl apply -f /vagrant/loxilb.yml
+#sudo kubectl apply -f /vagrant/kube-loxilb.yml
+/vagrant/wait_ready.sh
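Note: master1.sh bootstraps the first k3s server with --cluster-init, then publishes the join token and a kubeconfig rewritten to point at the loxilb VIP (the sed swaps 127.0.0.1 for 192.168.80.80). master2.sh above and master3.sh below join the cluster directly at 192.168.80.10:6443; only the workers join through the VIP. Once loxilb is running, the VIP can be sanity-checked from any of the VMs:

    # Any HTTP reply here shows the VIP is forwarding to a kube-apiserver
    curl -k https://192.168.80.80:6443/version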
diff --git a/cicd/k3s-sharding/master3.sh b/cicd/k3s-sharding/master3.sh
new file mode 100755
index 000000000..d05e74d66
--- /dev/null
+++ b/cicd/k3s-sharding/master3.sh
@@ -0,0 +1,13 @@
+sudo su
+export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+export MASTER_ADDR=$(cat /vagrant/master-ip)
+export NODE_TOKEN=$(cat /vagrant/node-token)
+sudo mkdir -p /etc/loxilb
+sudo cp /vagrant/lbconfig.txt /etc/loxilb/
+sudo cp /vagrant/EPconfig.txt /etc/loxilb/
+#curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 --disable-cloud-controller -t ${NODE_TOKEN}
+curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.12 external-hostname=192.168.80.12 --node-external-ip=192.168.80.80 -t ${NODE_TOKEN} --flannel-iface=eth1
+curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh -
+sudo kubectl apply -f /vagrant/loxilb.yml
+sudo kubectl apply -f /vagrant/kube-loxilb.yml
+/vagrant/wait_ready.sh
diff --git a/cicd/k3s-sharding/rmconfig.sh b/cicd/k3s-sharding/rmconfig.sh
new file mode 100755
index 000000000..1f85eb636
--- /dev/null
+++ b/cicd/k3s-sharding/rmconfig.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+sudo ip route del 123.123.123.1 via 192.168.90.10 || true
+vagrant destroy -f worker1
+vagrant destroy -f worker2
+vagrant destroy -f master1
+vagrant destroy -f master2
+vagrant destroy -f master3
+vagrant destroy -f host
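Note: the three manifests that follow create one LoadBalancer service per protocol, each bound to loxilb through loadBalancerClass: loxilb.io/loxilb and annotated with lbmode "onearm" plus endpoint liveness probing: TCP 55001 -> nginx:80, UDP 55002 -> udp-echo:33333, SCTP 55003 -> sctp_darn:9999. With the defaultPool from kube-loxilb.yml they should receive 192.168.80.200, .201 and .202, the same addresses validation.sh pins. Manual spot-checks from the client VM, assuming that allocation:

    curl http://192.168.80.200:55001          # expect the nginx welcome page
    /vagrant/udp_client 192.168.80.201 55002  # expect a "Client" echo reply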
diff --git a/cicd/k3s-sharding/sctp-onearm-ds.yml b/cicd/k3s-sharding/sctp-onearm-ds.yml
new file mode 100644
index 000000000..793bab3ef
--- /dev/null
+++ b/cicd/k3s-sharding/sctp-onearm-ds.yml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: sctp-onearm-svc
+  annotations:
+    loxilb.io/lbmode: "onearm"
+    loxilb.io/liveness: "yes"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    what: sctp-onearm-test
+  ports:
+    - port: 55003
+      protocol: SCTP
+      targetPort: 9999
+  type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: sctp-onearm-ds
+  labels:
+    what: sctp-onearm-test
+spec:
+  selector:
+    matchLabels:
+      what: sctp-onearm-test
+  template:
+    metadata:
+      labels:
+        what: sctp-onearm-test
+    spec:
+      containers:
+        - name: sctp-onearm-pod
+          image: loxilbio/sctp-darn:latest
+          command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"]
+          ports:
+            - containerPort: 9999
diff --git a/cicd/k3s-sharding/tcp-onearm-ds.yml b/cicd/k3s-sharding/tcp-onearm-ds.yml
new file mode 100644
index 000000000..b1ea660d7
--- /dev/null
+++ b/cicd/k3s-sharding/tcp-onearm-ds.yml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: tcp-onearm-svc
+  annotations:
+    loxilb.io/lbmode: "onearm"
+    loxilb.io/liveness: "yes"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    what: tcp-onearm-test
+  ports:
+    - port: 55001
+      targetPort: 80
+  type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: tcp-onearm-ds
+  labels:
+    what: tcp-onearm-test
+spec:
+  selector:
+    matchLabels:
+      what: tcp-onearm-test
+  template:
+    metadata:
+      labels:
+        what: tcp-onearm-test
+    spec:
+      containers:
+        - name: tcp-onearm-pod
+          image: ghcr.io/loxilb-io/nginx:stable
+          ports:
+            - containerPort: 80
diff --git a/cicd/k3s-sharding/udp-onearm-ds.yml b/cicd/k3s-sharding/udp-onearm-ds.yml
new file mode 100644
index 000000000..01bcf8e70
--- /dev/null
+++ b/cicd/k3s-sharding/udp-onearm-ds.yml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: udp-onearm-svc
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "onearm"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    what: udp-onearm-test
+  ports:
+    - port: 55002
+      protocol: UDP
+      targetPort: 33333
+  type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: udp-onearm-ds
+  labels:
+    what: udp-onearm-test
+spec:
+  selector:
+    matchLabels:
+      what: udp-onearm-test
+  template:
+    metadata:
+      labels:
+        what: udp-onearm-test
+    spec:
+      containers:
+        - name: udp-onearm-test
+          image: ghcr.io/loxilb-io/udp-echo:latest
+          ports:
+            - containerPort: 33333
diff --git a/cicd/k3s-sharding/udp_client b/cicd/k3s-sharding/udp_client
new file mode 100755
index 000000000..b70cd81fc
Binary files /dev/null and b/cicd/k3s-sharding/udp_client differ
diff --git a/cicd/k3s-sharding/validation.sh b/cicd/k3s-sharding/validation.sh
new file mode 100755
index 000000000..4af978908
--- /dev/null
+++ b/cicd/k3s-sharding/validation.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+source ../common.sh
+echo sharding-test
+
+if [ "$1" ]; then
+    KUBECONFIG="$1"
+fi
+
+# Set space as the delimiter
+IFS=' '
+
+sleep 45
+extIP="192.168.80.200"
+extIP1="192.168.80.201"
+extIP2="192.168.80.202"
+echo $extIP
+echo $extIP > extIP
+echo $extIP1 > extIP1
+echo $extIP2 > extIP2
+
+echo "******************************************************************************"
+echo -e "\nSVC List"
+echo "******************************************************************************"
+vagrant ssh master1 -c 'sudo kubectl get svc' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nCluster Info"
+echo "******************************************************************************"
+vagrant ssh master1 -c 'sudo kubectl cluster-info' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nPods"
+echo "******************************************************************************"
+vagrant ssh master1 -c 'sudo kubectl get pods -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nNodes"
+echo "******************************************************************************"
+vagrant ssh master1 -c 'sudo kubectl get nodes' 2> /dev/null
+
+vagrant ssh host -c 'sudo /vagrant/host_validation.sh' 2> /dev/null
+sudo rm -f extIP extIP1 extIP2
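Note: validation.sh writes the expected VIPs to extIP/extIP1/extIP2, dumps services, pods and nodes into the CI log, and then runs host_validation.sh on the client VM. wait_ready.sh below gates every provisioning script on cluster health by polling sudo kubectl get pods -A until no line lacks "Running" (the "READY" match skips the header), retrying up to 40 times at 10 s intervals. A terser sketch of the same wait, assuming every pod is expected to reach Ready (Completed pods would need excluding):

    sudo kubectl wait pod --all --all-namespaces --for=condition=Ready --timeout=400s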
diff --git a/cicd/k3s-sharding/wait_ready.sh b/cicd/k3s-sharding/wait_ready.sh
new file mode 100755
index 000000000..5ff06e373
--- /dev/null
+++ b/cicd/k3s-sharding/wait_ready.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+function wait_cluster_ready {
+    Res=$(sudo kubectl get pods -A |
+    while IFS= read -r line; do
+        if [[ "$line" != *"Running"* && "$line" != *"READY"* ]]; then
+            echo "not ready"
+            return
+        fi
+    done)
+    if [[ $Res == *"not ready"* ]]; then
+        return 1
+    fi
+    return 0
+}
+
+function wait_cluster_ready_full {
+    i=1
+    nr=0
+    for ((;;)) do
+        wait_cluster_ready
+        nr=$?
+        if [[ $nr == 0 ]]; then
+            echo "Cluster is ready"
+            break
+        fi
+        i=$(( $i + 1 ))
+        if [[ $i -ge 40 ]]; then
+            echo "Cluster is not ready. Giving up"
+            exit 1
+        fi
+        echo "Cluster is not ready...."
+        sleep 10
+    done
+}
+
+wait_cluster_ready_full
diff --git a/cicd/k3s-sharding/worker.sh b/cicd/k3s-sharding/worker.sh
new file mode 100644
index 000000000..016a47b40
--- /dev/null
+++ b/cicd/k3s-sharding/worker.sh
@@ -0,0 +1,12 @@
+sudo su
+export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+export MASTER_ADDR=$(cat /vagrant/master-ip)
+export NODE_TOKEN=$(cat /vagrant/node-token)
+mkdir -p /etc/rancher/k3s
+cp -f /vagrant/k3s.yaml /etc/rancher/k3s/k3s.yaml
+curl -sfL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - agent --server https://192.168.80.80:6443 --node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR} -t ${NODE_TOKEN} --flannel-iface=eth1
+#sudo kubectl apply -f /vagrant/loxilb-peer.yml
+#sudo kubectl apply -f /vagrant/nginx.yml
+#sudo kubectl apply -f /vagrant/udp.yml
+#sudo kubectl apply -f /vagrant/sctp.yml
+/vagrant/wait_ready.sh
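Note: end to end, the scenario is driven from this directory in three steps:

    ./config.sh       # destroy stale VMs, bring the topology up, apply the three onearm services
    ./validation.sh   # record the expected VIPs and run host_validation.sh from the client VM
    ./rmconfig.sh     # tear the testbed down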