Skip to content

Commit

Permalink
Merge pull request #385 from TrekkieCoder/main
Browse files Browse the repository at this point in the history
PR - CI/CD for Calico with kube-proxy (IPVS)
  • Loading branch information
UltraInstinct14 authored Sep 1, 2023
2 parents e5bbbc9 + 1b3cc72 commit 31d1ba8
Show file tree
Hide file tree
Showing 24 changed files with 1,060 additions and 0 deletions.
36 changes: 36 additions & 0 deletions .github/workflows/k8s-calico-ipvs.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# CI workflow: brings up the k8s-calico-ipvs Vagrant testbed on a self-hosted
# runner, runs the validation suite, and always tears the testbed down.
name: K8s-Calico-Cluster-IPVS-Sanity-CI
on:
  #schedule:
  # Runs "At 13:00 UTC every day-of-week"
  #- cron: '0 13 * * *'
  workflow_dispatch:
    inputs:
      testName:
        description: 'Test Run-Name'
        required: true
        default: 'k8s-calico-cluster-ipvs'
jobs:
  test-runner:
    name: k8s-calico-cluster-ipvs-sanity
    runs-on: self-hosted
    # NOTE(review): 'tagName' is not a declared input of this workflow (only
    # 'testName' is), so the second clause is always true for manual runs —
    # confirm whether 'testName' was intended here.
    if: github.repository == 'loxilb-io/loxilb'
      && github.event.inputs.tagName == ''
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          submodules: recursive

      - name: Run the test
        run: |
          cd cicd/k8s-calico-ipvs
          ./config.sh
          ./validation.sh
          cd -
      # Teardown runs whether the test step passed or failed.
      - name: Clean test-bed
        if: success() || failure()
        run: |
          cd cicd/k8s-calico-ipvs || true
          ./rmconfig.sh
          cd -
78 changes: 78 additions & 0 deletions cicd/k8s-calico-ipvs/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Vagrant topology for the k8s-calico-ipvs CI testbed: one loxilb VM plus a
# kubeadm cluster (one master and N workers), parameterized by
# yaml/settings.yaml.

require "yaml"
settings = YAML.load_file "yaml/settings.yaml"

worker_count = settings["nodes"]["workers"]["count"]

Vagrant.configure("2") do |cfg|
  # Guest-additions auto-update only slows provisioning; disable when the
  # vbguest plugin is installed.
  cfg.vbguest.auto_update = false if Vagrant.has_plugin?("vagrant-vbguest")

  # loxilb load-balancer VM with an inside and an outside private network.
  cfg.vm.define "loxilb" do |lb|
    lb.vm.box = settings["software"]["loxilb"]["box"]["name"]
    lb.vm.box_version = settings["software"]["loxilb"]["box"]["version"]
    lb.vm.hostname = 'llb1'
    #lb.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp"
    lb.vm.network :private_network, ip: settings["network"]["iloxilb_ip"], netmask: "255.255.255.0"
    lb.vm.network :private_network, ip: settings["network"]["oloxilb_ip"], netmask: "255.255.255.0"
    lb.vm.provision :shell, path: "node_scripts/loxilb.sh"
    lb.vm.provider :virtualbox do |vb|
      vb.customize ["modifyvm", :id, "--memory", 6000]
      vb.customize ["modifyvm", :id, "--cpus", 4]
    end
  end

  # All Kubernetes nodes share one base box.
  cfg.vm.box = settings["software"]["cluster"]["box"]

  # Kubernetes control-plane VM: common node setup, then kubeadm init + Calico.
  cfg.vm.define "master" do |master|
    master.vm.hostname = 'master'
    master.vm.network :private_network, ip: settings["network"]["control_ip"], netmask: "255.255.255.0"
    master.vm.provision "shell",
      env: {
        "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
        "ENVIRONMENT" => settings["environment"],
        "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
        "OS" => settings["software"]["os"]
      },
      path: "node_scripts/common.sh"
    master.vm.provision "shell",
      env: {
        "CALICO_VERSION" => settings["software"]["calico"],
        "CONTROL_IP" => settings["network"]["control_ip"],
        "POD_CIDR" => settings["network"]["pod_cidr"],
        "SERVICE_CIDR" => settings["network"]["service_cidr"]
      },
      path: "node_scripts/master.sh"

    master.vm.provider :virtualbox do |vb|
      vb.customize ["modifyvm", :id, "--memory", 4096]
      vb.customize ["modifyvm", :id, "--cpus", 2]
    end
  end

  # Worker VMs: worker1..workerN at 192.168.80.(100+N).
  # NOTE(review): the subnet is hard-coded here rather than derived from
  # settings — confirm it matches network.control_ip in yaml/settings.yaml.
  (1..worker_count).each do |idx|
    cfg.vm.define "worker#{idx}" do |worker|
      worker.vm.hostname = "worker#{idx}"
      worker.vm.network :private_network, ip: "192.168.80.#{idx + 100}", netmask: "255.255.255.0"
      worker.vm.provision "shell",
        env: {
          "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
          "ENVIRONMENT" => settings["environment"],
          "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
          "OS" => settings["software"]["os"]
        },
        path: "node_scripts/common.sh"
      worker.vm.provision "shell", path: "node_scripts/worker.sh"

      worker.vm.provider :virtualbox do |vb|
        vb.customize ["modifyvm", :id, "--memory", 4096]
        vb.customize ["modifyvm", :id, "--cpus", 2]
      end
    end
  end
end
49 changes: 49 additions & 0 deletions cicd/k8s-calico-ipvs/config.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/bin/bash
# Bring up the k8s-calico-ipvs testbed: destroy stale VirtualBox VMs, start
# the Vagrant topology, wait until all pods are Running, then create the
# test services.

# Destroy any leftover VirtualBox VMs from earlier runs.
# `vagrant global-status` columns: id name provider state directory.
VMs=$(vagrant global-status | grep -i virtualbox)
while IFS= read -r line; do
    read -a vm <<< "$line"
    cd "${vm[4]}" >/dev/null 2>&1
    echo "Destroying ${vm[1]}"
    vagrant destroy -f "${vm[1]}"
    cd - >/dev/null 2>&1
done <<< "$VMs"

vagrant up

# Host routes towards the advertised service VIPs, via loxilb's outside side.
sudo ip route add 123.123.123.1 via 192.168.90.9
sudo ip route add 124.124.124.1 via 192.168.90.9
sudo ip route add 125.125.125.1 via 192.168.90.9

# Wait up to 60 x 10s for every pod in the cluster to report Running.
for ((i = 1; i <= 60; i++)); do
    fin=1
    pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")

    while IFS= read -r pod_line; do
        read -a pod <<< "$pod_line"
        if [[ ${pod[3]} != *"Running"* ]]; then
            echo "${pod[1]} is not UP yet"
            fin=0
        fi
    done <<< "$pods"
    if [ $fin -eq 1 ]; then
        break
    fi
    echo "Will try after 10s"
    sleep 10
done

#Create default Service
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp.yml' 2> /dev/null

#Create onearm Service
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_onearm.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_onearm.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_onearm.yml' 2> /dev/null

#Create fullnat Service
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null
19 changes: 19 additions & 0 deletions cicd/k8s-calico-ipvs/configs/config
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1Ea3dNVEF5TkRZek9Gb1hEVE16TURneU9UQXlORFl6T0Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSzhTCkZwZTh0UVlQOVdvVTZYNVAwanBuSHMxM1hYQ3NnVk03QjJDRG5NWGpFeEExbm9xWElybk1YbFlmVTZ2blhLbSsKUDRqRGFUM0puK1hxaHFLUml3dmkra01lL1FUSVJiUThwVmVtUnBXaDB0dWNPbTJmeno0UkQxeS83ZmJHd0VGWgpCM3BRNm9CSVdXR0F3VERISDdoU0RlU0czdnhFRzdMdVNnUElxYWM1MEpkVWYrVXJ5SUlzdTlnYkJZN2NGcjhzCm03YmFTUkF5NkZyWlRveTBPRW1sT2NWWTBWYUpZdWpPdmVkTk5KRkw1RTlCenArbjA0UVcwVjRRd0NDaUIwQ3EKRjJTdWo4aDRSZWk0VTE5WmkvN2FyYyt5MTZUbURXem00Y0FRa05DWEY4dExYRTlMb3hUVEgvdGJFUHhORUk0KwpDT1BGZ0puVjd4R0l2QVh5UEVzQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJTHBkU2YrYzhZS0tlb0ZQVk1pUFZQWVREaUdNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS2JtYnBzTlF4UlFvTjRuMnUxOQo5Ump0WTJvMmUvSFA3NCthdlVKRitqM3FqRGFSdnhBS3JFRVNLMGJ1WmpFNzdQcmNtRnB3UFRmQ2wxazRDV2hyCnhPM1U0a2tXamQrWXlGc3BVYjJIaUE5R3ZhTXczWDBxaWtDRGZjNGxIaExXOWtRMWNpUHNyMmhPQURyZ2hNVUgKWWxEM2RBSnRuZzRXVm5TU2NiODk3Vm9DUU56NExlNi9WaWszRXA2ZG9pb1ZQbTZsNW9JNlU1ODJPMlorSGkwYgo4bzgxK3QxRVBvZ01tQkJWMVdWSzd4UkVLRFJuMmgzQjY4NHc0TGgyNkdyQ1NZVGlwRGhjUGJmRFJvS2o4aEFZClM0YWFRSDJLZ2kvdnkvY0VPVXc1Sk9VdmZBWUlmQ0R1d3k4K1NlOUc5NStaZStoTGdUcHkvZ3hTMHNhN20xTVQKM3FRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
server: https://192.168.80.10:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJTjd6VjBzQnlrQUF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBNU1ERXdNalEyTXpoYUZ3MHlOREE0TXpFd01qUTJOREZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTdEekJqTkU5UlpMemcvK3UKMVRjTzN2R21ybS9jT1Y1VEVvZVpvZTlTMzFPa2VxektQTElGZGFvcXBDY2VLN3V2WTg5WEFWV1JVNm4rV080egpCWFllakZ3Y3lzS2lPM3pFQ29ERm9NNEQvRllzZ3ZvWUtNWXFGWmdlaUdvS3UzYzhqVGhjL3lkajVRdG1MOW5uCkl4ekxPdHc4aitKZ3dJN3RLODduNG4rdUZtb204ZXViRUwzZkNqdi9uN212VE5GNU9NK0FOYyszZnpjYkhTUFIKVFZ1K1lGZGZUT2tXc0piaWFKVURacWtIZUtKem82Y1NHOUNqN29acjArSFViWjRUdk1MZkhwVWtCMktKZy9HWAo1SXlyQzJFU2w3TkI4NkdwN1duTmFveE83NVphVmgxZnJscGYxSXN1UWxsYkFhdWdJamk2bm1ESXNsQk5GcEJjCm9uWUw3UUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTQzZYVW4vblBHQ2lucUJUMVRJajFUMkV3NApoakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBUXloclNzOWxLM2hXOW1EMUc1MWJJSk1TbGtKNmFpdEFJUXRXCnFodzBUQVNPZFl0dlRFV1NUdjNQSk50MndwcUNoQ1JVaU8xWTRpZ29tZjYwOHhmY0o5K2RTSUV5aTZGYkh4dHYKb2RZcDN0UzhtTXBGNkJPVUhMY2xBVndTYjBNNFpHS3kvVVRtWGhlU0oyczFrc21CRTlaOWFLTjRTOWZNSUd1SQpkME44RGdKZ3BXN3RIK29tTEgyUDEzZUpHS2VXMTVROGhxNThiSytpMGxqZy9rY1p5UndJM0VWM2lJbTlCNEJSCko2OXcvRTNiNitETkJOM0dkSFpwM1pFbG8xNGNtU1ltTjduaC9EdThjYUtWRkJLeUxEeXJ3Q3NXM2dwU0ROL0sKc1hUc0NqY0RNNVBCMEhrRG1UVFNiNStKaWE0bnZPcllkenZudkFtNWRETFhVSlFlN1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBN0R6QmpORTlSWkx6Zy8rdTFUY08zdkdtcm0vY09WNVRFb2Vab2U5UzMxT2tlcXpLClBMSUZkYW9xcENjZUs3dXZZODlYQVZXUlU2bitXTzR6QlhZZWpGd2N5c0tpTzN6RUNvREZvTTREL0ZZc2d2b1kKS01ZcUZaZ2VpR29LdTNjOGpUaGMveWRqNVF0bUw5bm5JeHpMT3R3OGorSmd3STd0Szg3bjRuK3VGbW9tOGV1YgpFTDNmQ2p2L243bXZUTkY1T00rQU5jKzNmemNiSFNQUlRWdStZRmRmVE9rV3NKYmlhSlVEWnFrSGVLSnpvNmNTCkc5Q2o3b1pyMCtIVWJaNFR2TUxmSHBVa0IyS0pnL0dYNUl5ckMyRVNsN05CODZHcDdXbk5hb3hPNzVaYVZoMWYKcmxwZjFJc3VRbGxiQWF1Z0lqaTZubURJc2xCTkZwQmNvbllMN1FJREFRQUJBb0lCQUFyeWdkR2h2S0lsdmkwbQp3eFpVVjljVEFiTmhzYVhpN2h5VXRoVGYvMG9rR1NJcU1iRUFXdXBwK1ZIa0VpemFwTFVPWGF6TkowL21OOGd0Ck9hWU9KRHBDNW42cTZGT3pZMjVOSzF0WlVLdjMzbFl2ZXNFZzljQk1iVlhLL0RaVnZ6T1lJZzhjNXk4dENRNDgKbmM1dHZpazdIWDlaY1R4Ykl6aDlmUmRzN1VkU2l6QVljbWFOUlhyWVdwanZobGVxeS9icml6cmR6R2kwR3JxUgpGMFpSK2lqaVIzMU03Z0VrTy9LVmpROUpqUW5RSm04Rmc1Z05zN1hOd0s4ZFRMZ1NaNTI4OWdpaVVLYVdzaFZ5CnZGcUpVWXZQSkJFZlo1TnFZT2tDSEtVa1JiWDRvMGgyZ05PV2EvdUZhTTZkeXlqOWk3d0o4UjhzeHhpUGxYS2UKeXluYUNPRUNnWUVBN3NjSzRTSDk2UFI0UURrVmlhQXVwU0RPaWo4ODFzN1FXa2VNRlV3RjdEUS9HTVFYN05PZQpBVmRNT1EvM0IrSEdPK1JLSWVpM0NiZ2wzdjljbzJBWUZWanBTdHozUEhxVXMvZ0FoWjNuM0hOaGhBanZwcDk3CkJXUGhycmZmSVo4T0ZIK1NueGRCS0tMUTBPSWdMUURQWndoMzFGQ3d6dXdUU1ZPUnhrOTNUbFVDZ1lFQS9VYlAKVWVvT21zajhzV0kvWklxUjFRdDhSUlNVZE1BMGorTXQzL2JHL0tzQkVETk8wZHJDcTdtVHJGZitGcElmdDFxTgpXdk92cWtVVzY2djU4OU1EWFdDRGlOT1ZpUkNjTm5qb0VaUWpLQVREZDBVNFlRTTdtcm4vclhoYUtQNjg3MWdlCmxENThJTng1V0x6UHYwSjNMWlpkN041WUNOV2JWckNEaVY5MEx6a0NnWUEya0VWc0xOaFk0NFNYS0hSRGZ0Y3AKNU5WTndpV2s4SjJzQTYxL01HQXFHY1pSWW40VklFWjdCL2ZqRWtMaENqYkNlT2gzMXpYOGdwZ2szVFhPSDZkaApPWEFXSzJoVDZhOFJjUnF0YmVnTitFL1FYRHBuV1FwRHNROWhYYU1maTdrcjlmc0xYOFVFQkRDeml2alBUK0FWCksxbzJxam05RHpWWkREL0RrV1V5QVFLQmdRREdrS0V4STBMWDd3TE5QcTFjY1piclk5bkZmdUQwdDB0K0V5bUEKRU1Ub2lsaUhEdktZYTkxN0xENnVPejRsQytKNXFUQnhRZU5TcGwvVjNEcFdBZlQ5WEJGRFVENUgreEc5VXdUOQo0eG04NGg4c2ZzUTRxb1FzUmU1QlhiMnhyaVVKc0JncE9PT3dENm5DL0NRVFdsUjlGUW9HRGpzT2tnajY2ZC8xCjd6UHpZUUtCZ1FEZSt1dnFPQ2F3dk9GQ2J
2N3IyUXdIN3pHLzJMdXJqMHd0UENYd09IbXFiWWpvSnhEZGlyd2oKc0JsOXUxTzNLVkR5TmJGS2NHOUFyVmlEU3JOUFkrNEdiZCtqYTlheHFVUGZHU3pBWDlwRUdDcmdacHRrK0d0Ngo5SFBsbkFsYXpzTWgyUGhxSlN4U3ZRQ2h3QXk0cmZMdlltMVhkYjhaYnkrVndCTlZVL21obXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
1 change: 1 addition & 0 deletions cicd/k8s-calico-ipvs/configs/join.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Auto-generated by master.sh via `kubeadm token create --print-join-command`;
# this committed token/hash is a stale placeholder overwritten on every run.
kubeadm join 192.168.80.10:6443 --token erzh6n.ysnbfgbxinfum5ps --discovery-token-ca-cert-hash sha256:43c9a9c2b22f053d87a0e11df980c04f8171778049609a62d29db794e30ece03
6 changes: 6 additions & 0 deletions cicd/k8s-calico-ipvs/input
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@






93 changes: 93 additions & 0 deletions cicd/k8s-calico-ipvs/node_scripts/common.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
#!/bin/bash
#
# Common setup for all servers (Control Plane and Nodes): DNS, swap off,
# kernel modules (incl. IPVS), CRI-O runtime, and kubelet/kubectl/kubeadm.

set -euxo pipefail

# Variable Declaration

# DNS Setting
if [ ! -d /etc/systemd/resolved.conf.d ]; then
    sudo mkdir /etc/systemd/resolved.conf.d/
fi
cat <<EOF | sudo tee /etc/systemd/resolved.conf.d/dns_servers.conf
[Resolve]
DNS=${DNS_SERVERS}
EOF

sudo systemctl restart systemd-resolved

# disable swap
sudo swapoff -a

# keeps the swap off during reboot
(crontab -l 2>/dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true
sudo apt-get update -y

# Install CRI-O Runtime

# CRI-O repositories are keyed by the k8s minor version, e.g. "1.27".
VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"

# Create the .conf file to load the modules at bootup
cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Install ipvs related modules (kube-proxy runs in IPVS mode in this testbed)
sudo modprobe ip_vs
sudo modprobe ip_vs_rr
sudo modprobe ip_vs_wrr
sudo modprobe ip_vs_sh
#sudo modprobe nf_conntrack_ipv4

# IPVS tunable — presumably required by the SCTP service tests; verify.
sudo sysctl net.ipv4.vs.sloppy_sctp=1

# Set up required sysctl params, these persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

sudo sysctl --system

cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
EOF
cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /
EOF

curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -

sudo apt-get update
sudo apt-get install cri-o cri-o-runc -y

cat >> /etc/default/crio << EOF
${ENVIRONMENT}
EOF
sudo systemctl daemon-reload
sudo systemctl enable crio --now

echo "CRI runtime installed successfully"

# Install kubelet, kubectl and kubeadm from the upstream apt repository.
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
# FIX: /etc/apt/keyrings does not exist on Ubuntu releases before 22.04, so
# `gpg --dearmor -o` into it would fail; create it first (per kubeadm docs).
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update -y
sudo apt-get install -y kubelet="$KUBERNETES_VERSION" kubectl="$KUBERNETES_VERSION" kubeadm="$KUBERNETES_VERSION"
sudo apt-get update -y
sudo apt-get install -y jq
sudo apt-get install -y ipvsadm

# Advertise the private-network (eth1) address to kubelet, not the NAT one.
local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')"
cat > /etc/default/kubelet << EOF
KUBELET_EXTRA_ARGS=--node-ip=$local_ip
${ENVIRONMENT}
EOF
13 changes: 13 additions & 0 deletions cicd/k8s-calico-ipvs/node_scripts/loxilb.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/bash
# Provision the loxilb VM: install docker-ce and run the loxilb container.
# (Runs as root via the Vagrant shell provisioner; shebang added for
# consistency with the other node_scripts.)

# Pick the first global address that is neither the VirtualBox NAT interface
# (10.0.2.15) nor the cluster-side 192.168.80.x network.
export LOXILB_IP=$(ip a | grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')

apt-get update
apt-get install -y software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install -y docker-ce

# Run loxilb privileged on the host network; restart across reboots.
docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest --ka=out
echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc
echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc

# Share the detected address with the host/other scripts via the synced folder.
echo $LOXILB_IP > /vagrant/loxilb-ip
65 changes: 65 additions & 0 deletions cicd/k8s-calico-ipvs/node_scripts/master.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
#!/bin/bash
#
# Setup for Control Plane (Master) servers: kubeadm init, kubeconfig export,
# Calico CNI, metrics server, kube-loxilb, and static routes towards loxilb.

set -euxo pipefail

NODENAME=$(hostname -s)

sudo kubeadm config images pull

echo "Preflight Check Passed: Downloaded All Required Images"

#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap
sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml

mkdir -p "$HOME"/.kube
sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config
sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config

# Save Configs to shared /Vagrant location

# For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration.

config_path="/vagrant/configs"

if [ -d "$config_path" ]; then
  rm -f "$config_path"/*
else
  mkdir -p "$config_path"
fi

cp -i /etc/kubernetes/admin.conf "$config_path"/config
touch "$config_path"/join.sh
chmod +x "$config_path"/join.sh

# Workers consume this generated join script (see worker.sh).
kubeadm token create --print-join-command > "$config_path"/join.sh

# Install Calico Network Plugin

curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O

kubectl apply -f calico.yaml

# Give the 'vagrant' user its own kubeconfig (uid/gid 1000 on these boxes).
sudo -i -u vagrant bash << EOF
whoami
mkdir -p /home/vagrant/.kube
sudo cp -i $config_path/config /home/vagrant/.kube/
sudo chown 1000:1000 /home/vagrant/.kube/config
EOF

# Install Metrics Server

kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml

# Install loxilb
kubectl apply -f /vagrant/yaml/kube-loxilb.yml

#Install routes for pod to client (fullnat service) in nodes
sudo ip route add 9.9.9.9 via 192.168.80.9
sudo ip route add 123.123.123.1 via 192.168.80.9
sudo ip route add 124.124.124.1 via 192.168.80.9
sudo ip route add 125.125.125.1 via 192.168.80.9

#Install routes for pod to client (default service) in nodes
sudo ip route add 192.168.90.0/24 via 192.168.80.9
27 changes: 27 additions & 0 deletions cicd/k8s-calico-ipvs/node_scripts/worker.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/bin/bash
#
# Setup for Node servers: join the cluster, give the 'vagrant' user a
# kubeconfig, label the node, and add static routes towards loxilb.

set -euxo pipefail

config_path="/vagrant/configs"

# Join this node using the script master.sh generated into the shared folder.
/bin/bash "$config_path"/join.sh -v

# The heredoc delimiter is unquoted, so $config_path and $(hostname -s)
# expand in THIS shell before the inner 'vagrant' shell runs — same host,
# same values. (Dropped an unused NODENAME assignment that used to sit here.)
sudo -i -u vagrant bash << EOF
whoami
mkdir -p /home/vagrant/.kube
sudo cp -i $config_path/config /home/vagrant/.kube/
sudo chown 1000:1000 /home/vagrant/.kube/config
kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker
EOF

#Install routes for pod to client (fullnat service) in nodes
sudo ip route add 123.123.123.1 via 192.168.80.9
sudo ip route add 124.124.124.1 via 192.168.80.9
sudo ip route add 125.125.125.1 via 192.168.80.9
sudo ip route add 9.9.9.9 via 192.168.80.9

#Install routes for pod to client (default service) in nodes
sudo ip route add 192.168.90.0/24 via 192.168.80.9
Loading

0 comments on commit 31d1ba8

Please sign in to comment.