From 708d4a2c19b22f6e10f1fdbb90938224cbb96e6f Mon Sep 17 00:00:00 2001
From: Nikhil Malik
Date: Mon, 9 Sep 2024 17:37:49 +0900
Subject: [PATCH 01/34] gh-87: add sctpmh-with-seagull test suite to cicd

---
 .github/workflows/sctpmh-seagull-sanity.yml |  36 ++
 cicd/common.sh                              |   2 +-
 cicd/docker-k3s-calico/common.sh            |   2 +-
 cicd/docker-k3s-cilium/common.sh            |   2 +-
 cicd/k0s-incluster/common.sh                |   2 +-
 cicd/microk8s-incluster/common.sh           |   4 +-
 .../.vagrant/bundler/global.sol             |   1 +
 .../.vagrant/rgloader/loader.rb             |   9 +
 cicd/sctpmh-seagull/Vagrantfile             |  27 +
 cicd/sctpmh-seagull/bastion.sh              |  12 +
 cicd/sctpmh-seagull/check_ha.sh             | 134 ++++
 cicd/sctpmh-seagull/common.sh               | 572 ++++++++++++++++++
 cicd/sctpmh-seagull/config.sh               |  23 +
 cicd/sctpmh-seagull/rmconfig.sh             |   3 +
 cicd/sctpmh-seagull/rmsetup.sh              |  35 ++
 cicd/sctpmh-seagull/setup.sh                | 121 ++++
 cicd/sctpmh-seagull/validation.sh           |  56 ++
 cicd/sctpmh-seagull/validation1.sh          |  67 ++
 cicd/sctpmh-seagull/validation2.sh          | 114 ++++
 cicd/sctpmh-seagull/validation3.sh          | 171 ++++++
 cicd/sctpmh-seagull/validation4.sh          | 172 ++++++
 cicd/sctpmh-seagull/validation5.sh          | 168 +++++
 cicd/sctpmh-seagull/validation6.sh          | 175 ++++++
 23 files changed, 1902 insertions(+), 6 deletions(-)
 create mode 100644 .github/workflows/sctpmh-seagull-sanity.yml
 create mode 100644 cicd/sctpmh-seagull/.vagrant/bundler/global.sol
 create mode 100644 cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb
 create mode 100644 cicd/sctpmh-seagull/Vagrantfile
 create mode 100644 cicd/sctpmh-seagull/bastion.sh
 create mode 100644 cicd/sctpmh-seagull/check_ha.sh
 create mode 100644 cicd/sctpmh-seagull/common.sh
 create mode 100755 cicd/sctpmh-seagull/config.sh
 create mode 100755 cicd/sctpmh-seagull/rmconfig.sh
 create mode 100755 cicd/sctpmh-seagull/rmsetup.sh
 create mode 100755 cicd/sctpmh-seagull/setup.sh
 create mode 100755 cicd/sctpmh-seagull/validation.sh
 create mode 100755 cicd/sctpmh-seagull/validation1.sh
 create mode 100755 cicd/sctpmh-seagull/validation2.sh
 create mode 100755 cicd/sctpmh-seagull/validation3.sh
 create mode 100755 cicd/sctpmh-seagull/validation4.sh
 create mode 100755 cicd/sctpmh-seagull/validation5.sh
 create mode 100755 cicd/sctpmh-seagull/validation6.sh

diff --git a/.github/workflows/sctpmh-seagull-sanity.yml b/.github/workflows/sctpmh-seagull-sanity.yml
new file mode 100644
index 000000000..4e49da07a
--- /dev/null
+++ b/.github/workflows/sctpmh-seagull-sanity.yml
@@ -0,0 +1,36 @@
+name: SCTP-MH-LB-Seagull-Sanity-CI
+on:
+  # schedule:
+  #   Runs "At 11:00 UTC every day-of-week"
+  #   - cron: '0 11 * * *'
+  workflow_dispatch:
+    inputs:
+      testName:
+        description: 'Test Run-Name'
+        required: true
+        default: 'sctpmh-seagull-sanity'
+jobs:
+  test-runner:
+    name: sctpmh-seagull-sanity
+    runs-on: [self-hosted, large]
+    if: github.repository == 'loxilb-io/loxilb'
+      && github.event.inputs.testName != ''
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          submodules: recursive
+
+      - name: Run the test
+        run: |
+          cd cicd/sctpmh-seagull
+          ./config.sh
+          ./validation.sh
+          cd -
+
+      - name: Clean test-bed
+        if: success() || failure()
+        run: |
+          cd cicd/sctpmh-seagull || true
+          ./rmconfig.sh
+          cd -
diff --git a/cicd/common.sh b/cicd/common.sh
index 619a1c36b..b96e2271f 100644
--- a/cicd/common.sh
+++ b/cicd/common.sh
@@ -561,7 +561,7 @@ function create_lb_rule() {
         return
     fi
 
-    hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func)
+    hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func)
    if [[ $hook != *"tc_packet_func"* ]]; then
        echo "ERROR : No hook point found";
        exit 1
diff --git a/cicd/docker-k3s-calico/common.sh b/cicd/docker-k3s-calico/common.sh
index ea1196029..1931a2309 100644
--- a/cicd/docker-k3s-calico/common.sh
+++ b/cicd/docker-k3s-calico/common.sh
@@ -536,7 +536,7 @@ function create_lb_rule() {
     echo "$1: loxicmd create lb ${args[*]}"
     $dexec $1 loxicmd create lb ${args[*]}
 
-    hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func)
+    hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func)
    if [[ $hook != *"tc_packet_func"* ]]; then
        echo "ERROR : No hook point found";
        exit 1
diff --git a/cicd/docker-k3s-cilium/common.sh b/cicd/docker-k3s-cilium/common.sh
index ea1196029..1931a2309 100644
--- a/cicd/docker-k3s-cilium/common.sh
+++ b/cicd/docker-k3s-cilium/common.sh
@@ -536,7 +536,7 @@ function create_lb_rule() {
     echo "$1: loxicmd create lb ${args[*]}"
     $dexec $1 loxicmd create lb ${args[*]}
 
-    hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func)
+    hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func)
    if [[ $hook != *"tc_packet_func"* ]]; then
        echo "ERROR : No hook point found";
        exit 1
diff --git a/cicd/k0s-incluster/common.sh b/cicd/k0s-incluster/common.sh
index ea1196029..1931a2309 100755
--- a/cicd/k0s-incluster/common.sh
+++ b/cicd/k0s-incluster/common.sh
@@ -536,7 +536,7 @@ function create_lb_rule() {
     echo "$1: loxicmd create lb ${args[*]}"
     $dexec $1 loxicmd create lb ${args[*]}
 
-    hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func)
+    hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func)
    if [[ $hook != *"tc_packet_func"* ]]; then
        echo "ERROR : No hook point found";
        exit 1
diff --git a/cicd/microk8s-incluster/common.sh b/cicd/microk8s-incluster/common.sh
index c65963cd2..1931a2309 100755
--- a/cicd/microk8s-incluster/common.sh
+++ b/cicd/microk8s-incluster/common.sh
@@ -536,8 +536,8 @@ function create_lb_rule() {
     echo "$1: loxicmd create lb ${args[*]}"
     $dexec $1 loxicmd create lb ${args[*]}
 
-    hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_hook)
-    if [[ $hook != *"tc_packet_hook"* ]]; then
+    hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func)
+    if [[ $hook != *"tc_packet_func"* ]]; then
        echo "ERROR : No hook point found";
        exit 1
diff --git a/cicd/sctpmh-seagull/.vagrant/bundler/global.sol b/cicd/sctpmh-seagull/.vagrant/bundler/global.sol
new file mode 100644
index 000000000..072d3ec90
--- /dev/null
+++ b/cicd/sctpmh-seagull/.vagrant/bundler/global.sol
@@ -0,0 +1 @@
+{"dependencies":[["vagrant-disksize",["= 0.1.3"]],["net-ssh",[">= 2.6.5","< 8.0.0"]],["net-scp",[">= 1.1"]],["log4r",["~> 1.1"]],["vagrant-scp",["= 0.5.9"]]],"checksum":"d41cdc087dc2595e62da764647bfcacc91965875ce15159c44cdee684a184f69","vagrant_version":"2.3.7"}
\ No newline at end of file
diff --git a/cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb b/cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb
new file mode 100644
index 000000000..c3c05b095
--- /dev/null
+++ b/cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb
@@ -0,0 +1,9 @@
+# This file loads the proper rgloader/loader.rb file that comes packaged
+# with Vagrant so that encoded files can properly run with Vagrant.
+
+if ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"]
+  require File.expand_path(
+    "rgloader/loader", ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"])
+else
+  raise "Encoded files can't be read outside of the Vagrant installer."
+end
diff --git a/cicd/sctpmh-seagull/Vagrantfile b/cicd/sctpmh-seagull/Vagrantfile
new file mode 100644
index 000000000..f1d438ff8
--- /dev/null
+++ b/cicd/sctpmh-seagull/Vagrantfile
@@ -0,0 +1,27 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+#box_name = (ENV['VAGRANT_BOX'] || "ubuntu/focal64")
+box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
+box_version = "0.7.1"
+Vagrant.configure("2") do |config|
+  config.vm.box = "#{box_name}"
+  config.vm.box_version = "#{box_version}"
+
+  if Vagrant.has_plugin?("vagrant-vbguest")
+    config.vbguest.auto_update = false
+  end
+
+  config.vm.define "bastion" do |bastion|
+    bastion.vm.hostname = 'bastion'
+    #bastion.vm.network :private_network, ip: "4.0.5.3", :netmask => "255.255.255.0"
+    bastion.vm.network :private_network, ip: "4.0.4.3", :netmask => "255.255.255.0"
+    bastion.vm.provision :shell, :path => "bastion.sh"
+    bastion.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 8192]
+      vbox.customize ["modifyvm", :id, "--cpus", 12]
+      #vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+      #vbox.customize ["modifyvm", :id, "--nicpromisc3", "allow-all"]
+    end
+  end
+end
diff --git a/cicd/sctpmh-seagull/bastion.sh b/cicd/sctpmh-seagull/bastion.sh
new file mode 100644
index 000000000..dc9a4a3a2
--- /dev/null
+++ b/cicd/sctpmh-seagull/bastion.sh
@@ -0,0 +1,12 @@
+apt-get update
+apt-get install -y software-properties-common curl wget lksctp-tools jq
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+apt-get update
+apt-get install -y docker-ce
+echo "blacklist sctp" >> /etc/modprobe.d/blacklist.conf
+echo "install sctp /bin/false" >> /etc/modprobe.d/blacklist.conf
+
+echo "Rebooting Now!"
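+# NOTE (assumption based on the validation scripts): seagull drives SCTP from
+# userspace (libsctplib via LD_PRELOAD), so the kernel sctp module is
+# blacklisted above to keep it out of the datapath. The reboot below makes
+# the blacklist effective before any test traffic runs.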
+reboot
diff --git a/cicd/sctpmh-seagull/check_ha.sh b/cicd/sctpmh-seagull/check_ha.sh
new file mode 100644
index 000000000..f06e85953
--- /dev/null
+++ b/cicd/sctpmh-seagull/check_ha.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+
+master="llb1"
+backup="llb2"
+
+function check_ha() {
+    count=0
+    while : ; do
+        status1=$($hexec llb1 curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/cistate/all' -H 'accept: application/json' | jq -r '.Attr[0].state')
+        status2=$($hexec llb2 curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/cistate/all' -H 'accept: application/json' | jq -r '.Attr[0].state')
+        if [[ $status1 == "MASTER" && $status2 == "BACKUP" ]];
+        then
+            master="llb1"
+            backup="llb2"
+            break
+        elif [[ $status2 == "MASTER" && $status1 == "BACKUP" ]];
+        then
+            master="llb2"
+            backup="llb1"
+            break
+        else
+            count=$(( $count + 1 ))
+            if [[ $count -ge 20 ]]; then
+                echo "KA llb1-$status1, llb2-$status2 [NOK]" >&2
+                exit 1;
+            fi
+            sleep 5
+        fi
+    done
+}
+
+function checkSync() {
+    count=1
+    sync=0
+    while [[ $count -le 5 ]] ; do
+        echo -e "\nStatus at MASTER:$master\n" >&2
+        #$dexec $master loxicmd get ct | grep est >&2
+        ct=`$dexec $master loxicmd get ct | grep est`
+        echo "${ct//'\n'/$'\n'}" >&2
+
+        echo -e "\nStatus at BACKUP:$backup\n" >&2
+        ct=`$dexec $backup loxicmd get ct | grep est`
+        echo "${ct//'\n'/$'\n'}" >&2
+
+        nres1=$($hexec $master curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l)
+        nres2=$($hexec $backup curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l)
+
+        if [[ $nres1 == 0 ]]; then
+            echo -e "No active connections in Master:$master. Exiting!" >&2
+            echo "2"; return
+        fi
+
+        if [[ $nres1 == $nres2 && $nres1 != 0 ]]; then
+            echo -e "\nConnections sync successful!!!\n" >&2
+            sync=1
+            break;
+        fi
+        echo -e "\nConnections sync pending.. Let's wait a little more..\n" >&2
+        count=$(( $count + 1 ))
+        sleep 2
+    done
+
+    if [[ $sync == 0 ]]; then
+        echo -e "\nConnection Sync failed\n" >&2
+        echo "0"; return
+    fi
+    echo "$sync"
+}
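+
+# NB: callers invoke checkSync in a subshell (nsyncOk=$(checkSync)), so the
+# result is passed on stdout rather than via the return code: "1" = synced,
+# "2" = no active connections on the master, "0" = sync failed. All
+# human-readable progress output goes to stderr to keep stdout clean.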
+
+function restart_mloxilb() {
+    if [[ $master == "llb1" ]]; then
+        pat="cluster=172.17.0.3"
+        copts=" --cluster=172.17.0.3"
+        self=" --self=0"
+        ka=" --ka=172.17.0.3:172.17.0.2"
+    else
+        pat="cluster=172.17.0.2"
+        copts=" --cluster=172.17.0.2"
+        self=" --self=1"
+        ka=" --ka=172.17.0.2:172.17.0.3"
+    fi
+    echo "Restarting MASTER: $master"
+    pid=$(docker exec -i $master ps -aef | grep $pat | xargs | cut -d ' ' -f 2)
+    echo "Killing $pid" >&2
+    docker exec -dt $master kill -9 $pid
+    docker exec -dt $master ip link del llb0
+    docker exec -dt $master nohup /root/loxilb-io/loxilb/loxilb $copts $self $ka > /dev/null &
+    pid=$(docker exec -i $master ps -aef | grep $pat | xargs | cut -d ' ' -f 2)
+    echo "New loxilb pid: $pid" >&2
+}
+
+function restart_loxilbs() {
+    if [[ $master == "llb1" ]]; then
+        mpat="cluster=172.17.0.3"
+        mcopts=" --cluster=172.17.0.3"
+        mself=" --self=0"
+        mka=" --ka=172.17.0.3:172.17.0.2"
+
+        bpat="cluster=172.17.0.2"
+        bcopts=" --cluster=172.17.0.2"
+        bself=" --self=1"
+        bka=" --ka=172.17.0.2:172.17.0.3"
+    else
+        mpat="cluster=172.17.0.2"
+        mcopts=" --cluster=172.17.0.2"
+        mself=" --self=1"
+        mka=" --ka=172.17.0.2:172.17.0.3"
+
+        bpat="cluster=172.17.0.3"
+        bcopts=" --cluster=172.17.0.3"
+        bself=" --self=0"
+        bka=" --ka=172.17.0.3:172.17.0.2"
+    fi
+    echo "Restarting $master"
+    pid=$(docker exec -i $master ps -aef | grep $mpat | xargs | cut -d ' ' -f 2)
+    echo "Killing $pid" >&2
+    docker exec -dt $master kill -9 $pid
+    docker exec -dt $master ip link del llb0
+    docker exec -dt $master nohup /root/loxilb-io/loxilb/loxilb $mcopts $mself $mka > /dev/null &
+    pid=$(docker exec -i $master ps -aef | grep $mpat | xargs | cut -d ' ' -f 2)
+    echo "New loxilb pid: $pid" >&2
+
+    echo "Restarting $backup"
+    pid=$(docker exec -i $backup ps -aef | grep $bpat | xargs | cut -d ' ' -f 2)
+    echo "Killing $pid" >&2
+    docker exec -dt $backup kill -9 $pid
+    docker exec -dt $backup ip link del llb0
+    docker exec -dt $backup nohup /root/loxilb-io/loxilb/loxilb $bcopts $bself $bka > /dev/null &
+    pid=$(docker exec -i $backup ps -aef | grep $bpat | xargs | cut -d ' ' -f 2)
+    echo "New loxilb pid: $pid" >&2
+
+}
diff --git a/cicd/sctpmh-seagull/common.sh b/cicd/sctpmh-seagull/common.sh
new file mode 100644
index 000000000..c5a28ac5f
--- /dev/null
+++ b/cicd/sctpmh-seagull/common.sh
@@ -0,0 +1,572 @@
+#!/bin/bash
+
+hn="netns"
+pid=""
+vrn="/var/run/"
+hexec="sudo ip netns exec "
+dexec="sudo docker exec -i "
+hns="sudo ip netns "
+hexist="$vrn$hn"
+lxdocker="ghcr.io/loxilb-io/loxilb:latest"
+hostdocker="ghcr.io/loxilb-io/nettest:latest"
+cluster_opts=""
+extra_opts=""
+ka_opts=""
+#var=$(lsb_release -r | cut -f2)
+#if [[ $var == *"22.04"* ]];then
+#    lxdocker="ghcr.io/loxilb-io/loxilb:latestu22"
+#fi
+
+loxilbs=()
+
+## Given a docker name(arg1), return its pid
+get_docker_pid() {
+    id=`docker ps -f name=$1| grep -w $1 | cut -d " " -f 1 | grep -iv "CONTAINER"`
+    pid=`docker inspect -f '{{.State.Pid}}' $id`
+}
+
+## Pull all necessary dockers for testbed
+pull_dockers() {
+    ## loxilb docker
+    docker pull $lxdocker
+    ## Host docker
+    docker pull $hostdocker
+    ## BGP host docker
+    docker pull ewindisch/quagga
+}
+
+if [[ "$1" == "init" ]]; then
+    pull_dockers
+fi
+
+## Creates a docker host
+## -t|--dock-type - "loxilb"|"host"|"seahost"
+## -d|--dock-name - instance-name
+spawn_docker_host() {
+    POSITIONAL_ARGS=()
+    local bpath
+    local kpath
+    local ka
+    local bgp
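+    # Usage examples (as invoked from setup.sh below):
+    #   spawn_docker_host --dock-type loxilb --dock-name llb1 --with-ka in
+    #   spawn_docker_host --dock-type seahost --dock-name ep1
+    #   spawn_docker_host --dock-type host --dock-name r1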
+    while [[ $# -gt 0 ]]; do
+        case "$1" in
+        -t | --dock-type )
+            dtype="$2"
+            shift 2
+            ;;
+        -d | --dock-name )
+            dname="$2"
+            shift 2
+            ;;
+        -b | --with-bgp )
+            if [[ "$2" == "yes" ]]; then
+                bgp=$2
+            fi
+            shift 2
+            ;;
+        -c | --bgp-config )
+            bpath="$2"
+            bgp="yes"
+            shift 2
+            ;;
+        -k | --with-ka )
+            ka="in"
+            if [[ "$2" == "out" ]]; then
+                ka=$2
+            fi
+            shift 2
+            ;;
+        --ka-config )
+            kpath="$2"
+            if [[ -z ${ka+x} ]]; then
+                ka="in"
+            fi
+            shift 2
+            ;;
+        -e | --extra-args)
+            extra_opts="$2"
+            shift 2
+            ;;
+        -*|--*)
+            echo "Unknown option $1"
+            exit
+            ;;
+        esac
+    done
+    set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
+    echo "Spawning $dname($dtype)" >&2
+    if [[ "$dtype" == "loxilb" ]]; then
+        loxilbs+=("$dname")
+        if [[ "$pick_config" == "yes" ]]; then
+            echo "$dname will pick config from $(pwd)/${dname}_config"
+            loxilb_config="-v $(pwd)/${dname}_config:/etc/loxilb/"
+        fi
+        if [[ "$bgp" == "yes" ]]; then
+            bgp_opts="-b"
+            if [[ ! -z "$bpath" ]]; then
+                bgp_conf="-v $bpath:/etc/gobgp/"
+            fi
+        fi
+        if [[ ! -z ${ka+x} ]]; then
+            sudo mkdir -p /etc/shared/$dname/
+            docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt --pid=host --cgroupns=host --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log -v /etc/shared/$dname:/etc/shared $loxilb_config --name $dname $lxdocker
+            get_llb_peerIP $dname
+            docker exec -dt $dname /root/loxilb-io/loxilb/loxilb $bgp_opts $cluster_opts $ka_opts $extra_opts
+        else
+            docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt --pid=host --cgroupns=host --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log $loxilb_config --name $dname $lxdocker $bgp_opts
+            docker exec -dt $dname /root/loxilb-io/loxilb/loxilb $bgp_opts $cluster_opts $extra_opts
+        fi
+    elif [[ "$dtype" == "host" ]]; then
+        if [[ ! -z "$bpath" ]]; then
+            bgp_conf="--volume $bpath:/etc/quagga"
+        fi
+        if [[ "$bgp" == "yes" || ! -z "$bpath" ]]; then
+            docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit $bgp_conf --name $dname ewindisch/quagga
+        else
+            docker run -u root --cap-add SYS_ADMIN -dit --name $dname $hostdocker
+        fi
+    elif [[ "$dtype" == "seahost" ]]; then
+        docker run -u root --cap-add SYS_ADMIN -i -t --rm --detach --entrypoint /bin/bash --name $dname ghcr.io/loxilb-io/seagull:ubuntu1804
+        docker exec -dit $dname ifconfig eth0 0
+    fi
+
+    pid=""
+
+    sleep 2
+    get_docker_pid $dname
+    echo $pid
+    if [ ! -f "$hexist/$dname" -a "$pid" != "" ]; then
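+        # Bind the container's network namespace into /var/run/netns so the
+        # host can drive it with plain `ip netns exec <name>` ($hexec above).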
+        sudo mkdir -p /var/run/netns
+        sudo touch /var/run/netns/$dname
+        #echo "sudo mount -o bind /proc/$pid/ns/net /var/run/netns/$2"
+        sudo mount -o bind /proc/$pid/ns/net /var/run/netns/$dname
+    fi
+
+    $hexec $dname ifconfig lo up
+    $hexec $dname sysctl net.ipv6.conf.all.disable_ipv6=1 2>&1 >> /dev/null
+    #$hexec $dname sysctl net.ipv4.conf.all.arp_accept=1 2>&1 >> /dev/null
+    if [ -f /proc/sys/net/ipv4/conf/eth0/arp_ignore ]; then
+        $hexec $dname sysctl net.ipv4.conf.eth0.arp_ignore=2 2>&1 >> /dev/null
+    fi
+}
+
+## Get loxilb peer docker IP
+get_llb_peerIP() {
+    if [[ "$1" == "llb1" ]]; then
+        llb1IP=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' llb1)
+        if [[ "lb$llb1IP" == "lb" ]];then
+            llb2IP="172.17.0.3"
+        else
+            read A B C D <<<"${llb1IP//./ }"
+            llb2IP="$A.$B.$C.$((D+1))"
+        fi
+        cluster_opts=" --cluster=$llb2IP --self=0"
+        ka_opts=" --ka=$llb2IP:$llb1IP"
+    elif [[ "$1" == "llb2" ]]; then
+        llb2IP=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' llb2)
+        if [[ "lb$llb2IP" == "lb" ]];then
+            llb1IP="172.17.0.2"
+        else
+            read A B C D <<<"${llb2IP//./ }"
+            llb1IP="$A.$B.$C.$((D-1))"
+        fi
+        cluster_opts=" --cluster=$llb1IP --self=1"
+        ka_opts=" --ka=$llb1IP:$llb2IP"
+    fi
+}
+
+## Deletes a docker host
+## arg1 - hostname
+delete_docker_host() {
+    dcmd="kill"
+    if [[ $1 == "llb"* ]] || [[ $1 == "loxilb"* ]]; then
+        dcmd="stop"
+    fi
+    if docker $dcmd $1 2>&1 >> /dev/null
+    then
+        hd="true"
+        ka=`docker ps -f name=ka_$1| grep -w ka_$1 | cut -d " " -f 1 | grep -iv "CONTAINER"`
+        loxilbs=( "${loxilbs[@]/$1}" )
+        if [ "$ka" != "" ]; then
+            docker kill ka_$1 2>&1 >> /dev/null
+            docker rm ka_$1 2>&1 >> /dev/null
+        fi
+    fi
+    if [ -f "$hexist/$1" ]; then
+        $hns del $1
+        sudo rm -fr "$hexist/$1" 2>&1 >> /dev/null
+    fi
+    docker rm $1 2>&1 >> /dev/null || true
+}
+
+## Connects two docker hosts
+## arg1 - hostname1
+## arg2 - hostname2
+## arg3 - mtu
+connect_docker_hosts() {
+    link1=e$1$2
+    link2=e$2$1
+
+    mtu="9000"
+    if [[ $# -gt 2 ]]; then
+        mtu=$3
+    fi
+
+    #echo $link1 $link2
+    sudo ip -n $1 link add $link1 type veth peer name $link2 netns $2
+    sudo ip -n $1 link set $link1 mtu $mtu up
+    sudo ip -n $2 link set $link2 mtu $mtu up
+}
+
+## arg1 - hostname1
+## arg2 - hostname2
+disconnect_docker_hosts() {
+    link1=e$1$2
+    link2=e$2$1
+    # echo $link1 $link2
+    if [ -f "$hexist/$1" ]; then
+        ifexist1=`sudo ip -n $1 link show $link1 | grep -w $link1`
+        if [ "chk$ifexist1" != "chk" ]; then
+            sudo ip -n $1 link set $link1 down 2>&1 >> /dev/null
+            sudo ip -n $1 link del $link1 2>&1 >> /dev/null
+        fi
+    fi
+
+    if [ -f "$hexist/$2" ]; then
+        ifexist2=`sudo ip -n $2 link show | grep -w $link2`
+        if [ "chk$ifexist2" != "chk" ]; then
+            sudo ip -n $2 link set $link2 down 2>&1 >> /dev/null
+            sudo ip -n $2 link del $link2 2>&1 >> /dev/null
+        fi
+    fi
+}
+
+## Configures an address and gateway on a docker host link
+## --host1/--host2 - link endpoints, --ptype - phy|vlan|vxlan|trunk
+## --id - vlan/vxlan/bond id
+## --addr - ip_addr, --gw - gateway
+config_docker_host() {
+    POSITIONAL_ARGS=()
+    while [[ $# -gt 0 ]]; do
+        case $1 in
+        --host1)
+            local h1="$2"
+            shift
+            shift
+            ;;
+        --host2)
+            local h2="$2"
+            shift
+            shift
+            ;;
+        --ptype)
+            local ptype="$2"
+            shift
+            shift
+            ;;
+        --id)
+            local xid="$2"
+            shift
+            shift
+            ;;
+        --addr)
+            local addr="$2"
+            shift
+            shift
+            ;;
+        --gw)
+            local gw="$2"
+            shift
+            shift
+            ;;
+        -*|--*)
+            echo "Unknown option $1"
+            exit 1
+            ;;
+        *)
+            POSITIONAL_ARGS+=("$1") # save positional arg
+            shift # past argument
+            ;;
+        esac
+    done
+    set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
+
+    link1=e$h1$h2
+    link2=e$h2$h1
+    #echo "$h1:$link1->$h2:$link2"
+
+    #if [[ -n "${loxilbs[$h1]}" && "$pick_config" == "yes" ]]; then
+    if [[ ${loxilbs[*]} =~ (^|[[:space:]])$h1($|[[:space:]]) && "$pick_config" == "yes" ]]; then
+        return
+    fi
+
+    if [[ "$ptype" == "phy" ]]; then
+        sudo ip -n $h1 addr add $addr dev $link1
+    elif [[ "$ptype" == "vlan" ]]; then
+        sudo ip -n $h1 addr add $addr dev vlan$xid
+    elif [[ "$ptype" == "vxlan" ]]; then
+        sudo ip -n $h1 addr add $addr dev vxlan$xid
+    elif [[ "$ptype" == "trunk" ]]; then
+        trunk="bond$xid"
+        sudo ip -n $h1 link set $link1 down
+        sudo ip -n $h1 link add $trunk type bond
+        sudo ip -n $h1 link set $link1 master $trunk
+        sudo ip -n $h1 link set $link1 up
+        sudo ip -n $h1 link set $trunk up
+
+        sudo ip -n $h2 link set $link2 down
+        sudo ip -n $h2 link add $trunk type bond
+        sudo ip -n $h2 link set $link2 master $trunk
+        sudo ip -n $h2 link set $link2 up
+        sudo ip -n $h2 link set $trunk up
+
+        sudo ip -n $h1 addr add $addr dev bond$xid
+        if [[ "$gw" != "" ]]; then
+            sudo ip -n $h2 addr add $gw/24 dev bond$xid
+            sudo ip -n $h1 route add default via $gw proto static
+        fi
+    else
+        echo "Check port-type"
+    fi
+
+    if [[ "$gw" != "" ]]; then
+        sudo ip -n $h1 route del default 2>&1 >> /dev/null
+        sudo ip -n $h1 route add default via $gw
+    fi
+}
+
+## Creates a vlan bridge on a docker host link
+## --host1/--host2 - link endpoints
+## --id - vlan id, --ptype - tagged|untagged
+## --addr - optional ip_addr
+create_docker_host_vlan() {
+    local addr=""
+    POSITIONAL_ARGS=()
+    while [[ $# -gt 0 ]]; do
+        case $1 in
+        --host1)
+            local h1="$2"
+            shift
+            shift
+            ;;
+        --host2)
+            local h2="$2"
+            shift
+            shift
+            ;;
+        --ptype)
+            local ptype="$2"
+            shift
+            shift
+            ;;
+        --id)
+            local vid="$2"
+            shift
+            shift
+            ;;
+        --addr)
+            addr="$2"
+            shift
+            shift
+            ;;
+        -*|--*)
+            echo "Unknown option $1"
+            exit 1
+            ;;
+        *)
+            POSITIONAL_ARGS+=("$1") # save positional arg
+            shift # past argument
+            ;;
+        esac
+    done
+
+    if [[ ${loxilbs[*]} =~ (^|[[:space:]])$h1($|[[:space:]]) && "$pick_config" == "yes" ]]; then
+        return
+    fi
+
+    set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
+    link1=e$h1$h2
+    link2=e$h2$h1
+
+    #echo "$h1:$link1->$h2:$link2"
+
+    if [[ "$ptype" == "tagged" ]]; then
+        brport="$link1.$vid"
+        sudo ip -n $h1 link add link $link1 name $brport type vlan id $vid
+        sudo ip -n $h1 link set $brport up
+    else
+        brport=$link1
+    fi
+
+    sudo ip -n $h1 link add vlan$vid type bridge 2>&1 | true
+    sudo ip -n $h1 link set $brport master vlan$vid
+    sudo ip -n $h1 link set vlan$vid up
+    if [[ "$addr" != "" ]]; then
+        sudo ip -n $h1 addr add $addr dev vlan$vid
+    fi
+}
+
+## Creates a vxlan tunnel endpoint on a docker host link
+## --host1/--host2 - link endpoints
+## --uif - underlay port type (phy|vlan)
+## --id - vxlan id, --vid - underlay vlan id
+## --pvid - bridge vlan id for the vxlan port
+## --ep - remote tunnel endpoint IP, --lip - local tunnel IP
+create_docker_host_vxlan() {
+    POSITIONAL_ARGS=()
+    while [[ $# -gt 0 ]]; do
+        case $1 in
+        --host1)
+            local h1="$2"
+            shift
+            shift
+            ;;
+        --host2)
+            local h2="$2"
+            shift
+            shift
+            ;;
+        --uif)
+            local uifType="$2"
+            shift
+            shift
+            ;;
+        --vid)
+            local vid="$2"
+            shift
+            shift
+            ;;
+        --pvid)
+            local pvid="$2"
+            shift
+            shift
+            ;;
+        --id)
+            local vxid="$2"
+            shift
+            shift
+            ;;
+        --ep)
+            local ep="$2"
+            shift
+            shift
+            ;;
+        --lip)
+            local lip="$2"
+            shift
+            shift
+            ;;
+        -*|--*)
+            echo "Unknown option $1"
+            exit 1
+            ;;
+        *)
+            POSITIONAL_ARGS+=("$1") # save positional arg
+            shift # past argument
+            ;;
+        esac
+    done
+
+    if [[ ${loxilbs[*]} =~ (^|[[:space:]])$h1($|[[:space:]]) && "$pick_config" == "yes" ]]; then
+        return
+    fi
+
+    set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
+    link1=e$h1$h2
+    link2=e$h2$h1
+
+    #echo "$h1:$link1->$h2:$link2"
+
+    if [[ "$uifType" == "phy" ]]; then
+        sudo ip -n $h1 link add vxlan$vxid type vxlan id $vxid local $lip dev $link1 dstport 0
+        sudo ip -n $h1 link set vxlan$vxid up
+    elif [[ "$uifType" == "vlan" ]]; then
+        sudo ip -n $h1 link add vxlan$vxid type vxlan id $vxid local $lip dev vlan$vid dstport 0
+        sudo ip -n $h1 link set vxlan$vxid up
+    fi
+
+    if [[ "$pvid" != "" ]]; then
+        sudo ip -n $h1 link add vlan$pvid type bridge 2>&1 | true
+        sudo ip -n $h1 link set vxlan$vxid master vlan$pvid
+        sudo ip -n $h1 link set vlan$pvid up
+    fi
+
+    if [[ "$ep" != "" ]]; then
+        sudo bridge -n $h1 fdb append 00:00:00:00:00:00 dst $ep dev vxlan$vxid
+    fi
+
+}
+
+## arg1 - hostname1
+## arg2 - hostname2
+create_docker_host_cnbridge() {
+    POSITIONAL_ARGS=()
+    while [[ $# -gt 0 ]]; do
+        case $1 in
+        --host1)
+            local h1="$2"
+            shift
+            shift
+            ;;
+        --host2)
+            local h2="$2"
+            shift
+            shift
+            ;;
+        -*|--*)
+            echo "Unknown option $1"
+            exit 1
+            ;;
+        *)
+            POSITIONAL_ARGS+=("$1") # save positional arg
+            shift # past argument
+            ;;
+        esac
+    done
+
+    if [[ ${loxilbs[*]} =~ (^|[[:space:]])$h1($|[[:space:]]) && "$pick_config" == "yes" ]]; then
+        return
+    fi
+
+    set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
+    link1=e$h1$h2
+    link2=e$h2$h1
+
+    #echo "$h1:$link1->$h2:$link2"
+
+    brport=$link1
+
+    sudo ip -n $h1 link add br$h1 type bridge 2>&1 | true
+    sudo ip -n $h1 link set $brport master br$h1
+    sudo ip -n $h1 link set br$h1 up
+}
+
+#Arg1: host name
+#Arg2: --<protocol>:<externalIP>:<port>
+#Arg3: --endpoints=<ip1>:<weight1>,<ip2>:<weight2>,..
+function create_lb_rule() {
+    if [[ ${loxilbs[*]} =~ (^|[[:space:]])$1($|[[:space:]]) && "$pick_config" == "yes" ]]; then
+        return
+    fi
+    args=( "$@" )
+    args=( "${args[@]/$1}" )
+    echo "$1: loxicmd create lb ${args[*]}"
+    $dexec $1 loxicmd create lb ${args[*]}
+
+    hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func)
+    if [[ $hook != *"tc_packet_func"* ]]; then
+        echo "ERROR : No hook point found";
+        exit 1
+    fi
+}
+
+#Arg1: host name
+#Arg2: <destination>
+#Arg3: <gateway>
+function add_route() {
+    if [[ ${loxilbs[*]} =~ (^|[[:space:]])$1($|[[:space:]]) && "$pick_config" == "yes" ]]; then
+        return
+    fi
+    echo "$1: ip route add $2 via $3 proto static"
+    $hexec $1 ip route add $2 via $3 proto static
+}
diff --git a/cicd/sctpmh-seagull/config.sh b/cicd/sctpmh-seagull/config.sh
new file mode 100755
index 000000000..bc09793b2
--- /dev/null
+++ b/cicd/sctpmh-seagull/config.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f
+vagrant up
+
+for i in {1..60}
+do
+    ping 4.0.4.3 -c 1 -W 1 2>&1> /dev/null;
+    if [[ $? -eq 0 ]]
+    then
+        echo -e "Machine rebooted [OK]"
+        code=0
+        break
+    else
+        echo -e "Waiting for machine to be UP"
+        sleep 1
+    fi
+done
+if [[ $code == 0 ]];
+then
+    vagrant ssh bastion -c 'sudo /vagrant/setup.sh'
+else
+    echo "VM not up"
+fi
diff --git a/cicd/sctpmh-seagull/rmconfig.sh b/cicd/sctpmh-seagull/rmconfig.sh
new file mode 100755
index 000000000..88da3a557
--- /dev/null
+++ b/cicd/sctpmh-seagull/rmconfig.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+vagrant destroy -f bastion
+sudo rm -rf status*.txt
diff --git a/cicd/sctpmh-seagull/rmsetup.sh b/cicd/sctpmh-seagull/rmsetup.sh
new file mode 100755
index 000000000..a2bf9aaeb
--- /dev/null
+++ b/cicd/sctpmh-seagull/rmsetup.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+echo "#########################################"
+echo "Removing testbed"
+echo "#########################################"
+
+source /vagrant/common.sh
+
+disconnect_docker_hosts user r1
+disconnect_docker_hosts user r2
+disconnect_docker_hosts r1 sw1
+disconnect_docker_hosts r2 sw1
+disconnect_docker_hosts sw1 llb1
+disconnect_docker_hosts sw1 llb2
+disconnect_docker_hosts llb1 sw2
+disconnect_docker_hosts llb2 sw2
+disconnect_docker_hosts sw2 r3
+disconnect_docker_hosts sw2 r4
+disconnect_docker_hosts r3 ep1
+disconnect_docker_hosts r4 ep1
+
+delete_docker_host user
+delete_docker_host llb1
+delete_docker_host llb2
+delete_docker_host r1
+delete_docker_host r2
+delete_docker_host r3
+delete_docker_host r4
+delete_docker_host sw1
+delete_docker_host sw2
+delete_docker_host ep1
+
+echo "#########################################"
+echo "Removed testbed"
+echo "#########################################"
diff --git a/cicd/sctpmh-seagull/setup.sh b/cicd/sctpmh-seagull/setup.sh
new file mode 100755
index 000000000..a19ff84d0
--- /dev/null
+++ b/cicd/sctpmh-seagull/setup.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+source /vagrant/common.sh
+
+echo "#########################################"
+echo "Spawning all hosts"
+echo "#########################################"
+
+spawn_docker_host --dock-type loxilb --dock-name llb1 --with-ka in
+spawn_docker_host --dock-type loxilb --dock-name llb2 --with-ka in
+spawn_docker_host --dock-type seahost --dock-name ep1
+spawn_docker_host --dock-type host --dock-name r1
+spawn_docker_host --dock-type host --dock-name r2
+spawn_docker_host --dock-type host --dock-name r3
+spawn_docker_host --dock-type host --dock-name r4
+spawn_docker_host --dock-type host --dock-name sw1
+spawn_docker_host --dock-type host --dock-name sw2
+spawn_docker_host --dock-type seahost --dock-name user
+
+echo "#########################################"
+echo "Connecting and configuring hosts"
+echo "#########################################"
+
+connect_docker_hosts user r1
+connect_docker_hosts user r2
+connect_docker_hosts r1 sw1
+connect_docker_hosts r2 sw1
+connect_docker_hosts sw1 llb1
+connect_docker_hosts sw1 llb2
+connect_docker_hosts llb1 sw2
+connect_docker_hosts llb2 sw2
+connect_docker_hosts sw2 r3
+connect_docker_hosts sw2 r4
+connect_docker_hosts r3 ep1
+connect_docker_hosts r4 ep1
+
+create_docker_host_cnbridge --host1 sw1 --host2 llb1
+create_docker_host_cnbridge --host1 sw1 --host2 llb2
+create_docker_host_cnbridge --host1 sw1 --host2 r1
+create_docker_host_cnbridge --host1 sw1 --host2 r2
+
+create_docker_host_cnbridge --host1 sw2 --host2 llb1
+create_docker_host_cnbridge --host1 sw2 --host2 llb2
+create_docker_host_cnbridge --host1 sw2 --host2 r3
+create_docker_host_cnbridge --host1 sw2 --host2 r4
+
+#node1 config
+config_docker_host --host1 user --host2 r1 --ptype phy --addr 1.1.1.1/24 --gw 1.1.1.254
+config_docker_host --host1 user --host2 r2 --ptype phy --addr 2.2.2.1/24
+config_docker_host --host1 r1 --host2 user --ptype phy --addr 1.1.1.254/24
+config_docker_host --host1 r2 --host2 user --ptype phy --addr 2.2.2.254/24
+
+create_docker_host_vlan --host1 llb1 --host2 sw1 --id 11 --ptype untagged
+create_docker_host_vlan --host1 llb2 --host2 sw1 --id 11 --ptype untagged
+create_docker_host_vlan --host1 r1 --host2 sw1 --id 11 --ptype untagged
+create_docker_host_vlan --host1 r2 --host2 sw1 --id 11 --ptype untagged
+config_docker_host --host1 r1 --host2 sw1 --ptype vlan --id 11 --addr 11.11.11.253/24 --gw 11.11.11.11
+config_docker_host --host1 r2 --host2 sw1 --ptype vlan --id 11 --addr 11.11.11.254/24 --gw 11.11.11.11
+config_docker_host --host1 llb1 --host2 sw1 --ptype vlan --id 11 --addr 11.11.11.1/24
+config_docker_host --host1 llb2 --host2 sw1 --ptype vlan --id 11 --addr 11.11.11.2/24
+
+create_docker_host_vlan --host1 llb1 --host2 sw2 --id 10 --ptype untagged
+create_docker_host_vlan --host1 llb2 --host2 sw2 --id 10 --ptype untagged
+create_docker_host_vlan --host1 r3 --host2 sw2 --id 10 --ptype untagged
+create_docker_host_vlan --host1 r4 --host2 sw2 --id 10 --ptype untagged
+
+config_docker_host --host1 r3 --host2 sw2 --ptype vlan --id 10 --addr 10.10.10.253/24 --gw 10.10.10.10
+config_docker_host --host1 r4 --host2 sw2 --ptype vlan --id 10 --addr 10.10.10.254/24 --gw 10.10.10.10
+config_docker_host --host1 llb1 --host2 sw2 --ptype vlan --id 10 --addr 10.10.10.1/24
+config_docker_host --host1 llb2 --host2 sw2 --ptype vlan --id 10 --addr 10.10.10.2/24
+
+config_docker_host --host1 ep1 --host2 r3 --ptype phy --addr 31.31.31.1/24 --gw 31.31.31.254
+config_docker_host --host1 ep1 --host2 r4 --ptype phy --addr 32.32.32.1/24
+config_docker_host --host1 r3 --host2 ep1 --ptype phy --addr 31.31.31.254/24
+config_docker_host --host1 r4 --host2 ep1 --ptype phy --addr 32.32.32.254/24
+
+$hexec user ip route change default via 1.1.1.254
+$hexec ep1 ip route change default via 31.31.31.254
+
+# Backup paths in user
+$hexec user ip route add 21.21.21.1/32 via 2.2.2.254
+$hexec user ip route add 134.134.134.1/32 via 2.2.2.254
+
+$hexec ep1 ip route add 134.134.134.1/32 via 32.32.32.254
+$hexec ep1 ip route add 135.135.135.1/32 via 31.31.31.254
+
+$hexec llb1 ip route add 1.1.1.0/24 via 11.11.11.253
+$hexec llb1 ip route add 2.2.2.0/24 via 11.11.11.254
+$hexec llb2 ip route add 1.1.1.0/24 via 11.11.11.253
+$hexec llb2 ip route add 2.2.2.0/24 via 11.11.11.254
+
+$hexec llb1 ip route add 31.31.31.0/24 via 10.10.10.253
+$hexec llb1 ip route add 32.32.32.0/24 via 10.10.10.254
+$hexec llb2 ip route add 31.31.31.0/24 via 10.10.10.253
+$hexec llb2 ip route add 32.32.32.0/24 via 10.10.10.254
+
+sleep 20
+##Create LB rule user->ep1
+create_lb_rule llb1 20.20.20.1 --name=sctpmh1 --secips=21.21.21.1,22.22.22.1 --sctp=2020:8080 --endpoints=31.31.31.1:1 --mode=fullnat
+create_lb_rule llb2 20.20.20.1 --name=sctpmh1 --secips=21.21.21.1,22.22.22.1 --sctp=2020:8080 --endpoints=31.31.31.1:1 --mode=fullnat
+
+##Create LB rule ep1->user
+create_lb_rule llb1 133.133.133.1 --name=sctpmh2 --secips=134.134.134.1,135.135.135.1 --sctp=2020:8080 --endpoints=1.1.1.1:1 --mode=fullnat
+create_lb_rule llb2 133.133.133.1 --name=sctpmh2 --secips=134.134.134.1,135.135.135.1 --sctp=2020:8080 --endpoints=1.1.1.1:1 --mode=fullnat
+
+$dexec llb1 loxicmd create ep 1.1.1.1 --name=1.1.1.1_sctp_8080 --probetype=ping
+$dexec llb1 loxicmd create ep 31.31.31.1 --name=31.31.31.1_sctp_8080 --probetype=ping
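+# the same ping-based endpoint liveness probes are configured on the peer
+# loxilb node so that health state is consistent whichever instance is MASTER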
+$dexec llb2 loxicmd create ep 1.1.1.1 --name=1.1.1.1_sctp_8080 --probetype=ping
+$dexec llb2 loxicmd create ep 31.31.31.1 --name=31.31.31.1_sctp_8080 --probetype=ping
+
+
+create_lb_rule llb1 11.11.11.11 --tcp=80:8080 --endpoints=31.31.31.1:1
+create_lb_rule llb2 11.11.11.11 --tcp=80:8080 --endpoints=31.31.31.1:1
+create_lb_rule llb1 10.10.10.10 --tcp=80:8080 --endpoints=31.31.31.1:1
+create_lb_rule llb2 10.10.10.10 --tcp=80:8080 --endpoints=31.31.31.1:1
+
+$dexec llb1 loxicmd save --all
+$dexec llb2 loxicmd save --all
+
+$hexec user ifconfig eth0 0
+$hexec ep1 ifconfig eth0 0
diff --git a/cicd/sctpmh-seagull/validation.sh b/cicd/sctpmh-seagull/validation.sh
new file mode 100755
index 000000000..25b73e7b1
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+code=0
+tc=( "Basic Test - Client & EP Uni-homed and LB is Multi-homed" "Multipath Test, Client and LB Multihomed, EP is uni-homed" "C2LB Multipath Failover Test - Client and LB Multihomed, EP is uni-homed" "E2E Multipath Failover Test - Client, LB and EP all Multihomed" "C2LB HA Failover Test - Client and LB Multihomed, EP is uni-homed" "E2E HA Failover Test. Client, LB and EP all Multihomed" )
+padding="............................................................................................................."
+border="**************************************************************************************************************************************************"
+
+for((j=0,i=1; i<=6; i++, j++)); do
+    echo "SCTP Multihoming - Test case #$i"
+    echo -e "\n\n\n$border\n"
+    cmd="sudo /vagrant/validation$i.sh"
+    vagrant ssh bastion -c "$cmd"
+    echo -e "\n\n"
+    file=status$i.txt
+    status=`cat $file`
+    title=${tc[j]}
+    echo -e "\n\n"
+
+    if [[ $status == "NOK" ]]; then
+        code=1
+        printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[FAILED]";
+    else
+        printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[PASSED]";
+    fi
+    echo -e "\n\n\n$border\n\n"
+
+    sleep 30
+done
+
+echo -e "\n\n\n$border\n"
+printf "================================================== SCTP MULTIHOMING CONSOLIDATED RESULT ==========================================================\n"
+for((j=0,i=1; i<=6; i++, j++)); do
+    file=status$i.txt
+    status=`cat $file`
+    title=${tc[j]}
+    echo -e "\n\n"
+
+    if [[ $status == "NOK" ]]; then
+        code=1
+        printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[FAILED]";
+    else
+        printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[PASSED]";
+    fi
+done
+
+echo -e "\n$border"
+
+echo -e "\n\n\n$border\n"
+if [[ $code == 0 ]]; then
+    echo -e "SCTP Multihoming CICD [OK]"
+else
+    echo -e "SCTP Multihoming CICD [NOK]"
+fi
+echo -e "\n$border\n"
+
+
+exit $code
diff --git a/cicd/sctpmh-seagull/validation1.sh b/cicd/sctpmh-seagull/validation1.sh
new file mode 100755
index 000000000..20e0454f8
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation1.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+source /vagrant/common.sh
+source /vagrant/check_ha.sh
+
+echo -e "sctpmh: SCTP Multihoming Basic Test - Client & EP Uni-homed and LB is Multi-homed\n"
+extIP="20.20.20.1"
+port=2020
+
+check_ha
+
+echo "SCTP Multihoming service sctp-lb -> $extIP:$port"
+echo -e "------------------------------------------------------------------------------------\n"
+
+sudo docker exec -dt ep1 ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 40 stdbuf -oL seagull -conf ../config/conf.server.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.server.xml > ep1.out' 2>&1 > /dev/null &
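+# ep1 runs the seagull Diameter server and `user` (below) runs the client
+# scenario; LD_PRELOAD'ing libsctplib gives seagull its userspace SCTP stack
+# (presumably the reason bastion.sh blacklists the kernel sctp module).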
+sleep 2
+
+sudo docker exec -dt user ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 25 stdbuf -oL seagull -conf ../config/conf.client.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.client.xml > user.out' 2>&1 > /dev/null &
+
+sleep 2
+
+for((i=0;i<5;i++)) do
+    $dexec user bash -c 'tail -n 25 /opt/seagull/diameter-env/run/user.out'
+    res=$(sudo docker exec -t user bash -c 'tail -n 10 /opt/seagull/diameter-env/run/user.out | grep "Successful calls"'| xargs | cut -d '|' -f 4)
+    $dexec $master loxicmd get ct --servName=sctpmh1
+    echo -e "\n"
+    sleep 5
+done
+
+if [ "${res:-0}" -gt "0" ]; then
+    #echo -e $res
+    echo -e "\nsctpmh SCTP Multihoming service Basic Test [OK]\n"
+    echo "OK" > /vagrant/status1.txt
+    restart_loxilbs
+else
+    echo "NOK" > /vagrant/status1.txt
+    echo "sctpmh SCTP Multihoming service Basic Test [NOK]"
+    echo "Calls : $res"
+    ## Dump some debug info
+    echo "system route-info"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    echo "llb1 ep-info"
+    $dexec llb1 loxicmd get ep
+    echo "llb1 bpf-info"
+    $dexec llb1 ntc filter show dev eth0 ingress
+    echo "BPF trace -- "
+    sudo timeout 5 cat /sys/kernel/debug/tracing/trace_pipe
+    sudo killall -9 cat
+    echo "BPF trace -- "
+    restart_loxilbs
+    exit 1
+fi
+echo -e "------------------------------------------------------------------------------------\n\n\n"
diff --git a/cicd/sctpmh-seagull/validation2.sh b/cicd/sctpmh-seagull/validation2.sh
new file mode 100755
index 000000000..f001a497f
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation2.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+source /vagrant/common.sh
+source /vagrant/check_ha.sh
+echo -e "sctpmh: SCTP Multihoming - Multipath Test, Client and LB Multihomed, EP is uni-homed\n"
+extIP="20.20.20.1"
+port=2020
+
+check_ha
+
+echo "SCTP Multihoming service sctp-lb(Multipath traffic) -> $extIP:$port"
+echo -e "------------------------------------------------------------------------------------\n"
+
+echo -e "\nHA state Master:$master BACKUP-$backup\n"
+
+sudo docker exec -dt ep1 ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 40 stdbuf -oL seagull -conf ../config/conf.server.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.server.xml > ep1.out' 2>&1 > /dev/null &
+sleep 2
+
+sudo docker exec -dt user ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 25 stdbuf -oL seagull -conf ../config/conf.client.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.client.xml > user.out' 2>&1 > /dev/null &
+
+sleep 2
+#Path counters
+p1c_old=0
+p1c_new=0
+p2c_old=0
+p2c_new=0
+p3c_old=0
+p3c_new=0
+call_old=0
+call_new=0
+
+for((i=0;i<5;i++)) do
+
+    $dexec user bash -c 'tail -n 25 /opt/seagull/diameter-env/run/user.out'
+    call_new=$(sudo docker exec -t user bash -c 'tail -n 10 /opt/seagull/diameter-env/run/user.out | grep "Successful calls"'| xargs | cut -d '|' -f 4)
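+    # Per-path activity is inferred below from `loxicmd get ct`: field 10 of
+    # the pipe-separated row is used here as a per-association packet/activity
+    # counter (an assumption based on how p1c/p2c/p3c are compared each pass).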
+    echo -e "\n\n"
+    $dexec $master loxicmd get ct --servName=sctpmh1
+    echo -e "\n"
+    p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "20.20.20.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+    p2c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "21.21.21.1 | 2.2.2.1" | xargs | cut -d '|' -f 10)
+    p3c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "22.22.22.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+
+    echo "Counters: $p1c_new $p2c_new $p3c_new"
+
+    if [[ $p1c_new -gt $p1c_old ]]; then
+        echo "Path 1: 1.1.1.1 -> 20.20.20.1 [ACTIVE]"
+        p1=1
+    else
+        echo "Path 1: 1.1.1.1 -> 20.20.20.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p2c_new -gt $p2c_old ]]; then
+        echo "Path 2: 2.2.2.1 -> 21.21.21.1 [ACTIVE]"
+        p2=1
+    else
+        echo "Path 2: 2.2.2.1 -> 21.21.21.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p3c_new -gt $p3c_old ]]; then
+        echo "Path 3: 1.1.1.1 -> 22.22.22.1 [ACTIVE]"
+        p3=1
+    else
+        echo "Path 3: 1.1.1.1 -> 22.22.22.1 [NOT ACTIVE]"
+    fi
+    if [[ $call_new -gt $call_old ]]; then
+        echo -e "\nSuccessful Calls: \t$call_new [ACTIVE]"
+        calls=1
+    else
+        echo -e "\nSuccessful Calls: \t$call_new [NOT ACTIVE]"
+    fi
+
+    p1c_old=$p1c_new
+    p2c_old=$p2c_new
+    p3c_old=$p3c_new
+    call_old=$call_new
+    echo -e "\n"
+    sleep 5
+done
+
+if [[ $p1 == 1 && $p2 == 1 && $p3 == 1 && $calls == 1 ]]; then
+    echo "sctpmh SCTP Multihoming Multipath [OK]"
+    echo "OK" > /vagrant/status2.txt
+    restart_loxilbs
+else
+    echo "NOK" > /vagrant/status2.txt
+    echo "sctpmh SCTP Multihoming Multipath [NOK]"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nllb2"
+    sudo ip netns exec llb2 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    echo "llb1 ep-info"
+    $dexec llb1 loxicmd get ep
+    echo "-----------------------------"
+    echo -e "\nllb2 lb-info"
+    $dexec llb2 loxicmd get lb
+    echo "llb2 ep-info"
+    $dexec llb2 loxicmd get ep
+    restart_loxilbs
+    exit 1
+fi
+echo -e "------------------------------------------------------------------------------------\n\n\n"
diff --git a/cicd/sctpmh-seagull/validation3.sh b/cicd/sctpmh-seagull/validation3.sh
new file mode 100755
index 000000000..5a31136d9
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation3.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+source /vagrant/common.sh
+source /vagrant/check_ha.sh
+
+echo -e "sctpmh: SCTP Multihoming - Multipath Failover Test. Client and LB Multihomed, EP is uni-homed\n"
+extIP="20.20.20.1"
+port=2020
+
+check_ha
+
+echo "SCTP Multihoming service sctp-lb(Multipath traffic) -> $extIP:$port"
+echo -e "------------------------------------------------------------------------------------\n"
+
+echo -e "\nHA state Master:$master BACKUP-$backup\n"
+
+echo -e "\nTraffic Flow: User -> LB -> EP "
+
+sudo docker exec -dt user ksh -c "sed -i 's/\"call-rate\" value=\"5000\"/\"call-rate\" value=\"100\"/g' /opt/seagull/diameter-env/config/conf.client.xml"
+
+sudo docker exec -dt ep1 ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 220 stdbuf -oL seagull -conf ../config/conf.server.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.server.xml > ep1.out' 2>&1 > /dev/null &
+sleep 2
+
+sudo docker exec -dt user ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 210 stdbuf -oL seagull -conf ../config/conf.client.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.client.xml > user.out' 2>&1 > /dev/null &
+
+sleep 30
+#Path counters
+p1c_old=0
+p1c_new=0
+p2c_old=0
+p2c_new=0
+p3c_old=0
+p3c_new=0
+down=0
+code=0
+call_old=0
+call_new=0
+fail_old=0
+fail_new=0
+recover=0
+frecover=0
+
+for((i=0;i<35;i++)) do
+    $dexec user bash -c 'tail -n 25 /opt/seagull/diameter-env/run/user.out'
+    call_new=$(sudo docker exec -t user bash -c 'tail -n 10 /opt/seagull/diameter-env/run/user.out | grep "Successful calls"'| xargs | cut -d '|' -f 4)
+    fail_new=$(sudo docker exec -t user bash -c 'tail -n 10 /opt/seagull/diameter-env/run/user.out | grep "Failed calls"'| xargs | cut -d '|' -f 4)
+    echo -e "\n\n"
+    $dexec $master loxicmd get ct --servName=sctpmh1
+    echo -e "\n"
+    p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "20.20.20.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+    p2c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "21.21.21.1 | 2.2.2.1" | xargs | cut -d '|' -f 10)
+    p3c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "22.22.22.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+
+    echo "Counters: $p1c_new $p2c_new $p3c_new"
+
+    if [[ $p1c_new -gt $p1c_old ]]; then
+        echo "Path 1: 1.1.1.1 -> 20.20.20.1 [ACTIVE]"
+        p1=1
+        #if [[ $down == 1 ]]; then
+            #echo "This path shouldn't be ACTIVE"
+            #code=1
+        #fi
+        echo -e "Turning off this path from User->LB\n"
+        $hexec user ip link set euserr1 down;
+        down=1
+        p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "20.20.20.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+    else
+        if [[ $down == 1 ]]; then
+            p1dok=1
+            echo "Path 1: 1.1.1.1 -> 20.20.20.1 NOT ACTIVE - [OK]"
+        else
+            echo "Path 1: 1.1.1.1 -> 20.20.20.1 [NOT ACTIVE]"
+        fi
+    fi
+
+    if [[ $p2c_new -gt $p2c_old ]]; then
+        echo "Path 2: 2.2.2.1 -> 21.21.21.1 [ACTIVE]"
+        p2=1
+    else
+        echo "Path 2: 2.2.2.1 -> 21.21.21.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p3c_new -gt $p3c_old ]]; then
+        echo "Path 3: 1.1.1.1 -> 22.22.22.1 [ACTIVE]"
+        p3=1
+    else
+        echo "Path 3: 1.1.1.1 -> 22.22.22.1 [NOT ACTIVE]"
+    fi
+
+    echo -e "\n"
+    if [[ $recover == 1 ]]; then
+        printf "\t***Setup Recovered***"
+    fi
+    echo -e "\n\n"
+
+    if [[ $fail_new -gt $fail_old && $down == 1 && $recover == 0 ]]; then
+        printf "Failed Calls: \t%10s \t[INCREASING]\n" $fail_new
+        fstart=1
+        code=1
+        calls=0
+    else
+        if [[ $fail_new -eq $fail_old ]]; then
+            if [[ $down == 1 && $fstart == 1 ]]; then
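+                # The failed-call counter has stopped increasing after the
+                # path cut: the association has converged onto the surviving
+                # paths, so mark the failure window as recovered (frecover).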
+                printf "Failed Calls: \t%10s \t[STABLE]\n" $fail_new
+                frecover=1
+                code=0
+            else
+                printf "Failed Calls: \t%10s\n" $fail_new
+            fi
+        fi
+    fi
+
+    if [[ $call_new -gt $call_old ]]; then
+        printf "Successful Calls: \t%10s \t[ACTIVE]\n" $call_new
+        calls=1
+        if [[ $down == 1 && $frecover == 1 ]]; then
+            recover=1
+        fi
+    else
+        printf "Successful Calls: \t%10s \t[NOT ACTIVE]\n" $call_new
+    fi
+
+    p1c_old=$p1c_new
+    p2c_old=$p2c_new
+    p3c_old=$p3c_new
+    call_old=$call_new
+    fail_old=$fail_new
+    echo -e "\n"
+    sleep 5
+done
+
+#Restore
+$hexec user ip link set euserr1 up
+$hexec user ip route add default via 1.1.1.254
+sudo docker exec -dt user ksh -c "sed -i 's/\"call-rate\" value=\"100\"/\"call-rate\" value=\"5000\"/g' /opt/seagull/diameter-env/config/conf.client.xml"
+
+if [[ $calls == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 && $recover == 1 ]]; then
+    echo "sctpmh SCTP Multihoming Multipath Failover [OK]"
+    echo "OK" > /vagrant/status3.txt
+    restart_loxilbs
+else
+    echo "NOK" > /vagrant/status3.txt
+    echo "sctpmh SCTP Multihoming Multipath Failover [NOK]"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nllb2"
+    sudo ip netns exec llb2 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    echo "llb1 ep-info"
+    $dexec llb1 loxicmd get ep
+    echo -e "\nllb2 lb-info"
+    $dexec llb2 loxicmd get lb
+    echo "llb2 ep-info"
+    $dexec llb2 loxicmd get ep
+    echo "-----------------------------"
+    restart_loxilbs
+    exit 1
+fi
+echo -e "------------------------------------------------------------------------------------\n\n\n"
diff --git a/cicd/sctpmh-seagull/validation4.sh b/cicd/sctpmh-seagull/validation4.sh
new file mode 100755
index 000000000..0bf76671f
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation4.sh
@@ -0,0 +1,172 @@
+#!/bin/bash
+source /vagrant/common.sh
+source /vagrant/check_ha.sh
+echo -e "sctpmh: SCTP Multihoming - E2E Multipath Failover Test. Client, LB and EP all Multihomed\n"
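+# Same failover scenario as validation3, but driven from the EP side: the
+# seagull roles are reversed via the sed edits below (the server accepts any
+# source address and the client dials the reverse-direction VIP 133.133.133.1).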
+extIP="133.133.133.1"
+port=2020
+
+check_ha
+
+echo "SCTP Multihoming service sctp-lb(Multipath traffic) -> $extIP:$port"
+echo -e "------------------------------------------------------------------------------------\n"
+
+echo -e "\nHA state Master:$master BACKUP-$backup\n"
+echo -e "\nTraffic Flow: EP ---> LB ---> User"
+
+sudo docker exec -dt user ksh -c 'sed -i 's/source=31.31.31.1/source=0.0.0.0/g' /opt/seagull/diameter-env/config/conf.server.xml'
+
+sudo docker exec -dt user ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 220 stdbuf -oL seagull -conf ../config/conf.server.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.server.xml > user.out' 2>&1 > /dev/null &
+sleep 2
+
+sudo docker exec -dt ep1 ksh -c "sed -i 's/\"call-rate\" value=\"5000\"/\"call-rate\" value=\"100\"/g' /opt/seagull/diameter-env/config/conf.client.xml"
+sudo docker exec -dt ep1 ksh -c 'sed -i 's/dest=20.20.20.1/dest=133.133.133.1/g' /opt/seagull/diameter-env/config/conf.client.xml'
+sudo docker exec -dt ep1 ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 210 stdbuf -oL seagull -conf ../config/conf.client.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.client.xml > ep1.out' 2>&1 > /dev/null &
+
+sleep 20
+
+#Path counters
+p1c_old=0
+p1c_new=0
+p2c_old=0
+p2c_new=0
+p3c_old=0
+p3c_new=0
+down=0
+code=0
+call_old=0
+call_new=0
+fail_old=0
+fail_new=0
+recover=0
+frecover=0
+calls=0
+for((i=0;i<35;i++)) do
+    $dexec ep1 bash -c 'tail -n 25 /opt/seagull/diameter-env/run/ep1.out'
+    call_new=$(sudo docker exec -t ep1 bash -c 'tail -n 10 /opt/seagull/diameter-env/run/ep1.out | grep "Successful calls"'| xargs | cut -d '|' -f 4)
+    fail_new=$(sudo docker exec -t ep1 bash -c 'tail -n 10 /opt/seagull/diameter-env/run/ep1.out | grep "Failed calls"'| xargs | cut -d '|' -f 4)
+    echo -e "\n"
+    $dexec $master loxicmd get ct --servName=sctpmh2
+    echo -e "\n"
+    p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "133.133.133.1 | 31.31.31.1" | xargs | cut -d '|' -f 10)
+    p2c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "134.134.134.1 | 32.32.32.1" | xargs | cut -d '|' -f 10)
+    p3c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "135.135.135.1 | 31.31.31.1" | xargs | cut -d '|' -f 10)
+
+    echo "Counters: $p1c_new $p2c_new $p3c_new"
+
+    if [[ $p1c_new -gt $p1c_old ]]; then
+        echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 [ACTIVE]"
+        p1=1
+        echo -e "Turning off this path at User.\nEP----->LB--x-->User"
+        $hexec user ip link set euserr1 down;
+        down=1
+    else
+        if [[ $down == 1 ]]; then
+            p1dok=1
+            echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 NOT ACTIVE - [OK]"
+        else
+            echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 [NOT ACTIVE]"
+        fi
+    fi
+
+    if [[ $p2c_new -gt $p2c_old ]]; then
+        echo "Path 2: 32.32.32.1 -> 134.134.134.1 -> 2.2.2.1 [ACTIVE]"
+        p2=1
+    else
+        echo "Path 2: 32.32.32.1 -> 134.134.134.1 -> 2.2.2.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p3c_new -gt $p3c_old ]]; then
+        echo "Path 3: 31.31.31.1 -> 135.135.135.1 -> 1.1.1.1 [ACTIVE]"
+        p3=1
+    else
+        echo "Path 3: 31.31.31.1 -> 135.135.135.1 -> 1.1.1.1 [NOT ACTIVE]"
+    fi
+
+    echo -e "\n"
+    if [[ $recover == 1 ]]; then
+        printf "\t***Setup Recovered***"
+    fi
+    echo -e "\n\n"
+
+    if [[ $fail_new -gt $fail_old && $down == 1 && $recover == 0 ]]; then
+        printf "Failed Calls: \t%10s \t[INCREASING]\n" $fail_new
+        fstart=1
+        code=1
+        calls=0
+    else
+        if [[ $fail_new -eq $fail_old ]]; then
+            if [[ $down == 1 && $fstart == 1 ]]; then
+                printf "Failed Calls: \t%10s \t[STABLE]\n" $fail_new
+                frecover=1
+                code=0
+            else
+                printf "Failed Calls: \t%10s\n" $fail_new
+            fi
+        fi
+    fi
+
+    if [[ $call_new -gt $call_old ]]; then
+        printf "Successful Calls: \t%10s \t[ACTIVE]\n" $call_new
+        calls=1
+        if [[ $down == 1 && $frecover == 1 ]]; then
+            recover=1
+        fi
+    else
+        printf "Successful Calls: \t%10s \t[NOT ACTIVE]\n" $call_new
+    fi
+
+    p1c_old=$p1c_new
+    p2c_old=$p2c_new
+    p3c_old=$p3c_new
+    call_old=$call_new
+    fail_old=$fail_new
+    echo -e "\n"
+    sleep 5
+done
+
+#sudo rm -rf *.out
+#sudo pkill sctp_test
+
+#Restore
+$hexec user ip link set euserr1 up
+$hexec user ip route add default via 1.1.1.254
+sudo docker exec -dt user ksh -c 'sed -i 's/source=0.0.0.0/source=31.31.31.1/g' /opt/seagull/diameter-env/config/conf.server.xml'
+sudo docker exec -dt ep1 ksh -c 'sed -i 's/dest=133.133.133.1/dest=20.20.20.1/g' /opt/seagull/diameter-env/config/conf.client.xml'
+sudo docker exec -dt ep1 ksh -c "sed -i 's/\"call-rate\" value=\"100\"/\"call-rate\" value=\"5000\"/g' /opt/seagull/diameter-env/config/conf.client.xml"
+
+if [[ $calls == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 && $recover == 1 ]]; then
+    echo "sctpmh SCTP Multihoming E2E Multipath Failover [OK]"
+    echo "OK" > /vagrant/status4.txt
+    restart_loxilbs
+else
+    echo "NOK" > /vagrant/status4.txt
+    echo "sctpmh SCTP Multihoming E2E Multipath Failover [NOK]"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nllb2"
+    sudo ip netns exec llb2 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    echo "llb1 ep-info"
+    $dexec llb1 loxicmd get ep
+    echo "-----------------------------"
+    echo -e "\nllb2 lb-info"
+    $dexec llb2 loxicmd get lb
+    echo "llb2 ep-info"
+    $dexec llb2 loxicmd get ep
+    restart_loxilbs
+    exit 1
+fi
+echo -e "------------------------------------------------------------------------------------\n\n\n"
diff --git a/cicd/sctpmh-seagull/validation5.sh b/cicd/sctpmh-seagull/validation5.sh
new file mode 100755
index 000000000..8bb74733d
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation5.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+source /vagrant/common.sh
+source /vagrant/check_ha.sh
+
+echo -e "sctpmh: SCTP Multihoming - C2LB HA Failover Test. Client and LB Multihomed, EP is uni-homed\n"
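+# HA-failover flavour of the multipath test: once connections are confirmed
+# as synced to the backup (checkSync), the master loxilb is killed and
+# restarted (restart_mloxilb) and calls must keep completing via the new master.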
+extIP="20.20.20.1"
+port=2020
+
+check_ha
+
+echo "SCTP Multihoming service sctp-lb(Multipath traffic) -> $extIP:$port"
+echo -e "------------------------------------------------------------------------------------\n"
+
+echo -e "\nHA state Master:$master BACKUP-$backup\n"
+
+echo -e "\nTraffic Flow: User -> LB -> EP "
+
+sudo docker exec -dt ep1 ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 120 stdbuf -oL seagull -conf ../config/conf.server.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.server.xml > ep1.out' 2>&1 > /dev/null &
+sleep 2
+
+sudo docker exec -dt user ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 100 stdbuf -oL seagull -conf ../config/conf.client.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.client.xml > user.out' 2>&1 > /dev/null &
+
+sleep 20
+
+#Path counters
+p1c_old=0
+p1c_new=0
+p2c_old=0
+p2c_new=0
+p3c_old=0
+p3c_new=0
+checkha=0
+hadone=0
+code=0
+nsyncOk=0
+frecover=1
+
+for((i=0;i<15;i++)) do
+    syncOk=$nsyncOk
+    if [[ $checkha == 1 ]]; then
+        check_ha
+        echo -e "\nHA state Master:$master BACKUP-$backup\n"
+        nsyncOk=$(checkSync)
+        if [[ $nsyncOk == 2 ]]; then #No active connections in Master, no need to continue.
+            break;
+        fi
+    fi
+    $dexec user bash -c 'tail -n 25 /opt/seagull/diameter-env/run/user.out'
+    call_new=$(sudo docker exec -t user bash -c 'tail -n 10 /opt/seagull/diameter-env/run/user.out | grep "Successful calls"'| xargs | cut -d '|' -f 4)
+    fail_new=$(sudo docker exec -t user bash -c 'tail -n 10 /opt/seagull/diameter-env/run/user.out | grep "Failed calls"'| xargs | cut -d '|' -f 4)
+
+    $dexec $master loxicmd get ct --servName=sctpmh1
+    echo -e "\n"
+    p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "20.20.20.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+    p2c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "21.21.21.1 | 2.2.2.1" | xargs | cut -d '|' -f 10)
+    p3c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh1 | grep "22.22.22.1 | 1.1.1.1" | xargs | cut -d '|' -f 10)
+
+    echo "Counters: $p1c_new $p2c_new $p3c_new"
+    if [[ $p1c_new -gt $p1c_old ]]; then
+        echo "Path 1: 1.1.1.1 -> 20.20.20.1 [ACTIVE]"
+        p1=1
+    else
+        echo "Path 1: 1.1.1.1 -> 20.20.20.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p2c_new -gt $p2c_old ]]; then
+        echo "Path 2: 2.2.2.1 -> 21.21.21.1 [ACTIVE]"
+        p2=1
+    else
+        echo "Path 2: 2.2.2.1 -> 21.21.21.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p3c_new -gt $p3c_old ]]; then
+        echo "Path 3: 1.1.1.1 -> 22.22.22.1 [ACTIVE]"
+        p3=1
+    else
+        echo "Path 3: 1.1.1.1 -> 22.22.22.1 [NOT ACTIVE]"
+    fi
+
+    echo -e "\n"
+    if [[ $recover == 1 ]]; then
+        printf "\t***Setup Recovered***"
+    fi
+    echo -e "\n\n"
+
+    if [[ $fail_new -gt $fail_old && $hadone == 1 && $recover == 0 ]]; then
+        echo -e "Failed Calls: \t\t$fail_new \t\t[INCREASING]"
+        fstart=1
+        frecover=0
+        calls=0
+        code=1
+    else
+        if [[ $fail_new -eq $fail_old ]]; then
+            if [[ $hadone == 1 ]]; then
+                printf "Failed Calls: \t%10s \t[STABLE]\n" $fail_new
+                frecover=1
+                code=0
+            else
+                printf "Failed Calls: \t%10s\n" $fail_new
+            fi
+
+        fi
+    fi
+
+    if [[ $call_new -gt $call_old ]]; then
+        printf "Successful Calls: \t%10s \t[ACTIVE]\n" $call_new
+        calls=1
+        if [[ $hadone == 1 && $frecover == 1 ]]; then
+            recover=1
+        fi
+    else
+        printf "Successful Calls: \t%10s \t[NOT ACTIVE]\n" $call_new
fi
+
+    p1c_old=$p1c_new
+    p2c_old=$p2c_new
+    p3c_old=$p3c_new
+    call_old=$call_new
+    fail_old=$fail_new
+
+    if [[ $hadone == 0 ]]; then
+        nsyncOk=$(checkSync)
+        if [[ $nsyncOk == 1 ]]; then
+            restart_mloxilb
+            checkha=1
+            hadone=1
+            calls=0
+        fi
+    fi
+    sleep 5
+done
+
+if [[ $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 && $nsyncOk == 1 && $recover == 1 ]]; then
+    echo "sctpmh SCTP Multihoming C2LB HA Failover [OK]"
+    echo "OK" > /vagrant/status5.txt
+    restart_loxilbs
+else
+    echo "NOK" > /vagrant/status5.txt
+    echo "sctpmh SCTP Multihoming C2LB HA Failover [NOK]"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nllb2"
+    sudo ip netns exec llb2 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    echo "llb1 ep-info"
+    $dexec llb1 loxicmd get ep
+    echo -e "\nllb2 lb-info"
+    $dexec llb2 loxicmd get lb
+    echo "llb2 ep-info"
+    $dexec llb2 loxicmd get ep
+    echo "-----------------------------"
+    restart_loxilbs
+    exit 1
+fi
+echo -e "------------------------------------------------------------------------------------\n\n\n"
diff --git a/cicd/sctpmh-seagull/validation6.sh b/cicd/sctpmh-seagull/validation6.sh
new file mode 100755
index 000000000..768d6a485
--- /dev/null
+++ b/cicd/sctpmh-seagull/validation6.sh
@@ -0,0 +1,175 @@
+#!/bin/bash
+source /vagrant/common.sh
+source /vagrant/check_ha.sh
+
+echo -e "sctpmh: SCTP Multihoming - E2E HA Failover Test. Client, LB and EP all Multi-homed\n"
+extIP="133.133.133.1"
+port=2020
+
+check_ha
+
+echo "SCTP Multihoming service sctp-lb(Multipath traffic) -> $extIP:$port"
+echo -e "------------------------------------------------------------------------------------\n"
+
+echo -e "\nHA state Master:$master BACKUP-$backup\n"
+
+echo -e "\nTraffic Flow: EP ---> LB ---> User"
+
+sudo docker exec -dt user ksh -c 'sed -i 's/source=31.31.31.1/source=0.0.0.0/g' /opt/seagull/diameter-env/config/conf.server.xml'
+
+sudo docker exec -dt user ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 220 stdbuf -oL seagull -conf ../config/conf.server.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.server.xml > user.out' 2>&1 > /dev/null &
+sleep 2
+
+sudo docker exec -dt ep1 ksh -c 'sed -i 's/dest=20.20.20.1/dest=133.133.133.1/g' /opt/seagull/diameter-env/config/conf.client.xml'
+sudo docker exec -dt ep1 ksh -c 'export LD_PRELOAD=/usr/local/bin/libsctplib.so.1.0.8; export LD_LIBRARY_PATH=/usr/local/bin; cd /opt/seagull/diameter-env/run/; timeout 200 stdbuf -oL seagull -conf ../config/conf.client.xml -dico ../config/base_s6a.xml -scen ../scenario/ulr-ula.client.xml > ep1.out' 2>&1 > /dev/null &
+
+
+sleep 20
+
+#Path counters
+p1c_old=0
+p1c_new=0
+p2c_old=0
+p2c_new=0
+p3c_old=0
+p3c_new=0
+checkha=0
+hadone=0
+code=0
+nsyncOk=0
+frecover=1
+
+for((i=0;i<15;i++)) do
+    syncOk=$nsyncOk
+    if [[ $checkha == 1 ]]; then
+        check_ha
+        echo -e "\nHA state Master:$master BACKUP-$backup\n"
+        nsyncOk=$(checkSync)
+        if [[ $nsyncOk == 2 ]]; then #No active connections in Master, no need to continue.
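+            # checkSync reported 2: the master no longer holds any established
+            # SCTP sessions, so there is nothing left to fail over and the
+            # poll loop can stop early.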
+            break;
+        fi
+    fi
+    $dexec user bash -c 'tail -n 25 /opt/seagull/diameter-env/run/ep1.out'
+    call_new=$(sudo docker exec -t ep1 bash -c 'tail -n 10 /opt/seagull/diameter-env/run/ep1.out | grep "Successful calls"'| xargs | cut -d '|' -f 4)
+    fail_new=$(sudo docker exec -t ep1 bash -c 'tail -n 10 /opt/seagull/diameter-env/run/ep1.out | grep "Failed calls"'| xargs | cut -d '|' -f 4)
+    echo -e "\n"
+    $dexec $master loxicmd get ct --servName=sctpmh2
+    echo -e "\n"
+    p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "133.133.133.1 | 31.31.31.1" | xargs | cut -d '|' -f 10)
+    p2c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "134.134.134.1 | 32.32.32.1" | xargs | cut -d '|' -f 10)
+    p3c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "135.135.135.1 | 31.31.31.1" | xargs | cut -d '|' -f 10)
+
+
+    echo "Counters: $p1c_new $p2c_new $p3c_new"
+    if [[ $p1c_new -gt $p1c_old ]]; then
+        echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 [ACTIVE]"
+        p1=1
+    else
+        echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p2c_new -gt $p2c_old ]]; then
+        echo "Path 2: 32.32.32.1 -> 134.134.134.1 -> 2.2.2.1 [ACTIVE]"
+        p2=1
+    else
+        echo "Path 2: 32.32.32.1 -> 134.134.134.1 -> 2.2.2.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p3c_new -gt $p3c_old ]]; then
+        echo "Path 3: 31.31.31.1 -> 135.135.135.1 -> 1.1.1.1 [ACTIVE]"
+        p3=1
+    else
+        echo "Path 3: 31.31.31.1 -> 135.135.135.1 -> 1.1.1.1 [NOT ACTIVE]"
+    fi
+    echo -e "\n"
+    if [[ $recover == 1 ]]; then
+        printf "\t***Setup Recovered***"
+    fi
+    echo -e "\n\n"
+
+
+    if [[ $fail_new -gt $fail_old && $hadone == 1 && $recover == 0 ]]; then
+        echo -e "Failed Calls: \t\t$fail_new \t\t[INCREASING]"
+        fstart=1
+        frecover=0
+        calls=0
+        code=1
+    else
+        if [[ $fail_new -eq $fail_old ]]; then
+            if [[ $hadone == 1 ]]; then
+                printf "Failed Calls: \t%10s \t[STABLE]\n" $fail_new
+                frecover=1
+                code=0
+            else
+                printf "Failed Calls: \t%10s\n" $fail_new
+            fi
+        fi
+    fi
+
+    if [[ $call_new -gt $call_old ]]; then
+        printf "Successful Calls: \t%10s \t[ACTIVE]\n" $call_new
+        calls=1
+        if [[ $hadone == 1 && $frecover == 1 ]]; then
+            recover=1
+        fi
+    else
+        printf "Successful Calls: \t%10s \t[NOT ACTIVE]\n" $call_new
+    fi
+
+    p1c_old=$p1c_new
+    p2c_old=$p2c_new
+    p3c_old=$p3c_new
+    call_old=$call_new
+    fail_old=$fail_new
+
+    if [[ $hadone == 0 ]]; then
+        nsyncOk=$(checkSync)
+        if [[ $nsyncOk == 1 ]]; then
+            restart_mloxilb
+            checkha=1
+            hadone=1
+        fi
+    fi
+    sleep 5
+done
+
+#Revert
+sudo docker exec -dt user ksh -c 'sed -i 's/source=0.0.0.0/source=31.31.31.1/g' /opt/seagull/diameter-env/config/conf.server.xml'
+sudo docker exec -dt ep1 ksh -c 'sed -i 's/dest=133.133.133.1/dest=20.20.20.1/g' /opt/seagull/diameter-env/config/conf.client.xml'
+
+if [[ $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 && $nsyncOk == 1 && $recover == 1 ]]; then
+    echo "sctpmh SCTP Multihoming E2E HA Failover [OK]"
+    echo "OK" > /vagrant/status6.txt
+    restart_loxilbs
+else
+    echo "NOK" > /vagrant/status6.txt
+    echo "sctpmh SCTP Multihoming E2E HA Failover [NOK]"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nllb2"
+    sudo ip netns exec llb2 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    
echo "llb1 ep-info" + $dexec llb1 loxicmd get ep + echo -e "\nllb2 lb-info" + $dexec llb2 loxicmd get lb + echo "llb2 ep-info" + $dexec llb2 loxicmd get ep + echo "-----------------------------" + restart_loxilbs + exit 1 +fi +echo -e "------------------------------------------------------------------------------------\n\n\n" From 3c905f51da1560006f7c1cd69923653ba2a20c0d Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Tue, 10 Sep 2024 17:39:42 +0900 Subject: [PATCH 02/34] gh-87 : sctpmh with sctp_test test suite cicd updated --- cicd/sctpmh/check_ha.sh | 165 ++++++++++++++++++++++++++----------- cicd/sctpmh/config.sh | 43 +++++----- cicd/sctpmh/validation.sh | 83 +++++++++++-------- cicd/sctpmh/validation1.sh | 9 +- cicd/sctpmh/validation2.sh | 6 +- cicd/sctpmh/validation3.sh | 6 +- cicd/sctpmh/validation4.sh | 8 +- cicd/sctpmh/validation5.sh | 38 +++------ cicd/sctpmh/validation6.sh | 140 +++++++++++++++++++++++++++++++ 9 files changed, 363 insertions(+), 135 deletions(-) create mode 100755 cicd/sctpmh/validation6.sh diff --git a/cicd/sctpmh/check_ha.sh b/cicd/sctpmh/check_ha.sh index b04922194..b31574fb0 100644 --- a/cicd/sctpmh/check_ha.sh +++ b/cicd/sctpmh/check_ha.sh @@ -4,62 +4,131 @@ master="llb1" backup="llb2" function check_ha() { -while : ; do - status1=$($hexec llb1 curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/cistate/all' -H 'accept: application/json' | jq -r '.Attr[0].state') - status2=$($hexec llb2 curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/cistate/all' -H 'accept: application/json' | jq -r '.Attr[0].state') - count=0 - if [[ $status1 == "MASTER" && $status2 == "BACKUP" ]]; - then - master="llb1" - backup="llb2" - break - elif [[ $status2 == "MASTER" && $status1 == "BACKUP" ]]; - then - master="llb2" - backup="llb1" - break - else - count=$(( $count + 1 )) - if [[ $count -ge 20 ]]; then + while : ; do + status1=$($hexec llb1 curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/cistate/all' -H 'accept: application/json' | jq -r '.Attr[0].state') + status2=$($hexec llb2 curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/cistate/all' -H 'accept: application/json' | jq -r '.Attr[0].state') + count=0 + if [[ $status1 == "MASTER" && $status2 == "BACKUP" ]]; + then + master="llb1" + backup="llb2" + break + elif [[ $status2 == "MASTER" && $status1 == "BACKUP" ]]; + then + master="llb2" + backup="llb1" + break + else + count=$(( $count + 1 )) + if [[ $count -ge 20 ]]; then + echo "KA llb1-$status1, llb2-$status2 [NOK] - Exiting" >&2 + exit 1; + fi echo "KA llb1-$status1, llb2-$status2 [NOK]" >&2 - exit 1; + sleep 5 fi - sleep 5 - fi -done + done } function checkSync() { -count=1 -sync=0 -while [[ $count -le 5 ]] ; do -echo -e "\nStatus at MASTER:$master\n" >&2 -$dexec $master loxicmd get ct | grep est >&2 + count=1 + sync=0 + while [[ $count -le 5 ]] ; do + echo -e "\nStatus at MASTER:$master\n" >&2 + ct=`$dexec $master loxicmd get ct | grep est` + echo "${ct//'\n'/$'\n'}" >&2 -echo -e "\nStatus at BACKUP:$backup\n" >&2 -$dexec $backup loxicmd get ct | grep est >&2 + echo -e "\nStatus at BACKUP:$backup\n" >&2 + ct=`$dexec $backup loxicmd get ct | grep est` + echo "${ct//'\n'/$'\n'}" >&2 -nres1=$($hexec $master curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) -nres2=$($hexec $backup curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) + nres1=$($hexec $master curl -sX 
'GET' 'http://0.0.0.0:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l)
+        nres2=$($hexec $backup curl -sX 'GET' 'http://0.0.0.0:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l)
 
-if [[ $nres1 == 0 ]]; then
-    echo -e "No active connections in Master:$master. Exiting!" >&2
-    return 2
-fi
+        if [[ $nres1 == 0 ]]; then
+            echo -e "No active connections in Master:$master. Exiting!" >&2
+            echo "2" # callers capture checkSync via $(...), so emit the code on stdout too
+            return 2
+        fi
 
-if [[ $nres1 == $nres2 && $nres1 != 0 ]]; then
-    echo -e "\nConnections sync successful!!!\n" >&2
-    sync=1
-    break;
-fi
-echo -e "\nConnections sync pending.. Let's wait a little more..\n" >&2
-count=$(( $count + 1 ))
-sleep 2
-done
+        if [[ $nres1 == $nres2 && $nres1 != 0 ]]; then
+            echo -e "\nConnections sync successful!!!\n" >&2
+            sync=1
+            break;
+        fi
+        echo -e "\nConnections sync pending.. Let's wait a little more..\n" >&2
+        count=$(( $count + 1 ))
+        sleep 2
+    done
 
-if [[ $sync == 0 ]]; then
-    echo -e "\nConnection Sync failed\n" >&2
-    return 0
-fi
-echo "$sync"
+    if [[ $sync == 0 ]]; then
+        echo -e "\nConnection Sync failed\n" >&2
+        return 0
+    fi
+    echo "$sync"
 }
+
+function restart_mloxilb() {
+    if [[ $master == "llb1" ]]; then
+        pat="cluster=172.17.0.3"
+        copts=" --cluster=172.17.0.3"
+        self=" --self=0"
+        ka=" --ka=172.17.0.3:172.17.0.2"
+    else
+        pat="cluster=172.17.0.2"
+        copts=" --cluster=172.17.0.2"
+        self=" --self=1"
+        ka=" --ka=172.17.0.2:172.17.0.3"
+    fi
+    echo "Restarting MASTER: $master"
+    pid=$(docker exec -i $master ps -aef | grep $pat | xargs | cut -d ' ' -f 2)
+    echo "Killing $pid" >&2
+    docker exec -dt $master kill -9 $pid
+    docker exec -dt $master ip link del llb0
+    docker exec -dt $master nohup /root/loxilb-io/loxilb/loxilb $copts $self $ka > /dev/null &
+    pid=$(docker exec -i $master ps -aef | grep $pat | xargs | cut -d ' ' -f 2)
+    echo "New loxilb pid: $pid" >&2
+}
+
+function restart_loxilbs() {
+    if [[ $master == "llb1" ]]; then
+        mpat="cluster=172.17.0.3"
+        mcopts=" --cluster=172.17.0.3"
+        mself=" --self=0"
+        mka=" --ka=172.17.0.3:172.17.0.2"
+
+        bpat="cluster=172.17.0.2"
+        bcopts=" --cluster=172.17.0.2"
+        bself=" --self=1"
+        bka=" --ka=172.17.0.2:172.17.0.3"
+    else
+        mpat="cluster=172.17.0.2"
+        mcopts=" --cluster=172.17.0.2"
+        mself=" --self=1"
+        mka=" --ka=172.17.0.2:172.17.0.3"
+
+        bpat="cluster=172.17.0.3"
+        bcopts=" --cluster=172.17.0.3"
+        bself=" --self=0"
+        bka=" --ka=172.17.0.3:172.17.0.2"
+    fi
+    echo "Restarting $master"
+    pid=$(docker exec -i $master ps -aef | grep $mpat | xargs | cut -d ' ' -f 2)
+    echo "Killing $pid" >&2
+    docker exec -dt $master kill -9 $pid
+    docker exec -dt $master ip link del llb0
+    docker exec -dt $master nohup /root/loxilb-io/loxilb/loxilb $mcopts $mself $mka > /dev/null &
+    pid=$(docker exec -i $master ps -aef | grep $mpat | xargs | cut -d ' ' -f 2)
+    echo "New loxilb pid: $pid" >&2
+
+    echo "Restarting $backup"
+    pid=$(docker exec -i $backup ps -aef | grep $bpat | xargs | cut -d ' ' -f 2)
+    echo "Killing $pid" >&2
+    docker exec -dt $backup kill -9 $pid
+    docker exec -dt $backup ip link del llb0
+    docker exec -dt $backup nohup /root/loxilb-io/loxilb/loxilb $bcopts $bself $bka > /dev/null &
+    pid=$(docker exec -i $backup ps -aef | grep $bpat | xargs | cut -d ' ' -f 2)
+    echo "New loxilb pid: $pid" >&2
+
+}
+
+
diff --git a/cicd/sctpmh/config.sh b/cicd/sctpmh/config.sh
index 411d4e5d4..2436deedb 100755
--- a/cicd/sctpmh/config.sh
+++ b/cicd/sctpmh/config.sh
@@ -21,18 +21,18 @@ echo 
"#########################################" echo "Connecting and configuring hosts" echo "#########################################" -connect_docker_hosts user r1 -connect_docker_hosts user r2 -connect_docker_hosts r1 sw1 -connect_docker_hosts r2 sw1 -connect_docker_hosts sw1 llb1 -connect_docker_hosts sw1 llb2 -connect_docker_hosts llb1 sw2 -connect_docker_hosts llb2 sw2 -connect_docker_hosts sw2 r3 -connect_docker_hosts sw2 r4 -connect_docker_hosts r3 ep1 -connect_docker_hosts r4 ep1 +connect_docker_hosts user r1 1500 +connect_docker_hosts user r2 1500 +connect_docker_hosts r1 sw1 1500 +connect_docker_hosts r2 sw1 1500 +connect_docker_hosts sw1 llb1 1500 +connect_docker_hosts sw1 llb2 1500 +connect_docker_hosts llb1 sw2 1500 +connect_docker_hosts llb2 sw2 1500 +connect_docker_hosts sw2 r3 1500 +connect_docker_hosts sw2 r4 1500 +connect_docker_hosts r3 ep1 1500 +connect_docker_hosts r4 ep1 1500 create_docker_host_cnbridge --host1 sw1 --host2 llb1 create_docker_host_cnbridge --host1 sw1 --host2 llb2 @@ -79,14 +79,15 @@ $hexec ep1 ip route change default via 31.31.31.254 # Backup paths in user $hexec user ip route add 124.124.124.1/32 via 2.2.2.254 -$hexec user ip route add 125.125.125.1/32 via 2.2.2.254 +#$hexec user ip route add 125.125.125.1/32 via 2.2.2.254 +#$hexec user ip route add 32.32.32.1/32 via 2.2.2.254 $hexec user ip route add 134.134.134.1/32 via 2.2.2.254 -$hexec user ip route add 135.135.135.1/32 via 2.2.2.254 +#$hexec user ip route add 135.135.135.1/32 via 2.2.2.254 -$hexec ep1 ip route add 124.124.124.1/32 via 32.32.32.254 -$hexec ep1 ip route add 125.125.125.1/32 via 32.32.32.254 +#$hexec ep1 ip route add 124.124.124.1/32 via 32.32.32.254 +#$hexec ep1 ip route add 125.125.125.1/32 via 31.31.31.254 $hexec ep1 ip route add 134.134.134.1/32 via 32.32.32.254 -$hexec ep1 ip route add 135.135.135.1/32 via 32.32.32.254 +$hexec ep1 ip route add 135.135.135.1/32 via 31.31.31.254 $hexec llb1 ip route add 1.1.1.0/24 via 11.11.11.253 $hexec llb1 ip route add 2.2.2.0/24 via 11.11.11.254 @@ -107,10 +108,10 @@ create_lb_rule llb2 123.123.123.1 --name=sctpmh1 --secips=124.124.124.1,125.125. create_lb_rule llb1 133.133.133.1 --name=sctpmh2 --secips=134.134.134.1,135.135.135.1 --sctp=2020:9999 --endpoints=1.1.1.1:1 --mode=fullnat create_lb_rule llb2 133.133.133.1 --name=sctpmh2 --secips=134.134.134.1,135.135.135.1 --sctp=2020:9999 --endpoints=1.1.1.1:1 --mode=fullnat -$dexec llb1 loxicmd create ep 1.1.1.1 --name=1.1.1.1_sctp_9999 --probetype=none -$dexec llb1 loxicmd create ep 31.31.31.1 --name=31.31.31.1_sctp_9999 --probetype=none -$dexec llb2 loxicmd create ep 1.1.1.1 --name=1.1.1.1_sctp_9999 --probetype=none -$dexec llb2 loxicmd create ep 31.31.31.1 --name=31.31.31.1_sctp_9999 --probetype=none +$dexec llb1 loxicmd create ep 1.1.1.1 --name=1.1.1.1_sctp_8080 --probetype=ping +$dexec llb1 loxicmd create ep 31.31.31.1 --name=31.31.31.1_sctp_8080 --probetype=ping +$dexec llb2 loxicmd create ep 1.1.1.1 --name=1.1.1.1_sctp_8080 --probetype=ping +$dexec llb2 loxicmd create ep 31.31.31.1 --name=31.31.31.1_sctp_8080 --probetype=ping create_lb_rule llb1 11.11.11.11 --tcp=80:8080 --endpoints=31.31.31.1:1 diff --git a/cicd/sctpmh/validation.sh b/cicd/sctpmh/validation.sh index 34cd2749d..8a62cfff2 100755 --- a/cicd/sctpmh/validation.sh +++ b/cicd/sctpmh/validation.sh @@ -1,40 +1,55 @@ #!/bin/bash code=0 -echo "SCTP Multihoming - Test case #1" -echo -e "*********************************************************************************" -./validation1.sh -if [[ $? 
== 1 ]]; then - code=1 -fi -echo -e "\n\n\nSCTP Multihoming - Test case #2" -echo -e "*********************************************************************************" -./validation2.sh -if [[ $? == 1 ]]; then - code=1 -fi -echo -e "\n\n\nSCTP Multihoming - Test case #3" -echo -e "*********************************************************************************" -./validation3.sh -if [[ $? == 1 ]]; then - code=1 -fi -echo -e "\n\n\nSCTP Multihoming - Test case #4" -echo -e "*********************************************************************************" -./validation4.sh -if [[ $? == 1 ]]; then - code=1 -fi -echo -e "\n\n\nSCTP Multihoming - Test case #5" -echo -e "*********************************************************************************" -sleep 60 -./validation5.sh -if [[ $? == 1 ]]; then - code=1 -fi -echo -e "\n\n\n*********************************************************************************" +tc=( "Basic Test - Client & EP Uni-homed and LB is Multi-homed" "Multipath Test, Client and LB Multihomed, EP is uni-homed" "C2LB Multipath Failover Test - Client and LB Multihomed, EP is uni-homed" "E2E Multipath Failover Test - Client, LB and EP all Multihomed" "C2LB HA Failover Test - Client and LB Multihomed, EP is uni-homed" "E2E HA Failover Test. Client, LB and EP all Multihomed" ) +padding="............................................................................................................." +border="**************************************************************************************************************************************************" + +for((j=0,i=1; i<=6; i++, j++)); do + echo "SCTP Multihoming - Test case #$i" + echo -e "\n\n\n$border\n" + ./validation$i.sh + echo -e "\n\n" + file=status$i.txt + status=`cat $file` + title=${tc[j]} + echo -e "\n\n" + + if [[ $status == "NOK" ]]; then + code=1 + printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[FAILED]"; + else + printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[PASSED]"; + fi + echo -e "\n\n\n$border\n\n" + + sleep 30 +done + +echo -e "\n\n\n$border\n" +printf "================================================== SCTP MULTIHOMING CONSOLIDATED RESULT ==========================================================\n" +for((j=0,i=1; i<=6; i++, j++)); do + file=status$i.txt + status=`cat $file` + title=${tc[j]} + echo -e "\n\n" + + if [[ $status == "NOK" ]]; then + code=1 + printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[FAILED]"; + else + printf "Test case #%2s - %s%s %s\n" "$i" "$title" "${padding:${#title}}" "[PASSED]"; + fi +done + +echo -e "\n$border" + +echo -e "\n\n\n$border\n" if [[ $code == 0 ]]; then - echo -e "\n\n SCTP Multihoming CICD [OK]" + echo -e "SCTP Multihoming with sctp_test CICD [OK]" else - echo -e "\n\n SCTP Multihoming CICD [NOK]" + echo -e "SCTP Multihoming with sctp_test CICD [NOK]" fi +echo -e "\n$border\n" + +sudo rm -rf statu*.txt exit $code diff --git a/cicd/sctpmh/validation1.sh b/cicd/sctpmh/validation1.sh index 6043bf114..f41fffce4 100755 --- a/cicd/sctpmh/validation1.sh +++ b/cicd/sctpmh/validation1.sh @@ -1,10 +1,13 @@ #!/bin/bash source ../common.sh +source check_ha.sh echo -e "sctpmh: SCTP Multihoming Basic Test - Client & EP Uni-homed and LB is Multi-homed\n" extIP="123.123.123.1" port=2020 +check_ha + echo "SCTP Multihoming service sctp-lb -> $extIP:$port" echo -e "------------------------------------------------------------------------------------\n" @@ -26,7 +29,10 @@ sudo pkill sctp_darn if [[ 
"$res" == "$exp" ]]; then echo $res echo -e "\nsctpmh SCTP Multihoming service Basic Test [OK]\n" + echo "OK" > status1.txt + restart_loxilbs else + echo "NOK" > status1.txt echo "sctpmh SCTP Multihoming service Basic Test [NOK]" echo "Expected : $exp" echo "Received : $res" @@ -51,11 +57,12 @@ else echo "llb1 ep-info" $dexec llb1 loxicmd get ep echo "llb1 bpf-info" - $dexec llb1 ntc filter show dev eth0 ingress + $dexec llb1 tc filter show dev eth0 ingress echo "BFP trace -- " sudo timeout 5 cat /sys/kernel/debug/tracing/trace_pipe sudo killall -9 cat echo "BFP trace -- " + restart_loxilbs exit 1 fi echo -e "------------------------------------------------------------------------------------\n\n\n" diff --git a/cicd/sctpmh/validation2.sh b/cicd/sctpmh/validation2.sh index c9dbfd192..3829422b0 100755 --- a/cicd/sctpmh/validation2.sh +++ b/cicd/sctpmh/validation2.sh @@ -12,7 +12,7 @@ echo -e "----------------------------------------------------------------------- echo -e "\nHA state Master:$master BACKUP-$backup\n" -$hexec ep1 sctp_test -H 0.0.0.0 -P 9999 -l > ep1.out & +$hexec ep1 sctp_test -H 31.31.31.1 -P 9999 -l > ep1.out & sleep 2 $hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 100 -x 50000 > user.out & @@ -72,7 +72,10 @@ sudo pkill sctp_test sudo rm *.out if [[ $fin == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 ]]; then echo "sctpmh SCTP Multihoming Multipath [OK]" + echo "OK" > status2.txt + restart_loxilbs else + echo "NOK" > status2.txt echo "sctpmh SCTP Multihoming Multipath [NOK]" echo -e "\nuser" sudo ip netns exec user ip route @@ -99,6 +102,7 @@ else $dexec llb2 loxicmd get lb echo "llb2 ep-info" $dexec llb2 loxicmd get ep + restart_loxilbs exit 1 fi echo -e "------------------------------------------------------------------------------------\n\n\n" diff --git a/cicd/sctpmh/validation3.sh b/cicd/sctpmh/validation3.sh index 9231b03d0..f2d3a8939 100755 --- a/cicd/sctpmh/validation3.sh +++ b/cicd/sctpmh/validation3.sh @@ -15,7 +15,7 @@ echo -e "\nHA state Master:$master BACKUP-$backup\n" echo -e "\nTraffic Flow: User -> LB -> EP " -$hexec ep1 sctp_test -H 0.0.0.0 -P 9999 -l > ep1.out & +$hexec ep1 sctp_test -H 31.31.31.1 -P 9999 -l > ep1.out & sleep 2 $hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 100 -x 50000 > user.out & @@ -92,7 +92,10 @@ $hexec user ip route add default via 1.1.1.254 if [[ $fin == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 ]]; then echo "sctpmh SCTP Multihoming Multipath Failover [OK]" + echo "OK" > status3.txt + restart_loxilbs else + echo "NOK" > status3.txt echo "sctpmh SCTP Multihoming Multipath Failover [NOK]" echo -e "\nuser" sudo ip netns exec user ip route @@ -119,6 +122,7 @@ else echo "llb2 ep-info" $dexec llb2 loxicmd get ep echo "-----------------------------" + restart_loxilbs exit 1 fi echo -e "------------------------------------------------------------------------------------\n\n\n" diff --git a/cicd/sctpmh/validation4.sh b/cicd/sctpmh/validation4.sh index b5ce09f19..93f76e10a 100755 --- a/cicd/sctpmh/validation4.sh +++ b/cicd/sctpmh/validation4.sh @@ -1,7 +1,7 @@ #!/bin/bash source ../common.sh source check_ha.sh -echo -e "sctpmh: SCTP Multihoming - Multipath Failover Test. Client, LB and EP all Multihomed\n" +echo -e "sctpmh: SCTP Multihoming - E2E Multipath Failover Test. 
Client, LB and EP all Multihomed\n" extIP="133.133.133.1" port=2020 @@ -13,7 +13,7 @@ echo -e "----------------------------------------------------------------------- echo -e "\nHA state Master:$master BACKUP-$backup\n" echo -e "\nTraffic Flow: EP ---> LB ---> User" -$hexec user sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 9999 -l > user.out & +$hexec user sctp_test -H 0.0.0.0 -P 9999 -l > user.out & sleep 2 $hexec ep1 stdbuf -oL sctp_test -H 31.31.31.1 -B 32.32.32.1 -P 20000 -h $extIP -p $port -s -m 100 -x 50000 > ep1.out & @@ -91,7 +91,10 @@ $hexec user ip route add default via 1.1.1.254 if [[ $fin == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 ]]; then echo "sctpmh SCTP Multihoming E2E Multipath Failover [OK]" + echo "OK" > status4.txt + restart_loxilbs else + echo "NOK" > status4.txt echo "sctpmh SCTP Multihoming E2E Multipath Failover [NOK]" echo -e "\nuser" sudo ip netns exec user ip route @@ -118,6 +121,7 @@ else $dexec llb2 loxicmd get lb echo "llb2 ep-info" $dexec llb2 loxicmd get ep + restart_loxilbs exit 1 fi echo -e "------------------------------------------------------------------------------------\n\n\n" diff --git a/cicd/sctpmh/validation5.sh b/cicd/sctpmh/validation5.sh index a2d52bc61..61d4a40bd 100755 --- a/cicd/sctpmh/validation5.sh +++ b/cicd/sctpmh/validation5.sh @@ -14,11 +14,12 @@ echo -e "----------------------------------------------------------------------- echo -e "\nHA state Master:$master BACKUP-$backup\n" echo -e "\nTraffic Flow: User -> LB -> EP " +sudo pkill sctp_test -$hexec ep1 sctp_test -H 0.0.0.0 -P 9999 -l > ep1.out & +$hexec ep1 sctp_test -H 31.31.31.1 -P 9999 -l > ep1.out & sleep 2 -$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 100 -x 200000 > user.out & +$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 1400 -x 300000 > user.out & #Path counters p1c_old=0 @@ -32,29 +33,8 @@ hadone=0 code=0 nsyncOk=0 -function restart_mloxilb() { - if [[ $master == "llb1" ]]; then - pat="cluster=172.17.0.3" - copts=" --cluster=172.17.0.3" - self=" --self=0" - ka=" --ka=172.17.0.3:172.17.0.2" - else - pat="cluster=172.17.0.2" - copts=" --cluster=172.17.0.2" - self=" --self=1" - ka=" --ka=172.17.0.2:172.17.0.3" - fi - pid=$(docker exec -i $master ps -aef | grep $pat | xargs | cut -d ' ' -f 2) - echo Killing $pid >&2 - docker exec -dt $master kill -9 $pid - docker exec -dt $master ip link del llb0 - docker exec -dt $master nohup /root/loxilb-io/loxilb/loxilb $copts $self $ka > /dev/null & - pid=$(docker exec -i $master ps -aef | grep $pat | xargs | cut -d ' ' -f 2) - echo "New loxilb pid: $pid" >&2 -} - for((i=0;i<200;i++)) do - fin=`tail -n 100 user.out | grep "Client: Sending packets.(200000/200000)"` + fin=`tail -n 100 user.out | grep "Client: Sending packets.(300000/300000)"` if [[ ! -z $fin ]]; then fin=1 echo "sctp_test done." @@ -65,7 +45,7 @@ for((i=0;i<200;i++)) do check_ha echo -e "\nHA state Master:$master BACKUP-$backup\n" nsyncOk=$(checkSync) - if [[ $nsyncOk == 2 ]]; then + if [[ $nsyncOk == 2 ]]; then #No active connections in Master, no need to continue. 
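+        # Post-restart pass: check_ha has re-resolved the MASTER/BACKUP roles
+        # and checkSync has re-probed conntrack; a result of 2 means the new
+        # master kept no established entries, so there is nothing left to verify.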
break; fi fi @@ -115,9 +95,12 @@ sudo rm -rf *.out sudo pkill sctp_test if [[ $fin == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 && $syncOk == 1 ]]; then - echo "sctpmh SCTP Multihoming HA Failover [OK]" + echo "sctpmh SCTP Multihoming C2LB HA Failover [OK]" + echo "OK" > status5.txt + restart_loxilbs else - echo "sctpmh SCTP Multihoming HA Failover [NOK]" + echo "NOK" > status5.txt + echo "sctpmh SCTP Multihoming C2LB HA Failover [NOK]" echo -e "\nuser" sudo ip netns exec user ip route echo -e "\nr1" @@ -143,6 +126,7 @@ else echo "llb2 ep-info" $dexec llb2 loxicmd get ep echo "-----------------------------" + restart_loxilbs exit 1 fi echo -e "------------------------------------------------------------------------------------\n\n\n" diff --git a/cicd/sctpmh/validation6.sh b/cicd/sctpmh/validation6.sh new file mode 100755 index 000000000..bbccecd3a --- /dev/null +++ b/cicd/sctpmh/validation6.sh @@ -0,0 +1,140 @@ +#!/bin/bash +source ../common.sh +source check_ha.sh + +echo -e "sctpmh: SCTP Multihoming - E2E HA Failover Test. Client, LB and EP all Multi-homed\n" +extIP="133.133.133.1" +port=2020 + +check_ha + +echo "SCTP Multihoming service sctp-lb(Multipath traffic) -> $extIP:$port" +echo -e "------------------------------------------------------------------------------------\n" + +echo -e "\nHA state Master:$master BACKUP-$backup\n" + +echo -e "\nTraffic Flow: EP ---> LB ---> User" + +sudo pkill sctp_test + +#$hexec user sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 9999 -l > user.out & +$hexec user sctp_test -H 0.0.0.0 -P 9999 -l > user.out & +sleep 2 + +$hexec ep1 stdbuf -oL sctp_test -H 31.31.31.1 -B 32.32.32.1 -P 20000 -h $extIP -p $port -s -m 1400 -x 500000 > ep1.out & + +#Path counters +p1c_old=0 +p1c_new=0 +p2c_old=0 +p2c_new=0 +p3c_old=0 +p3c_new=0 +checkha=0 +hadone=0 +code=0 +nsyncOk=0 + +for((i=0;i<200;i++)) do + fin=`tail -n 100 ep1.out | grep "Client: Sending packets.(500000/500000)"` + if [[ ! -z $fin ]]; then + fin=1 + echo "sctp_test done." + break; + fi + + syncOk=$nsyncOk + if [[ $checkha == 1 ]]; then + check_ha + echo -e "\nHA state Master:$master BACKUP-$backup\n" + nsyncOk=$(checkSync) + if [[ $nsyncOk == 2 ]]; then #No active connections in Master, no need to continue. 
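+            # An empty conntrack table on the new master ends the test run;
+            # the [OK] verdict below additionally requires nsyncOk == 1.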
+            break;
+        fi
+    fi
+    echo -e "\n"
+    $dexec $master loxicmd get ct --servName=sctpmh2
+    echo -e "\n"
+    p1c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "133.133.133.1 | 31.31.31.1" | xargs | cut -d '|' -f 10)
+    p2c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "134.134.134.1 | 32.32.32.1" | xargs | cut -d '|' -f 10)
+    p3c_new=$(sudo docker exec -i $master loxicmd get ct --servName=sctpmh2 | grep "135.135.135.1 | 31.31.31.1" | xargs | cut -d '|' -f 10)
+
+
+    echo "Counters: $p1c_new $p2c_new $p3c_new"
+    if [[ $p1c_new -gt $p1c_old ]]; then
+        echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 [ACTIVE]"
+        p1=1
+    else
+        echo "Path 1: 31.31.31.1 -> 133.133.133.1 -> 1.1.1.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p2c_new -gt $p2c_old ]]; then
+        echo "Path 2: 32.32.32.1 -> 134.134.134.1 -> 2.2.2.1 [ACTIVE]"
+        p2=1
+    else
+        echo "Path 2: 32.32.32.1 -> 134.134.134.1 -> 2.2.2.1 [NOT ACTIVE]"
+    fi
+
+    if [[ $p3c_new -gt $p3c_old ]]; then
+        echo "Path 3: 31.31.31.1 -> 135.135.135.1 -> 1.1.1.1 [ACTIVE]"
+        p3=1
+    else
+        echo "Path 3: 31.31.31.1 -> 135.135.135.1 -> 1.1.1.1 [NOT ACTIVE]"
+    fi
+    echo -e "\n"
+
+    p1c_old=$p1c_new
+    p2c_old=$p2c_new
+    p3c_old=$p3c_new
+
+    if [[ $hadone == 0 ]]; then
+        nsyncOk=$(checkSync)
+        if [[ $nsyncOk == 1 ]]; then
+            restart_mloxilb
+            checkha=1
+            hadone=1
+        fi
+    fi
+    sleep 5
+done
+
+
+sudo rm -rf *.out
+sudo pkill sctp_test
+
+if [[ $fin == 1 && $p1 == 1 && $p2 == 1 && $p3 == 1 && $code == 0 && $nsyncOk == 1 ]]; then
+    echo "sctpmh SCTP Multihoming E2E HA Failover [OK]"
+    echo "OK" > status6.txt
+    restart_loxilbs
+else
+    echo "NOK" > status6.txt
+    echo "sctpmh SCTP Multihoming E2E HA Failover [NOK]"
+    echo -e "\nuser"
+    sudo ip netns exec user ip route
+    echo -e "\nr1"
+    sudo ip netns exec r1 ip route
+    echo -e "\nr2"
+    sudo ip netns exec r2 ip route
+    echo -e "\nllb1"
+    sudo ip netns exec llb1 ip route
+    echo -e "\nllb2"
+    sudo ip netns exec llb2 ip route
+    echo -e "\nr3"
+    sudo ip netns exec r3 ip route
+    echo -e "\nr4"
+    sudo ip netns exec r4 ip route
+    echo "-----------------------------"
+
+    echo -e "\nllb1 lb-info"
+    $dexec llb1 loxicmd get lb
+    echo "llb1 ep-info"
+    $dexec llb1 loxicmd get ep
+    echo -e "\nllb2 lb-info"
+    $dexec llb2 loxicmd get lb
+    echo "llb2 ep-info"
+    $dexec llb2 loxicmd get ep
+    echo "-----------------------------"
+    restart_loxilbs
+    exit 1
+fi
+echo -e "------------------------------------------------------------------------------------\n\n\n"

From b1f225d4e67adeb992276aaf838ea04818cfc852 Mon Sep 17 00:00:00 2001
From: Nikhil Malik
Date: Wed, 11 Sep 2024 11:04:44 +0900
Subject: [PATCH 03/34] gh-87 : sctpmh with sctp_test test suite cicd updated
 for random size packet test

---
 cicd/sctpmh-seagull/check_ha.sh    |  4 ++--
 cicd/sctpmh-seagull/common.sh      | 14 +++++++++++---
 cicd/sctpmh-seagull/validation.sh  |  6 +++---
 cicd/sctpmh-seagull/validation1.sh |  2 +-
 cicd/sctpmh/config.sh              | 24 ++++++++++++------------
 cicd/sctpmh/validation2.sh         |  4 ++--
 cicd/sctpmh/validation3.sh         |  7 +++++--
 cicd/sctpmh/validation4.sh         |  6 ++++--
 cicd/sctpmh/validation5.sh         |  6 +++---
 cicd/sctpmh/validation6.sh         |  6 +++---
 10 files changed, 46 insertions(+), 33 deletions(-)

diff --git a/cicd/sctpmh-seagull/check_ha.sh b/cicd/sctpmh-seagull/check_ha.sh
index f06e85953..b31574fb0 100644
--- a/cicd/sctpmh-seagull/check_ha.sh
+++ b/cicd/sctpmh-seagull/check_ha.sh
@@ -21,9 +21,10 @@ function check_ha() {
         else
             count=$(( $count + 1 ))
             if [[ $count -ge 20 ]]; then
-                echo "KA 
llb1-$status1, llb2-$status2 [NOK] - Exiting" >&2 exit 1; fi + echo "KA llb1-$status1, llb2-$status2 [NOK]" >&2 sleep 5 fi done @@ -34,7 +35,6 @@ function checkSync() { sync=0 while [[ $count -le 5 ]] ; do echo -e "\nStatus at MASTER:$master\n" >&2 - #$dexec $master loxicmd get ct | grep est >&2 ct=`$dexec $master loxicmd get ct | grep est` echo "${ct//'\n'/$'\n'}" >&2 diff --git a/cicd/sctpmh-seagull/common.sh b/cicd/sctpmh-seagull/common.sh index c5a28ac5f..d11adb0e3 100644 --- a/cicd/sctpmh-seagull/common.sh +++ b/cicd/sctpmh-seagull/common.sh @@ -76,7 +76,7 @@ spawn_docker_host() { fi shift 2 ;; - -d | --ka-config ) + -n | --ka-config ) kpath="$2" if [[ -z ${ka+x} ]]; then ka="in" @@ -87,6 +87,10 @@ spawn_docker_host() { extra_opts="$2" shift 2 ;; + -x | --docker-args) + docker_extra_opts="$2" + shift 2 + ;; -*|--*) echo "Unknown option $1" exit @@ -109,11 +113,11 @@ spawn_docker_host() { fi if [[ ! -z ${ka+x} ]]; then sudo mkdir -p /etc/shared/$dname/ - docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt --pid=host --cgroupns=host --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log -v /etc/shared/$dname:/etc/shared $loxilb_config --name $dname $lxdocker + docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt $docker_extra_opts --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log -v /etc/shared/$dname:/etc/shared $loxilb_config --name $dname $lxdocker get_llb_peerIP $dname docker exec -dt $dname /root/loxilb-io/loxilb/loxilb $bgp_opts $cluster_opts $ka_opts $extra_opts else - docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt --pid=host --cgroupns=host --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log $loxilb_config --name $dname $lxdocker $bgp_opts + docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt $docker_extra_opts --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log $loxilb_config --name $dname $lxdocker $bgp_opts docker exec -dt $dname /root/loxilb-io/loxilb/loxilb $bgp_opts $cluster_opts $extra_opts fi elif [[ "$dtype" == "host" ]]; then @@ -553,6 +557,10 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} + if [[ ${args[*]} == *"--mode=fullproxy"* ]]; then + return + fi + hook=$($dexec $1 tc filter show dev eth0 ingress | grep tc_packet_func) if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; diff --git a/cicd/sctpmh-seagull/validation.sh b/cicd/sctpmh-seagull/validation.sh index 25b73e7b1..d9f416bb7 100755 --- a/cicd/sctpmh-seagull/validation.sh +++ b/cicd/sctpmh-seagull/validation.sh @@ -46,11 +46,11 @@ echo -e "\n$border" echo -e "\n\n\n$border\n" if [[ $code == 0 ]]; then - echo -e "SCTP Multihoming CICD [OK]" + echo -e "SCTP multihoming with seagull CICD [OK]" else - echo -e "SCTP Multihoming CICD [NOK]" + echo -e "SCTP Multihoming with seagull CICD [NOK]" fi -echo -e "\n$border\n" +echo -e "\n$border\n"` exit $code diff --git a/cicd/sctpmh-seagull/validation1.sh b/cicd/sctpmh-seagull/validation1.sh index 20e0454f8..6bde78ab0 100755 --- a/cicd/sctpmh-seagull/validation1.sh +++ b/cicd/sctpmh-seagull/validation1.sh @@ -56,7 +56,7 @@ else echo "llb1 ep-info" $dexec llb1 loxicmd get ep echo "llb1 bpf-info" - $dexec llb1 ntc filter show dev eth0 ingress + $dexec llb1 tc filter show dev eth0 ingress echo "BFP trace -- " sudo timeout 5 cat /sys/kernel/debug/tracing/trace_pipe sudo killall -9 cat diff --git a/cicd/sctpmh/config.sh b/cicd/sctpmh/config.sh index 
2436deedb..b14694a07 100755 --- a/cicd/sctpmh/config.sh +++ b/cicd/sctpmh/config.sh @@ -21,18 +21,18 @@ echo "#########################################" echo "Connecting and configuring hosts" echo "#########################################" -connect_docker_hosts user r1 1500 -connect_docker_hosts user r2 1500 -connect_docker_hosts r1 sw1 1500 -connect_docker_hosts r2 sw1 1500 -connect_docker_hosts sw1 llb1 1500 -connect_docker_hosts sw1 llb2 1500 -connect_docker_hosts llb1 sw2 1500 -connect_docker_hosts llb2 sw2 1500 -connect_docker_hosts sw2 r3 1500 -connect_docker_hosts sw2 r4 1500 -connect_docker_hosts r3 ep1 1500 -connect_docker_hosts r4 ep1 1500 +connect_docker_hosts user r1 +connect_docker_hosts user r2 +connect_docker_hosts r1 sw1 +connect_docker_hosts r2 sw1 +connect_docker_hosts sw1 llb1 +connect_docker_hosts sw1 llb2 +connect_docker_hosts llb1 sw2 +connect_docker_hosts llb2 sw2 +connect_docker_hosts sw2 r3 +connect_docker_hosts sw2 r4 +connect_docker_hosts r3 ep1 +connect_docker_hosts r4 ep1 create_docker_host_cnbridge --host1 sw1 --host2 llb1 create_docker_host_cnbridge --host1 sw1 --host2 llb2 diff --git a/cicd/sctpmh/validation2.sh b/cicd/sctpmh/validation2.sh index 3829422b0..122fb03a8 100755 --- a/cicd/sctpmh/validation2.sh +++ b/cicd/sctpmh/validation2.sh @@ -15,7 +15,7 @@ echo -e "\nHA state Master:$master BACKUP-$backup\n" $hexec ep1 sctp_test -H 31.31.31.1 -P 9999 -l > ep1.out & sleep 2 -$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 100 -x 50000 > user.out & +$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -c 6 -x 10000 > user.out & #Path counters p1c_old=0 @@ -26,7 +26,7 @@ p3c_old=0 p3c_new=0 for((i=0;i<100;i++)) do - fin=`tail -n 100 user.out | grep "Client: Sending packets.(50000/50000)"` + fin=`tail -n 100 user.out | grep "Client: Sending packets.(10000/10000)"` if [[ ! -z $fin ]]; then fin=1 echo "sctp_test done." diff --git a/cicd/sctpmh/validation3.sh b/cicd/sctpmh/validation3.sh index f2d3a8939..b80ca3eed 100755 --- a/cicd/sctpmh/validation3.sh +++ b/cicd/sctpmh/validation3.sh @@ -18,7 +18,7 @@ echo -e "\nTraffic Flow: User -> LB -> EP " $hexec ep1 sctp_test -H 31.31.31.1 -P 9999 -l > ep1.out & sleep 2 -$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 100 -x 50000 > user.out & +$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -c 6 -x 1000 > user.out & #Path counters p1c_old=0 p1c_new=0 @@ -28,8 +28,11 @@ p3c_old=0 p3c_new=0 down=0 code=0 + +sleep 5 + for((i=0;i<200;i++)) do - fin=`tail -n 100 user.out | grep "Client: Sending packets.(50000/50000)"` + fin=`tail -n 100 user.out | grep "Client: Sending packets.(1000/1000)"` if [[ ! -z $fin ]]; then fin=1 echo "sctp_test done." 
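+        # Note: the -x 1000 send count set above must stay in step with the
+        # "(1000/1000)" pattern grepped at the top of this loop, or completion
+        # is never detected and the loop runs to exhaustion.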
diff --git a/cicd/sctpmh/validation4.sh b/cicd/sctpmh/validation4.sh index 93f76e10a..4bb191834 100755 --- a/cicd/sctpmh/validation4.sh +++ b/cicd/sctpmh/validation4.sh @@ -16,7 +16,7 @@ echo -e "\nTraffic Flow: EP ---> LB ---> User" $hexec user sctp_test -H 0.0.0.0 -P 9999 -l > user.out & sleep 2 -$hexec ep1 stdbuf -oL sctp_test -H 31.31.31.1 -B 32.32.32.1 -P 20000 -h $extIP -p $port -s -m 100 -x 50000 > ep1.out & +$hexec ep1 stdbuf -oL sctp_test -H 31.31.31.1 -B 32.32.32.1 -P 20000 -h $extIP -p $port -s -c 6 -x 1000 > ep1.out & #Path counters p1c_old=0 @@ -27,8 +27,10 @@ p3c_old=0 p3c_new=0 down=0 code=0 +sleep 2 + for((i=0;i<200;i++)) do - fin=`tail -n 100 ep1.out | grep "Client: Sending packets.(50000/50000)"` + fin=`tail -n 100 ep1.out | grep "Client: Sending packets.(1000/1000)"` if [[ ! -z $fin ]]; then fin=1 echo "sctp_test done." diff --git a/cicd/sctpmh/validation5.sh b/cicd/sctpmh/validation5.sh index 61d4a40bd..757a65d4b 100755 --- a/cicd/sctpmh/validation5.sh +++ b/cicd/sctpmh/validation5.sh @@ -19,7 +19,7 @@ sudo pkill sctp_test $hexec ep1 sctp_test -H 31.31.31.1 -P 9999 -l > ep1.out & sleep 2 -$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -m 1400 -x 300000 > user.out & +$hexec user stdbuf -oL sctp_test -H 1.1.1.1 -B 2.2.2.1 -P 20000 -h $extIP -p $port -s -c 6 -x 10000 > user.out & #Path counters p1c_old=0 @@ -33,8 +33,8 @@ hadone=0 code=0 nsyncOk=0 -for((i=0;i<200;i++)) do - fin=`tail -n 100 user.out | grep "Client: Sending packets.(300000/300000)"` +for((i=0;i<400;i++)) do + fin=`tail -n 100 user.out | grep "Client: Sending packets.(10000/10000)"` if [[ ! -z $fin ]]; then fin=1 echo "sctp_test done." diff --git a/cicd/sctpmh/validation6.sh b/cicd/sctpmh/validation6.sh index bbccecd3a..585ed17c3 100755 --- a/cicd/sctpmh/validation6.sh +++ b/cicd/sctpmh/validation6.sh @@ -21,7 +21,7 @@ sudo pkill sctp_test $hexec user sctp_test -H 0.0.0.0 -P 9999 -l > user.out & sleep 2 -$hexec ep1 stdbuf -oL sctp_test -H 31.31.31.1 -B 32.32.32.1 -P 20000 -h $extIP -p $port -s -m 1400 -x 500000 > ep1.out & +$hexec ep1 stdbuf -oL sctp_test -H 31.31.31.1 -B 32.32.32.1 -P 20000 -h $extIP -p $port -s -c 6 -x 10000 > ep1.out & #Path counters p1c_old=0 @@ -35,8 +35,8 @@ hadone=0 code=0 nsyncOk=0 -for((i=0;i<200;i++)) do - fin=`tail -n 100 ep1.out | grep "Client: Sending packets.(500000/500000)"` +for((i=0;i<500;i++)) do + fin=`tail -n 100 ep1.out | grep "Client: Sending packets.(10000/10000)"` if [[ ! -z $fin ]]; then fin=1 echo "sctp_test done." 
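A note on the create_lb_rule change in the patch above: with --mode=fullproxy the
function now returns right after issuing "loxicmd create lb", skipping the
tc_packet_func ingress-hook check, presumably because fullproxy handling does not
rely on that tc filter. A minimal usage sketch under that assumption, with
addresses borrowed from this suite's config:

    # fullproxy rule: created, but no tc hook verification afterwards
    create_lb_rule llb1 20.20.20.1 --name=sctpmh1 --secips=21.21.21.1,22.22.22.1 \
        --sctp=2020:9999 --endpoints=31.31.31.1:1 --mode=fullproxy

Any other mode still exits with "ERROR : No hook point found" when the eBPF
filter is absent on eth0.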
From ef3e631effc637ffad4e76a889e94857a5dcc177 Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Wed, 11 Sep 2024 11:55:49 +0900 Subject: [PATCH 04/34] gh-87 : Chore - removed unwanted typo --- cicd/sctpmh-seagull/validation.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cicd/sctpmh-seagull/validation.sh b/cicd/sctpmh-seagull/validation.sh index d9f416bb7..93d6ca8a4 100755 --- a/cicd/sctpmh-seagull/validation.sh +++ b/cicd/sctpmh-seagull/validation.sh @@ -50,7 +50,7 @@ if [[ $code == 0 ]]; then else echo -e "SCTP Multihoming with seagull CICD [NOK]" fi -echo -e "\n$border\n"` +echo -e "\n$border\n" exit $code From 6f3a1c44f30c0f3994cf9aa5a9114e54c93e894f Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Wed, 11 Sep 2024 22:46:24 +0900 Subject: [PATCH 05/34] gh-87 : Chore - removed tmp files --- cicd/sctpmh-seagull/.vagrant/bundler/global.sol | 1 - cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb | 9 --------- 2 files changed, 10 deletions(-) delete mode 100644 cicd/sctpmh-seagull/.vagrant/bundler/global.sol delete mode 100644 cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb diff --git a/cicd/sctpmh-seagull/.vagrant/bundler/global.sol b/cicd/sctpmh-seagull/.vagrant/bundler/global.sol deleted file mode 100644 index 072d3ec90..000000000 --- a/cicd/sctpmh-seagull/.vagrant/bundler/global.sol +++ /dev/null @@ -1 +0,0 @@ -{"dependencies":[["vagrant-disksize",["= 0.1.3"]],["net-ssh",[">= 2.6.5","< 8.0.0"]],["net-scp",[">= 1.1"]],["log4r",["~> 1.1"]],["vagrant-scp",["= 0.5.9"]]],"checksum":"d41cdc087dc2595e62da764647bfcacc91965875ce15159c44cdee684a184f69","vagrant_version":"2.3.7"} \ No newline at end of file diff --git a/cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb b/cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb deleted file mode 100644 index c3c05b095..000000000 --- a/cicd/sctpmh-seagull/.vagrant/rgloader/loader.rb +++ /dev/null @@ -1,9 +0,0 @@ -# This file loads the proper rgloader/loader.rb file that comes packaged -# with Vagrant so that encoded files can properly run with Vagrant. - -if ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"] - require File.expand_path( - "rgloader/loader", ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"]) -else - raise "Encoded files can't be read outside of the Vagrant installer." 
-end From da44f133d34b6d0bd785202d511278f643228b93 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Thu, 12 Sep 2024 11:55:06 +0900 Subject: [PATCH 06/34] fixes to sctp probe when rss is enabled --- go.mod | 4 ++-- go.sum | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index f334c0372..bcf7eb973 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/go-openapi/validate v0.22.0 github.com/jessevdk/go-flags v1.5.0 github.com/loxilb-io/ipvs v0.1.0 - github.com/loxilb-io/loxilib v0.8.9-0.20240906040045-9ad9b8b549d3 + github.com/loxilb-io/loxilib v0.8.9-0.20240912025144-088c2b9843ec github.com/osrg/gobgp/v3 v3.29.0 github.com/prometheus-community/pro-bing v0.1.0 github.com/prometheus/client_model v0.3.0 @@ -53,7 +53,7 @@ require ( github.com/imdario/mergo v0.3.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/loxilb-io/sctp v0.0.0-20230519081703-6d1baec82fd4 // indirect + github.com/loxilb-io/sctp v0.0.0-20240912024735-b9c5910e672f // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/ipvs v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/go.sum b/go.sum index 0919b40b0..84911aecd 100644 --- a/go.sum +++ b/go.sum @@ -169,8 +169,12 @@ github.com/loxilb-io/ipvs v0.1.0 h1:TpTkwh5CLgJ7YW86rvWyqJPEpQFqs2TNbRG/IECeq+w= github.com/loxilb-io/ipvs v0.1.0/go.mod h1:EKjimnzyVL9AXMMNfPWeokxF1uNeuDrEGF5gPFMdmIo= github.com/loxilb-io/loxilib v0.8.9-0.20240906040045-9ad9b8b549d3 h1:wM/yKX43GxROxnU9Qop8QMYFjlDOIALRgi9i7CxsM0s= github.com/loxilb-io/loxilib v0.8.9-0.20240906040045-9ad9b8b549d3/go.mod h1:LoQCxBz+N0fO9rGwRmPHrQPHol/jUf4MNpph63Cydkg= +github.com/loxilb-io/loxilib v0.8.9-0.20240912025144-088c2b9843ec h1:2iozRGoNW+F3hos/OHWL8v5QQvkOnJSV83jqHeVrKZU= +github.com/loxilb-io/loxilib v0.8.9-0.20240912025144-088c2b9843ec/go.mod h1:HZwYNAmuyvPZwzB5jon8s73V+Tzro8NPuQnYa9oeUQE= github.com/loxilb-io/sctp v0.0.0-20230519081703-6d1baec82fd4 h1:oDc2lsbfuQEcVP3k+Pw4v6Xdm3t4M9vBc1Y9egszv6g= github.com/loxilb-io/sctp v0.0.0-20230519081703-6d1baec82fd4/go.mod h1:1a6hv8ISVQhnW5IVpW9o+OL6BAFlWiVpC0O4d19g+wQ= +github.com/loxilb-io/sctp v0.0.0-20240912024735-b9c5910e672f h1:sm8UnXJa4dAV/wTEbc59F+Gf3FlfvSJY+OcPFfY9Eck= +github.com/loxilb-io/sctp v0.0.0-20240912024735-b9c5910e672f/go.mod h1:g3xKRvSWoeijv487mRGw3sLDacD9bC+wRQ4QebiafiQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= From c1990e22dbc0a244340a6775b07e39ce594298ee Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Thu, 12 Sep 2024 12:01:06 +0900 Subject: [PATCH 07/34] fixes to sctp probe when rss is enabled --- go.mod | 4 ++-- go.sum | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index bcf7eb973..37e0c92e2 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/go-openapi/validate v0.22.0 github.com/jessevdk/go-flags v1.5.0 github.com/loxilb-io/ipvs v0.1.0 - github.com/loxilb-io/loxilib v0.8.9-0.20240912025144-088c2b9843ec + github.com/loxilb-io/loxilib v0.8.9-0.20240912025939-1e5e76723499 github.com/osrg/gobgp/v3 v3.29.0 github.com/prometheus-community/pro-bing v0.1.0 github.com/prometheus/client_model 
v0.3.0 @@ -53,7 +53,7 @@ require ( github.com/imdario/mergo v0.3.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/loxilb-io/sctp v0.0.0-20240912024735-b9c5910e672f // indirect + github.com/loxilb-io/sctp v0.0.0-20240912025756-01894eac308b // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/ipvs v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/go.sum b/go.sum index 84911aecd..737567ab7 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,14 @@ github.com/loxilb-io/loxilib v0.8.9-0.20240906040045-9ad9b8b549d3 h1:wM/yKX43GxR github.com/loxilb-io/loxilib v0.8.9-0.20240906040045-9ad9b8b549d3/go.mod h1:LoQCxBz+N0fO9rGwRmPHrQPHol/jUf4MNpph63Cydkg= github.com/loxilb-io/loxilib v0.8.9-0.20240912025144-088c2b9843ec h1:2iozRGoNW+F3hos/OHWL8v5QQvkOnJSV83jqHeVrKZU= github.com/loxilb-io/loxilib v0.8.9-0.20240912025144-088c2b9843ec/go.mod h1:HZwYNAmuyvPZwzB5jon8s73V+Tzro8NPuQnYa9oeUQE= +github.com/loxilb-io/loxilib v0.8.9-0.20240912025939-1e5e76723499 h1:zX/WEmsumpyLPMMkcFzxp3r60x0sqHDSOeJ+CwdfD5c= +github.com/loxilb-io/loxilib v0.8.9-0.20240912025939-1e5e76723499/go.mod h1:72c3DmIKC53G5f4eNhTElemB08S64Xm/4QsDtwc66vw= github.com/loxilb-io/sctp v0.0.0-20230519081703-6d1baec82fd4 h1:oDc2lsbfuQEcVP3k+Pw4v6Xdm3t4M9vBc1Y9egszv6g= github.com/loxilb-io/sctp v0.0.0-20230519081703-6d1baec82fd4/go.mod h1:1a6hv8ISVQhnW5IVpW9o+OL6BAFlWiVpC0O4d19g+wQ= github.com/loxilb-io/sctp v0.0.0-20240912024735-b9c5910e672f h1:sm8UnXJa4dAV/wTEbc59F+Gf3FlfvSJY+OcPFfY9Eck= github.com/loxilb-io/sctp v0.0.0-20240912024735-b9c5910e672f/go.mod h1:g3xKRvSWoeijv487mRGw3sLDacD9bC+wRQ4QebiafiQ= +github.com/loxilb-io/sctp v0.0.0-20240912025756-01894eac308b h1:QZHlUZTWMpghNQW/OzdKFY2PhhPFMPAjsfRSLZkAONU= +github.com/loxilb-io/sctp v0.0.0-20240912025756-01894eac308b/go.mod h1:g3xKRvSWoeijv487mRGw3sLDacD9bC+wRQ4QebiafiQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= From 7b4429a554bad4aeb2b4f9894c500f798a5fec52 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Fri, 13 Sep 2024 00:26:58 +0900 Subject: [PATCH 08/34] loxilb-io/kube-loxilb#169 Updated kube-loxilb yaml to conform to multi CIDR pool support semantics --- cicd/docker-k0s-lb/kube-loxilb.yml | 2 +- cicd/docker-k3s-calico/kube-loxilb.yml | 2 +- cicd/docker-k3s-cilium/kube-loxilb.yml | 2 +- cicd/eks/kube-loxilb.yaml | 3 +-- cicd/k0s-incluster/kube-loxilb.yml | 2 +- cicd/k0s-weave/kube-loxilb.yml | 3 +-- cicd/k3s-base-sanity/kube-loxilb.yml | 2 +- cicd/k3s-calico-dual-stack/kube-loxilb.yml | 4 ++-- cicd/k3s-calico-incluster/kube-loxilb.yml | 3 +-- cicd/k3s-calico-single-node-incluster/kube-loxilb.yml | 3 +-- cicd/k3s-calico/kube-loxilb.yml | 2 +- cicd/k3s-cilium-cluster/kube-loxilb.yml | 2 +- cicd/k3s-cilium/kube-loxilb.yml | 2 +- cicd/k3s-ext-ep/kube-loxilb.yml | 3 +-- cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml | 2 +- cicd/k3s-flannel-cluster/kube-loxilb.yml | 2 +- cicd/k3s-flannel-incluster-l2/kube-loxilb.yml | 2 +- cicd/k3s-flannel-incluster/kube-loxilb.yml | 2 +- cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml | 2 +- cicd/k3s-flannel-multus/kube-loxilb.yml | 3 +-- cicd/k3s-flannel-multus/multus/multus-sctp-service.yml | 2 +- 
cicd/k3s-flannel/kube-loxilb.yml | 2 +- cicd/k3s-incluster/kube-loxilb.yml | 2 +- cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml | 2 +- cicd/k3s-multi-master-service-proxy/kube-loxilb.yml | 2 +- cicd/k3s-rabbitmq-incluster/kube-loxilb.yml | 2 +- cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml | 2 +- cicd/k3s-sctpmh-2/kube-loxilb.yml | 3 +-- cicd/k3s-sctpmh-2/sctp-svc-lb.yml | 2 +- cicd/k3s-sctpmh-seagull/kube-loxilb.yml | 3 +-- cicd/k3s-sctpmh-seagull/multus/multus-sctp-service.yml | 2 +- cicd/k3s-sctpmh-seagull/multus/multus-seagull-service.yml | 2 +- cicd/k3s-sctpmh/kube-loxilb.yml | 3 +-- cicd/k3s-sctpmh/sctp-svc-lb.yml | 2 +- cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml | 3 +-- cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml | 1 - 36 files changed, 36 insertions(+), 47 deletions(-) diff --git a/cicd/docker-k0s-lb/kube-loxilb.yml b/cicd/docker-k0s-lb/kube-loxilb.yml index 2d068185c..ae9d4e678 100644 --- a/cicd/docker-k0s-lb/kube-loxilb.yml +++ b/cicd/docker-k0s-lb/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://172.17.0.2:11111 - - --externalCIDR=192.168.82.100/32 + - --cidrPools=defaultPool=192.168.82.100/32 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/docker-k3s-calico/kube-loxilb.yml b/cicd/docker-k3s-calico/kube-loxilb.yml index 05ea21ab2..e49c3aec7 100644 --- a/cicd/docker-k3s-calico/kube-loxilb.yml +++ b/cicd/docker-k3s-calico/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://172.17.0.2:11111 - - --externalCIDR=192.168.163.247/32 + - --cidrPools=defaultPool=192.168.163.247/32 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/docker-k3s-cilium/kube-loxilb.yml b/cicd/docker-k3s-cilium/kube-loxilb.yml index 05ea21ab2..e49c3aec7 100644 --- a/cicd/docker-k3s-cilium/kube-loxilb.yml +++ b/cicd/docker-k3s-cilium/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://172.17.0.2:11111 - - --externalCIDR=192.168.163.247/32 + - --cidrPools=defaultPool=192.168.163.247/32 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/eks/kube-loxilb.yaml b/cicd/eks/kube-loxilb.yaml index f2d544756..3c60aed6c 100644 --- a/cicd/eks/kube-loxilb.yaml +++ b/cicd/eks/kube-loxilb.yaml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://$LOXILB_PRIVATE_IP:11111 - - --externalCIDR=$LOXILB_PRIVATE_IP/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=$LOXILB_PRIVATE_IP/32 #- --monitor #- --setBGP - --setLBMode=2 diff --git a/cicd/k0s-incluster/kube-loxilb.yml b/cicd/k0s-incluster/kube-loxilb.yml index 6a7e7aa41..ef86b0f16 100644 --- a/cicd/k0s-incluster/kube-loxilb.yml +++ b/cicd/k0s-incluster/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://172.17.0.2:11111 - - --externalCIDR=192.168.82.100/32 + - --cidrPools=defaultPool=192.168.82.100/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP diff --git a/cicd/k0s-weave/kube-loxilb.yml b/cicd/k0s-weave/kube-loxilb.yml index cdc5c7464..4a0b9744f 100644 --- a/cicd/k0s-weave/kube-loxilb.yml +++ b/cicd/k0s-weave/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://12.12.12.1:11111,http://14.14.14.1:11111 - - --externalCIDR=123.123.123.1/24 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=123.123.123.1/24 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/k3s-base-sanity/kube-loxilb.yml b/cicd/k3s-base-sanity/kube-loxilb.yml index 
18c4da508..83b695f7b 100644
--- a/cicd/k3s-base-sanity/kube-loxilb.yml
+++ b/cicd/k3s-base-sanity/kube-loxilb.yml
@@ -111,7 +111,7 @@ spec:
         - /bin/kube-loxilb
         args:
         - --loxiURL=http://12.12.12.1:11111,http://12.12.12.2:11111
-        - --externalCIDR=123.123.123.1/24
+        - --cidrPools=defaultPool=123.123.123.1/24
         #- --monitor
         - --setBGP=64512
         - --extBGPPeers=10.10.10.254:64512,12.12.12.254:64514,11.11.11.254:64511
diff --git a/cicd/k3s-calico-dual-stack/kube-loxilb.yml b/cicd/k3s-calico-dual-stack/kube-loxilb.yml
index 27f522a85..b9fe885c9 100644
--- a/cicd/k3s-calico-dual-stack/kube-loxilb.yml
+++ b/cicd/k3s-calico-dual-stack/kube-loxilb.yml
@@ -111,8 +111,8 @@ spec:
         - /bin/kube-loxilb
         args:
         - --loxiURL=http://12.12.12.1:11111
-        - --externalCIDR=123.123.123.1/24
-        - --externalCIDR6=2001::1/128
+        - --cidrPools=defaultPool=123.123.123.1/24
+        - --cidr6Pools=defaultPool=2001::1/128
         #- --monitor
         #- --setBGP
         #- --setLBMode=1
diff --git a/cicd/k3s-calico-incluster/kube-loxilb.yml b/cicd/k3s-calico-incluster/kube-loxilb.yml
index 9c9090591..ed875f3b1 100644
--- a/cicd/k3s-calico-incluster/kube-loxilb.yml
+++ b/cicd/k3s-calico-incluster/kube-loxilb.yml
@@ -112,8 +112,7 @@ spec:
         - /bin/kube-loxilb
         args:
         #- --loxiURL=http://192.168.80.10:11111
-        - --externalCIDR=123.123.123.1/24
-        #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24
+        - --cidrPools=defaultPool=123.123.123.1/24
         - --setBGP=64512
         - --listenBGPPort=1791
         - --setRoles=0.0.0.0
diff --git a/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml b/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml
index fa66e217c..bec0145d5 100644
--- a/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml
+++ b/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml
@@ -112,8 +112,7 @@ spec:
         - /bin/kube-loxilb
         args:
         - --loxiURL=http://192.168.82.128:11111
-        - --externalCIDR=192.168.80.5/32
-        #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24
+        - --cidrPools=defaultPool=192.168.80.5/32
         #- --setBGP=64512
         #- --listenBGPPort=1791
         - --setRoles=0.0.0.0
diff --git a/cicd/k3s-calico/kube-loxilb.yml b/cicd/k3s-calico/kube-loxilb.yml
index a9284b24f..4a0b9744f 100644
--- a/cicd/k3s-calico/kube-loxilb.yml
+++ b/cicd/k3s-calico/kube-loxilb.yml
@@ -111,7 +111,7 @@ spec:
         - /bin/kube-loxilb
         args:
         - --loxiURL=http://12.12.12.1:11111,http://14.14.14.1:11111
-        - --externalCIDR=123.123.123.1/24
+        - --cidrPools=defaultPool=123.123.123.1/24
         #- --monitor
         #- --setBGP
         #- --setLBMode=1
diff --git a/cicd/k3s-cilium-cluster/kube-loxilb.yml b/cicd/k3s-cilium-cluster/kube-loxilb.yml
index 5246d4bfe..8dd66ed3e 100644
--- a/cicd/k3s-cilium-cluster/kube-loxilb.yml
+++ b/cicd/k3s-cilium-cluster/kube-loxilb.yml
@@ -111,7 +111,7 @@ spec:
         - /bin/kube-loxilb
         args:
         - --loxiURL=http://192.168.80.9:11111
-        - --externalCIDR=123.123.123.1/24
+        - --cidrPools=defaultPool=123.123.123.1/24
         #- --monitor
         #- --setBGP
         #- --setLBMode=1
diff --git a/cicd/k3s-cilium/kube-loxilb.yml b/cicd/k3s-cilium/kube-loxilb.yml
index a9284b24f..4a0b9744f 100644
--- a/cicd/k3s-cilium/kube-loxilb.yml
+++ b/cicd/k3s-cilium/kube-loxilb.yml
@@ -111,7 +111,7 @@ spec:
         - /bin/kube-loxilb
         args:
         - --loxiURL=http://12.12.12.1:11111,http://14.14.14.1:11111
-        - --externalCIDR=123.123.123.1/24
+        - --cidrPools=defaultPool=123.123.123.1/24
         #- --monitor
         #- --setBGP
         #- --setLBMode=1
diff --git a/cicd/k3s-ext-ep/kube-loxilb.yml b/cicd/k3s-ext-ep/kube-loxilb.yml
index 0a62bfc11..85670b3a6 100644
--- a/cicd/k3s-ext-ep/kube-loxilb.yml
+++ b/cicd/k3s-ext-ep/kube-loxilb.yml
@@ -111,7 +111,7 @@ spec:
         - /bin/kube-loxilb
         args:
         - 
--loxiURL=http://172.17.0.2:11111 - - --externalCIDR=20.20.20.1/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=20.20.20.1/32 #- --monitor #- --setBGP=64511 #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 diff --git a/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml b/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml index 9357b17b0..2e36bf345 100644 --- a/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml +++ b/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=192.168.80.20/32 + - --cidrPools=defaultPool=192.168.80.20/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP diff --git a/cicd/k3s-flannel-cluster/kube-loxilb.yml b/cicd/k3s-flannel-cluster/kube-loxilb.yml index 5246d4bfe..8dd66ed3e 100644 --- a/cicd/k3s-flannel-cluster/kube-loxilb.yml +++ b/cicd/k3s-flannel-cluster/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=123.123.123.1/24 + - --cidrPools=defaultPool=123.123.123.1/24 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml index 0f1ed7a5c..415d2c8a5 100644 --- a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml +++ b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml @@ -125,7 +125,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=192.168.80.200/32 + - --cidrPools=defaultPool=192.168.80.200/32 #- --setBGP=64512 - --setRoles=0.0.0.0 #- --monitor diff --git a/cicd/k3s-flannel-incluster/kube-loxilb.yml b/cicd/k3s-flannel-incluster/kube-loxilb.yml index 910c5e196..b18da9f0b 100644 --- a/cicd/k3s-flannel-incluster/kube-loxilb.yml +++ b/cicd/k3s-flannel-incluster/kube-loxilb.yml @@ -112,7 +112,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=123.123.123.1/24 + - --cidrPools=defaultPool=123.123.123.1/24 - --setBGP=64512 - --setRoles=0.0.0.0 - --extBGPPeers=192.168.90.9:64511 diff --git a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml index 93a52b373..1b2f760f8 100644 --- a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml +++ b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml @@ -112,7 +112,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=192.168.80.9/32 + - --cidrPools=defaultPool=192.168.80.9/32 #- --zone=aws #- --setBGP=64512 #- --setRoles=0.0.0.0 diff --git a/cicd/k3s-flannel-multus/kube-loxilb.yml b/cicd/k3s-flannel-multus/kube-loxilb.yml index 48f1da654..b1bf14968 100644 --- a/cicd/k3s-flannel-multus/kube-loxilb.yml +++ b/cicd/k3s-flannel-multus/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://4.0.6.149:11111 - - --externalCIDR=4.0.5.2/32 - - --externalSecondaryCIDRs=4.0.4.1/24,4.0.3.1/24 + - --cidrPools=defaultPool=4.0.5.2/32,pool2=4.0.4.1/24,pool3=4.0.3.1/24 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/k3s-flannel-multus/multus/multus-sctp-service.yml b/cicd/k3s-flannel-multus/multus/multus-sctp-service.yml index 93f14ca9f..49a2b42f4 100644 --- a/cicd/k3s-flannel-multus/multus/multus-sctp-service.yml +++ b/cicd/k3s-flannel-multus/multus/multus-sctp-service.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: multus-sctp-service annotations: - loxilb.io/num-secondary-networks: "2" + loxilb.io/poolSelectSecondary: 
"pool2,pool3" loxilb.io/multus-nets: macvlan1,macvlan2 loxilb.io/lbmode: "fullnat" spec: diff --git a/cicd/k3s-flannel/kube-loxilb.yml b/cicd/k3s-flannel/kube-loxilb.yml index 18c4da508..83b695f7b 100644 --- a/cicd/k3s-flannel/kube-loxilb.yml +++ b/cicd/k3s-flannel/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://12.12.12.1:11111,http://12.12.12.2:11111 - - --externalCIDR=123.123.123.1/24 + - --cidrPools=defaultPool=123.123.123.1/24 #- --monitor - --setBGP=64512 - --extBGPPeers=10.10.10.254:64512,12.12.12.254:64514,11.11.11.254:64511 diff --git a/cicd/k3s-incluster/kube-loxilb.yml b/cicd/k3s-incluster/kube-loxilb.yml index 6a7e7aa41..ef86b0f16 100644 --- a/cicd/k3s-incluster/kube-loxilb.yml +++ b/cicd/k3s-incluster/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://172.17.0.2:11111 - - --externalCIDR=192.168.82.100/32 + - --cidrPools=defaultPool=192.168.82.100/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP diff --git a/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml b/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml index f48008297..f57f3c44b 100644 --- a/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml +++ b/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml @@ -125,7 +125,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=192.168.80.200/32 + - --cidrPools=defaultPool=192.168.80.200/32 #- --setBGP=64512 - --setRoles=0.0.0.0 - --excludeRoleList=192.168.80.101,192.168.80.102 diff --git a/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml b/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml index f48008297..f57f3c44b 100644 --- a/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml +++ b/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml @@ -125,7 +125,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=192.168.80.200/32 + - --cidrPools=defaultPool=192.168.80.200/32 #- --setBGP=64512 - --setRoles=0.0.0.0 - --excludeRoleList=192.168.80.101,192.168.80.102 diff --git a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml index f6d54ad8f..414e820b3 100644 --- a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=192.168.80.20/32 + - --cidrPools=defaultPool=192.168.80.20/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP diff --git a/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml index 9357b17b0..2e36bf345 100644 --- a/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=192.168.80.20/32 + - --cidrPools=defaultPool=192.168.80.20/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP diff --git a/cicd/k3s-sctpmh-2/kube-loxilb.yml b/cicd/k3s-sctpmh-2/kube-loxilb.yml index cd36ced12..f727d1a2a 100644 --- a/cicd/k3s-sctpmh-2/kube-loxilb.yml +++ b/cicd/k3s-sctpmh-2/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://12.12.12.1:11111,http://14.14.14.1:11111 - - --externalCIDR=123.123.123.1/24 - - --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=123.123.123.1/24,pool2=124.124.124.1/24,pool3=125.125.125.1/24 #- --monitor - 
--setBGP=64511 #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 diff --git a/cicd/k3s-sctpmh-2/sctp-svc-lb.yml b/cicd/k3s-sctpmh-2/sctp-svc-lb.yml index 32b749bcc..2abe38bc3 100644 --- a/cicd/k3s-sctpmh-2/sctp-svc-lb.yml +++ b/cicd/k3s-sctpmh-2/sctp-svc-lb.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: sctp-lb1 annotations: - loxilb.io/num-secondary-networks: "2" + loxilb.io/poolSelectSecondary: "pool2,pool3" loxilb.io/lbmode: "fullnat" loxilb.io/liveness: "yes" spec: diff --git a/cicd/k3s-sctpmh-seagull/kube-loxilb.yml b/cicd/k3s-sctpmh-seagull/kube-loxilb.yml index 5595ba85d..f06f18e0b 100644 --- a/cicd/k3s-sctpmh-seagull/kube-loxilb.yml +++ b/cicd/k3s-sctpmh-seagull/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://4.0.6.10:11111,http://4.0.6.11:11111 - - --externalCIDR=4.0.5.100/32 - - --externalSecondaryCIDRs=4.0.4.100/24 + - --cidrPools=defaultPool=4.0.5.100/32,pool2=4.0.4.100/24 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/k3s-sctpmh-seagull/multus/multus-sctp-service.yml b/cicd/k3s-sctpmh-seagull/multus/multus-sctp-service.yml index 93f14ca9f..49a2b42f4 100644 --- a/cicd/k3s-sctpmh-seagull/multus/multus-sctp-service.yml +++ b/cicd/k3s-sctpmh-seagull/multus/multus-sctp-service.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: multus-sctp-service annotations: - loxilb.io/num-secondary-networks: "2" + loxilb.io/poolSelectSecondary: "pool2,pool3" loxilb.io/multus-nets: macvlan1,macvlan2 loxilb.io/lbmode: "fullnat" spec: diff --git a/cicd/k3s-sctpmh-seagull/multus/multus-seagull-service.yml b/cicd/k3s-sctpmh-seagull/multus/multus-seagull-service.yml index d43d6a5eb..54364ff41 100644 --- a/cicd/k3s-sctpmh-seagull/multus/multus-seagull-service.yml +++ b/cicd/k3s-sctpmh-seagull/multus/multus-seagull-service.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: multus-seagull-service annotations: - loxilb.io/num-secondary-networks: "1" + loxilb.io/poolSelectSecondary: "pool2" loxilb.io/multus-nets: macvlan1,macvlan2 loxilb.io/lbmode: "fullnat" loxilb.io/probetype: "ping" diff --git a/cicd/k3s-sctpmh/kube-loxilb.yml b/cicd/k3s-sctpmh/kube-loxilb.yml index 202ee22c2..2d407e76f 100644 --- a/cicd/k3s-sctpmh/kube-loxilb.yml +++ b/cicd/k3s-sctpmh/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://12.12.12.1:11111,http://14.14.14.1:11111 - - --externalCIDR=123.123.123.1/24 - - --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=123.123.123.1/24,pool2=124.124.124.1/24,pool3=125.125.125.1/24 #- --monitor - --setBGP=64511 #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 diff --git a/cicd/k3s-sctpmh/sctp-svc-lb.yml b/cicd/k3s-sctpmh/sctp-svc-lb.yml index 692069ab7..3f3e5e88d 100644 --- a/cicd/k3s-sctpmh/sctp-svc-lb.yml +++ b/cicd/k3s-sctpmh/sctp-svc-lb.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: sctp-lb1 annotations: - loxilb.io/num-secondary-networks: "2" + loxilb.io/poolSelectSecondary: "pool2,pool3" loxilb.io/lbmode: "fullnat" loxilb.io/liveness: "yes" spec: diff --git a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml index fe0293137..b70b66ffb 100644 --- a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml +++ b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml @@ -163,8 +163,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=192.168.80.5/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=192.168.80.5/32 #- --setBGP=64512 #- 
--listenBGPPort=1791 - --setRoles=0.0.0.0 diff --git a/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml b/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml index 5ac9a69b7..199d2a406 100644 --- a/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml +++ b/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml @@ -3,7 +3,6 @@ kind: Service metadata: name: sctp-lb-fullnat annotations: - #loxilb.io/num-secondary-networks: "2" loxilb.io/liveness: "yes" loxilb.io/lbmode: "fullnat" spec: From 072b6e9ebd932188016fbbcb9316811832a36195 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Sat, 14 Sep 2024 00:06:26 +0900 Subject: [PATCH 09/34] loxilb-io/kube-loxilb#169 Updated kube-loxilb yaml to conform to multi CIDR pool support semantics --- cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml | 3 +-- cicd/k8s-calico-ipvs/yaml/sctp_fullnat.yml | 2 +- cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml | 3 +-- cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml | 3 +-- cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml | 2 +- cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml | 3 +-- cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml | 3 +-- cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml | 2 +- cicd/k8s-calico/yaml/kube-loxilb.yml | 2 +- cicd/microk8s-incluster/kube-loxilb.yml | 2 +- 10 files changed, 10 insertions(+), 15 deletions(-) diff --git a/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml index fd0ef2b93..f9da5df2f 100644 --- a/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=123.123.123.1/24 - - --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=123.123.123.1/24,pool2=124.124.124.1/24,pool3=125.125.125.1/24 #- --monitor #- --setBGP=64511 #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 diff --git a/cicd/k8s-calico-ipvs/yaml/sctp_fullnat.yml b/cicd/k8s-calico-ipvs/yaml/sctp_fullnat.yml index 6b43037a5..b6e3fb686 100644 --- a/cicd/k8s-calico-ipvs/yaml/sctp_fullnat.yml +++ b/cicd/k8s-calico-ipvs/yaml/sctp_fullnat.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: sctp-lb-fullnat annotations: - loxilb.io/num-secondary-networks: "2" + loxilb.io/poolSelectSecondary: "pool2,pool3" loxilb.io/liveness: "yes" loxilb.io/lbmode: "fullnat" spec: diff --git a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml index 0c8b3f250..c14902abb 100644 --- a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 - - --externalCIDR=192.168.80.5/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=192.168.80.5/32 #- --monitor #- --setBGP=64511 #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 diff --git a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml index 276346d91..4155acc91 100644 --- a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 - - --externalCIDR=192.168.80.5/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=192.168.80.5/32 #- --monitor #- --setBGP=64511 #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 diff --git 
a/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml b/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml index 6b43037a5..b6e3fb686 100644 --- a/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml +++ b/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml @@ -3,7 +3,7 @@ kind: Service metadata: name: sctp-lb-fullnat annotations: - loxilb.io/num-secondary-networks: "2" + loxilb.io/poolSelectSecondary: "pool2,pool3" loxilb.io/liveness: "yes" loxilb.io/lbmode: "fullnat" spec: diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml index e27181fd4..54bdcb89f 100644 --- a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 - - --externalCIDR=20.20.20.1/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=20.20.20.1/32 #- --monitor - --setBGP=64511 - --extBGPPeers=192.168.90.9:64512 diff --git a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml index 01a9adc39..21933422e 100644 --- a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml @@ -111,8 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 - - --externalCIDR=20.20.20.1/32 - #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + - --cidrPools=defaultPool=20.20.20.1/32 #- --monitor - --setBGP=64511 - --extBGPPeers=192.168.80.9:64512 diff --git a/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml b/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml index 5246d4bfe..8dd66ed3e 100644 --- a/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=123.123.123.1/24 + - --cidrPools=defaultPool=123.123.123.1/24 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/k8s-calico/yaml/kube-loxilb.yml b/cicd/k8s-calico/yaml/kube-loxilb.yml index 5246d4bfe..8dd66ed3e 100644 --- a/cicd/k8s-calico/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico/yaml/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.80.9:11111 - - --externalCIDR=123.123.123.1/24 + - --cidrPools=defaultPool=123.123.123.1/24 #- --monitor #- --setBGP #- --setLBMode=1 diff --git a/cicd/microk8s-incluster/kube-loxilb.yml b/cicd/microk8s-incluster/kube-loxilb.yml index 6a7e7aa41..ef86b0f16 100644 --- a/cicd/microk8s-incluster/kube-loxilb.yml +++ b/cicd/microk8s-incluster/kube-loxilb.yml @@ -111,7 +111,7 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://172.17.0.2:11111 - - --externalCIDR=192.168.82.100/32 + - --cidrPools=defaultPool=192.168.82.100/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP From d882a387fc092c2ab8684852c0bf78e83e6f9bb0 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Sat, 14 Sep 2024 00:07:54 +0900 Subject: [PATCH 10/34] loxilb-io/kube-loxilb#169 Updated kube-loxilb yaml to conform to multi CIDR pool support semantics --- cicd/k8s-nat64/kube-loxilb.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cicd/k8s-nat64/kube-loxilb.yaml b/cicd/k8s-nat64/kube-loxilb.yaml index 655df07dd..6a1beee6d 100644 --- a/cicd/k8s-nat64/kube-loxilb.yaml +++ b/cicd/k8s-nat64/kube-loxilb.yaml @@ -111,8 +111,8 @@ spec: - /bin/kube-loxilb args: - --loxiURL=http://192.168.59.101:11111,http://192.168.59.111:11111 - #- 
--externalCIDR=123.123.123.1/24 - - --externalCIDR=3ffe::1/96 + #- --cidrPools=defaultPool=123.123.123.1/24 + - --cidrPools=defaultPool=3ffe::1/96 - --setBGP=64511 - --setLBMode=2 #- --config=/opt/loxilb/agent/kube-loxilb.conf From 59148016b94ef64d7be857d82ea68b7bcef71d3b Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Fri, 20 Sep 2024 08:24:28 +0900 Subject: [PATCH 11/34] gh-87 updated manifests and added scenario of k8s flannel incluster --- .../k8s-flannel-incluster-multus.yml | 36 +++ cicd/docker-k0s-lb/kube-loxilb.yml | 1 + cicd/docker-k3s-calico/kube-loxilb.yml | 1 + cicd/docker-k3s-cilium/kube-loxilb.yml | 1 + cicd/eks/kube-loxilb.yaml | 1 + cicd/k0s-incluster/kube-loxilb.yml | 1 + cicd/k0s-weave/kube-loxilb.yml | 1 + cicd/k3s-base-sanity/kube-loxilb.yml | 1 + cicd/k3s-calico-dual-stack/kube-loxilb.yml | 1 + cicd/k3s-calico-incluster/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-calico/kube-loxilb.yml | 1 + cicd/k3s-cilium-cluster/kube-loxilb.yml | 1 + cicd/k3s-cilium/kube-loxilb.yml | 1 + cicd/k3s-ext-ep/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-flannel-cluster/kube-loxilb.yml | 1 + cicd/k3s-flannel-incluster-l2/kube-loxilb.yml | 1 + cicd/k3s-flannel-incluster/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-flannel-multus/kube-loxilb.yml | 1 + cicd/k3s-flannel/kube-loxilb.yml | 1 + cicd/k3s-incluster/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-rabbitmq-incluster/kube-loxilb.yml | 1 + .../manifests/kube-loxilb.yml | 1 + cicd/k3s-sctpmh-2/kube-loxilb.yml | 1 + cicd/k3s-sctpmh-seagull/kube-loxilb.yml | 1 + cicd/k3s-sctpmh/kube-loxilb.yml | 1 + .../yaml/kube-loxilb.yaml | 1 + cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml | 1 + .../yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico/yaml/kube-loxilb.yml | 1 + cicd/k8s-flannel-incluster-multus/Vagrantfile | 81 ++++++ cicd/k8s-flannel-incluster-multus/config.sh | 67 +++++ .../multus/multus-daemonset.yml | 236 ++++++++++++++++++ .../multus/multus-macvlan.yml | 27 ++ .../multus/multus-pod-02.yml | 22 ++ .../multus/multus-pod.yml | 14 ++ .../multus/multus-service.yml | 16 ++ .../multus/multus-vlan.yml | 21 ++ .../node_scripts/common.sh | 93 +++++++ .../node_scripts/host.sh | 8 + .../node_scripts/loxilb.sh | 9 + .../node_scripts/master.sh | 69 +++++ .../node_scripts/worker.sh | 34 +++ cicd/k8s-flannel-incluster-multus/rmconfig.sh | 5 + .../validation.sh | 30 +++ .../yaml/kube-flannel.yml | 210 ++++++++++++++++ .../yaml/kube-loxilb.yaml | 186 ++++++++++++++ .../yaml/kubeadm-config.yaml | 70 ++++++ .../yaml/loxilb-localvip.yaml | 110 ++++++++ .../yaml/loxilb.yaml | 71 ++++++ .../yaml/sctp_fullnat.yml | 44 ++++ .../yaml/sctp_onearm.yml | 41 +++ .../yaml/settings.yaml | 45 ++++ .../yaml/tcp_fullnat.yml | 29 +++ .../yaml/tcp_onearm.yml | 30 +++ .../yaml/udp_fullnat.yml | 30 +++ .../yaml/udp_onearm.yml | 30 +++ cicd/k8s-nat64/kube-loxilb.yaml | 1 + cicd/microk8s-incluster/kube-loxilb.yml | 1 + 67 files changed, 1703 insertions(+) create mode 100644 .github/workflows/k8s-flannel-incluster-multus.yml create mode 100644 cicd/k8s-flannel-incluster-multus/Vagrantfile create mode 100755 cicd/k8s-flannel-incluster-multus/config.sh create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml create 
mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-service.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/common.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/host.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/master.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh create mode 100755 cicd/k8s-flannel-incluster-multus/rmconfig.sh create mode 100755 cicd/k8s-flannel-incluster-multus/validation.sh create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/settings.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml diff --git a/.github/workflows/k8s-flannel-incluster-multus.yml b/.github/workflows/k8s-flannel-incluster-multus.yml new file mode 100644 index 000000000..154e800c0 --- /dev/null +++ b/.github/workflows/k8s-flannel-incluster-multus.yml @@ -0,0 +1,36 @@ +name: K8s-Flannel-Incluster-Multus-Sanity-CI +on: + # schedule: + # Runs "At 11:00 UTC every day-of-week" + #- cron: '0 11 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k8s-flannel-incluster-multus' +jobs: + test-runner: + name: k8s-flannel-incluster-multus-sanity + runs-on: [self-hosted, large] + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run the test + run: | + cd cicd/k8s-flannel-incluster-multus + ./config.sh + ./validation.sh + cd - + + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/k8s-flannel-incluster-multus || true + ./rmconfig.sh + cd - diff --git a/cicd/docker-k0s-lb/kube-loxilb.yml b/cicd/docker-k0s-lb/kube-loxilb.yml index ae9d4e678..a225785cc 100644 --- a/cicd/docker-k0s-lb/kube-loxilb.yml +++ b/cicd/docker-k0s-lb/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/docker-k3s-calico/kube-loxilb.yml b/cicd/docker-k3s-calico/kube-loxilb.yml index e49c3aec7..4fddc08f4 100644 --- a/cicd/docker-k3s-calico/kube-loxilb.yml +++ b/cicd/docker-k3s-calico/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/docker-k3s-cilium/kube-loxilb.yml b/cicd/docker-k3s-cilium/kube-loxilb.yml index e49c3aec7..4fddc08f4 100644 --- a/cicd/docker-k3s-cilium/kube-loxilb.yml +++ 
b/cicd/docker-k3s-cilium/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/eks/kube-loxilb.yaml b/cicd/eks/kube-loxilb.yaml index 3c60aed6c..92c5a674e 100644 --- a/cicd/eks/kube-loxilb.yaml +++ b/cicd/eks/kube-loxilb.yaml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k0s-incluster/kube-loxilb.yml b/cicd/k0s-incluster/kube-loxilb.yml index ef86b0f16..99732f870 100644 --- a/cicd/k0s-incluster/kube-loxilb.yml +++ b/cicd/k0s-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k0s-weave/kube-loxilb.yml b/cicd/k0s-weave/kube-loxilb.yml index 4a0b9744f..e01934215 100644 --- a/cicd/k0s-weave/kube-loxilb.yml +++ b/cicd/k0s-weave/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-base-sanity/kube-loxilb.yml b/cicd/k3s-base-sanity/kube-loxilb.yml index 83b695f7b..d74551814 100644 --- a/cicd/k3s-base-sanity/kube-loxilb.yml +++ b/cicd/k3s-base-sanity/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico-dual-stack/kube-loxilb.yml b/cicd/k3s-calico-dual-stack/kube-loxilb.yml index b9fe885c9..6fdc7a51d 100644 --- a/cicd/k3s-calico-dual-stack/kube-loxilb.yml +++ b/cicd/k3s-calico-dual-stack/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico-incluster/kube-loxilb.yml b/cicd/k3s-calico-incluster/kube-loxilb.yml index ed875f3b1..2c9d2dd36 100644 --- a/cicd/k3s-calico-incluster/kube-loxilb.yml +++ b/cicd/k3s-calico-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml b/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml index bec0145d5..efb2e4580 100644 --- a/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml +++ b/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico/kube-loxilb.yml b/cicd/k3s-calico/kube-loxilb.yml index 4a0b9744f..e01934215 100644 --- a/cicd/k3s-calico/kube-loxilb.yml +++ b/cicd/k3s-calico/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-cilium-cluster/kube-loxilb.yml b/cicd/k3s-cilium-cluster/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k3s-cilium-cluster/kube-loxilb.yml +++ b/cicd/k3s-cilium-cluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-cilium/kube-loxilb.yml b/cicd/k3s-cilium/kube-loxilb.yml index 4a0b9744f..e01934215 100644 --- a/cicd/k3s-cilium/kube-loxilb.yml +++ b/cicd/k3s-cilium/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-ext-ep/kube-loxilb.yml b/cicd/k3s-ext-ep/kube-loxilb.yml index 85670b3a6..08278d69c 100644 --- a/cicd/k3s-ext-ep/kube-loxilb.yml +++ b/cicd/k3s-ext-ep/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - 
namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml b/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml index 2e36bf345..f5bda7eea 100644 --- a/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml +++ b/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-cluster/kube-loxilb.yml b/cicd/k3s-flannel-cluster/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k3s-flannel-cluster/kube-loxilb.yml +++ b/cicd/k3s-flannel-cluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml index 415d2c8a5..b7dc4df2c 100644 --- a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml +++ b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-incluster/kube-loxilb.yml b/cicd/k3s-flannel-incluster/kube-loxilb.yml index b18da9f0b..8f57b2e55 100644 --- a/cicd/k3s-flannel-incluster/kube-loxilb.yml +++ b/cicd/k3s-flannel-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml index 1b2f760f8..e9dd3f9f5 100644 --- a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml +++ b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-multus/kube-loxilb.yml b/cicd/k3s-flannel-multus/kube-loxilb.yml index b1bf14968..51a9b489e 100644 --- a/cicd/k3s-flannel-multus/kube-loxilb.yml +++ b/cicd/k3s-flannel-multus/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel/kube-loxilb.yml b/cicd/k3s-flannel/kube-loxilb.yml index 83b695f7b..d74551814 100644 --- a/cicd/k3s-flannel/kube-loxilb.yml +++ b/cicd/k3s-flannel/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-incluster/kube-loxilb.yml b/cicd/k3s-incluster/kube-loxilb.yml index ef86b0f16..99732f870 100644 --- a/cicd/k3s-incluster/kube-loxilb.yml +++ b/cicd/k3s-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml b/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml index f57f3c44b..401225522 100644 --- a/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml +++ b/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml b/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml index f57f3c44b..401225522 100644 --- a/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml +++ b/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git 
a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml index 414e820b3..2149b4bc7 100644 --- a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml index 2e36bf345..f5bda7eea 100644 --- a/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-sctpmh-2/kube-loxilb.yml b/cicd/k3s-sctpmh-2/kube-loxilb.yml index f727d1a2a..421ca81ad 100644 --- a/cicd/k3s-sctpmh-2/kube-loxilb.yml +++ b/cicd/k3s-sctpmh-2/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-sctpmh-seagull/kube-loxilb.yml b/cicd/k3s-sctpmh-seagull/kube-loxilb.yml index f06f18e0b..137b6dcbe 100644 --- a/cicd/k3s-sctpmh-seagull/kube-loxilb.yml +++ b/cicd/k3s-sctpmh-seagull/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-sctpmh/kube-loxilb.yml b/cicd/k3s-sctpmh/kube-loxilb.yml index 2d407e76f..77a5d683c 100644 --- a/cicd/k3s-sctpmh/kube-loxilb.yml +++ b/cicd/k3s-sctpmh/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml index b70b66ffb..1ca024293 100644 --- a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml +++ b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml index f9da5df2f..72e1d1ce7 100644 --- a/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml index c14902abb..4285bde5c 100644 --- a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml index 4155acc91..ba0c400a0 100644 --- a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml index 54bdcb89f..8f7623ab0 100644 --- a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml index 21933422e..bf600de94 100644 --- 
a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml b/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico/yaml/kube-loxilb.yml b/cicd/k8s-calico/yaml/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k8s-calico/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-flannel-incluster-multus/Vagrantfile b/cicd/k8s-flannel-incluster-multus/Vagrantfile new file mode 100644 index 000000000..1c0c4d065 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/Vagrantfile @@ -0,0 +1,81 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require "yaml" +settings = YAML.load_file "yaml/settings.yaml" + +workers = settings["nodes"]["workers"]["count"] +loxilbs = (ENV['LOXILBS'] || "2").to_i + +Vagrant.configure("2") do |config| + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + config.vm.define "host" do |host| + host.vm.hostname = 'host1' + host.vm.box = settings["software"]["cluster"]["box"] + host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + host.vm.network :private_network, ip: "124.124.124.9", :netmask => "255.255.255.0" + host.vm.provision :shell, :path => "node_scripts/host.sh" + host.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 2048] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + + config.vm.define "master" do |master| + master.vm.box = settings["software"]["cluster"]["box"] + master.vm.hostname = 'master' + master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0" + master.vm.network :private_network, ip: "124.124.124.10", :netmask => "255.255.255.0" + master.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + master.vm.provision "shell", + env: { + "CALICO_VERSION" => settings["software"]["calico"], + "CONTROL_IP" => settings["network"]["control_ip"], + "POD_CIDR" => settings["network"]["pod_cidr"], + "SERVICE_CIDR" => settings["network"]["service_cidr"] + }, + path: "node_scripts/master.sh" + + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + + (1..workers).each do |node_number| + config.vm.define "worker#{node_number}" do |worker| + worker.vm.box = settings["software"]["cluster"]["box"] + worker.vm.hostname = "worker#{node_number}" + ip = node_number + 200 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.network :private_network, ip: "124.124.124.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision "shell", + 
env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + worker.vm.provision "shell", path: "node_scripts/worker.sh" + + worker.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + end +end diff --git a/cicd/k8s-flannel-incluster-multus/config.sh b/cicd/k8s-flannel-incluster-multus/config.sh new file mode 100755 index 000000000..5970158c4 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/config.sh @@ -0,0 +1,67 @@ +#!/bin/bash +VMs=$(vagrant global-status | grep -i virtualbox) +while IFS= read -a VMs; do + read -a vm <<< "$VMs" + cd ${vm[4]} 2>&1>/dev/null + echo "Destroying ${vm[1]}" + vagrant destroy -f ${vm[1]} + cd - 2>&1>/dev/null +done <<< "$VMs" + +vagrant up + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + break; + fi + echo "Will try after 10s" + sleep 10 +done + +sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1 + +#Create fullnat Service +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_onearm.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_onearm.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_onearm.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + echo "Cluster is ready" + break; + fi + echo "Will try after 10s" + sleep 10 +done + +if [[ $fin == 0 ]]; then + echo "Cluster is not ready" + exit 1 +fi diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml new file mode 100644 index 000000000..40fa51932 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml @@ -0,0 +1,236 @@ +# Note: +# This deployment file is designed for 'quickstart' of multus, easy installation to test it, +# hence this deployment yaml does not care about following things intentionally. +# - various configuration options +# - minor deployment scenario +# - upgrade/update/uninstall scenario +# Multus team understand users deployment scenarios are diverse, hence we do not cover +# comprehensive deployment scenario. We expect that it is covered by each platform deployment. 
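+# In this testbed the manifest is applied from node_scripts/host.sh's sibling script
+# node_scripts/master.sh, for example:
+#   kubectl apply -f /vagrant/multus/multus-daemonset.yml
+# followed later by the vlan5 NetworkAttachmentDefinition from multus/multus-vlan.yml.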
+--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
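+ # For example (a sketch of the manual alternative described above), the Multus pod
+ # args would become:
+ #   args:
+ #     - "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
+ # with 70-multus.conf kept alphabetically first under /etc/cni/net.d/.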
+ cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoExecute + serviceAccountName: multus + containers: + - name: kube-multus + image: ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot + command: ["/thin_entrypoint"] + args: + - "--multus-conf-file=auto" + - "--multus-autoconfig-dir=/host/etc/cni/net.d" + - "--cni-conf-dir=/host/etc/cni/net.d" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + initContainers: + - name: install-multus-binary + image: ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot + command: ["/install_multus"] + args: + - "--type" + - "thin" + resources: + requests: + cpu: "10m" + memory: "15Mi" + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cnibin + mountPath: /host/opt/cni/bin + mountPropagation: Bidirectional + terminationGracePeriodSeconds: 10 + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml new file mode 100644 index 000000000..4b14d1789 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml @@ -0,0 +1,27 @@ +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: macvlan1 +spec: + config: '{ + "cniVersion": "0.3.1", + "type": "macvlan", + "master": "eth1", + "mode": "bridge", + "ipam": { + "type": "host-local", + "ranges": [ + [ { + "subnet": "4.0.6.0/24", + "rangeStart": "4.0.6.3", + "rangeEnd": "4.0.6.100", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "gateway": "4.0.6.149" + } ] + ] + } + }' diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml new file mode 100644 index 000000000..aa0754a77 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pod-02 + labels: + app: pod-02 + #annotations: + # k8s.v1.cni.cncf.io/networks: vlan5 +spec: + containers: + - name: nginx + image: ghcr.io/nicolaka/netshoot:latest + command: + - sleep + - "infinity" + ports: + - containerPort: 80 + securityContext: + privileged: true + 
capabilities: + add: + - SYS_ADMIN diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml new file mode 100644 index 000000000..32dfd15c7 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pod-01 + labels: + app: pod-01 + annotations: + k8s.v1.cni.cncf.io/networks: vlan5 +spec: + containers: + - name: nginx + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-service.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-service.yml new file mode 100644 index 000000000..17038e336 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-service.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: multus-service + annotations: + loxilb.io/multus-nets: vlan5 + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + app: pod-01 + ports: + - port: 55002 + targetPort: 80 + type: LoadBalancer diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml new file mode 100644 index 000000000..ccedf60ee --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml @@ -0,0 +1,21 @@ +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: vlan5 +spec: + config: '{ + "name": "vlan5-net", + "cniVersion": "0.3.1", + "type": "vlan", + "master": "eth2", + "mtu": 1450, + "vlanId": 5, + "linkInContainer": false, + "ipam": { + "type": "whereabouts", + "range": "123.123.123.192/28" + }, + "dns": { + "nameservers": [ "8.8.8.8" ] + } + }' diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh new file mode 100755 index 000000000..c01ad688f --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh @@ -0,0 +1,93 @@ +#!/bin/bash +# +# Common setup for all servers (Control Plane and Nodes) + +set -euxo pipefail + +# Variable Declaration + +# DNS Setting +if [ ! 
-d /etc/systemd/resolved.conf.d ]; then + sudo mkdir /etc/systemd/resolved.conf.d/ +fi +cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true +sudo apt-get update -y +# Install CRI-O Runtime + +VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" +CRIO_VERSION=1.27 +# Create the .conf file to load the modules at bootup +cat <> /etc/default/crio << EOF +${ENVIRONMENT} +EOF +sudo systemctl daemon-reload +sudo systemctl enable crio --now + +echo "CRI runtime installed successfully" + +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates curl gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list + +sudo apt-get update -y +sudo apt-get install -y kubelet kubectl kubeadm +sudo apt-get update -y +sudo apt-get install -y jq +sudo apt-get install -y ipvsadm + +local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')" +cat > /etc/default/kubelet << EOF +KUBELET_EXTRA_ARGS=--node-ip=$local_ip +${ENVIRONMENT} +EOF diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/host.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/host.sh new file mode 100755 index 000000000..9eb9c8efb --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/host.sh @@ -0,0 +1,8 @@ +# Setup the bastion host +sudo apt-get update +sudo apt-get -y install socat lksctp-tools +sudo ip link add link eth2 name eth2.5 type vlan id 5 +sudo ip addr add 123.123.123.206/24 dev eth2.5 +sudo ip link set eth2.5 up + +echo "Host is up" diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh new file mode 100755 index 000000000..6df67208f --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh @@ -0,0 +1,9 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh new file mode 100755 index 000000000..43d431e53 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# +# Setup for Control Plane (Master) servers + +set -euxo pipefail + +NODENAME=$(hostname -s) + +sudo sed -i 's#10.85.0.0/16#10.244.0.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist + +sudo kubeadm config images pull + +echo "Preflight Check Passed: Downloaded All Required Images" + +#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +sudo kubeadm init --ignore-preflight-errors Swap --config 
/vagrant/yaml/kubeadm-config.yaml + +mkdir -p "$HOME"/.kube +sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config +sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config + +# Save Configs to shared /Vagrant location + +# For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration. + +config_path="/vagrant/configs" + +if [ -d $config_path ]; then + rm -f $config_path/* +else + mkdir -p $config_path +fi + +cp -i /etc/kubernetes/admin.conf $config_path/config +touch $config_path/join.sh +chmod +x $config_path/join.sh + +kubeadm token create --print-join-command > $config_path/join.sh + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +EOF + +# Install Flannel Network Plugin +kubectl apply -f /vagrant/yaml/kube-flannel.yml + +# Install loxilb checksum module +#curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - + +# Install whereabouts +git clone https://github.com/k8snetworkplumbingwg/whereabouts && cd whereabouts +kubectl apply \ + -f doc/crds/daemonset-install.yaml \ + -f doc/crds/whereabouts.cni.cncf.io_ippools.yaml \ + -f doc/crds/whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml && cd - + +# Install multus +kubectl apply -f /vagrant/multus/multus-daemonset.yml + +# Wait for pods to be ready +kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=kube-flannel --timeout=240s >> /dev/null 2>&1 || true +kubectl apply -f /vagrant/multus/multus-vlan.yml +sleep 60 +kubectl apply -f /vagrant/yaml/loxilb.yaml diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh new file mode 100755 index 000000000..0fd5eaee9 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Setup for Node servers + +set -euxo pipefail + +if [[ $(hostname -s) == "worker1" ]]; then + sudo sed -i 's#10.85.0.0/16#10.244.1.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist +else + sudo sed -i 's#10.85.0.0/16#10.244.2.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist +fi + +config_path="/vagrant/configs" + +/bin/bash $config_path/join.sh -v + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +NODENAME=$(hostname -s) +kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker +kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=kube-flannel --timeout=240s >> /dev/null 2>&1 || true +kubectl apply -f /vagrant/yaml/kube-loxilb.yaml +kubectl apply -f /vagrant/multus/multus-pod.yml +sleep 60 +kubectl apply -f /vagrant/multus/multus-service.yml + +EOF + +#curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - diff --git a/cicd/k8s-flannel-incluster-multus/rmconfig.sh b/cicd/k8s-flannel-incluster-multus/rmconfig.sh new file mode 100755 index 000000000..1eb0df750 --- /dev/null +++ 
b/cicd/k8s-flannel-incluster-multus/rmconfig.sh @@ -0,0 +1,5 @@ +#!/bin/bash +vagrant destroy -f worker2 +vagrant destroy -f worker1 +vagrant destroy -f master +vagrant destroy -f host diff --git a/cicd/k8s-flannel-incluster-multus/validation.sh b/cicd/k8s-flannel-incluster-multus/validation.sh new file mode 100755 index 000000000..0db8906ee --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/validation.sh @@ -0,0 +1,30 @@ +#!/bin/bash +source ../common.sh +echo k8s-flannel-incluster + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +echo -e "\nEnd Points List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nPod List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null + +out=$(vagrant ssh host -c "curl -s --connect-timeout 10 http://123.123.123.205:55002" 2> /dev/null) +#echo $out +if [[ ${out} == *"nginx"* ]]; then + echo -e "k8s-flannel-incluster TCP\t[OK]" +else + echo -e "k8s-flannel-incluster TCP\t[FAILED]" + code=1 +fi + +exit $code diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml b/cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml new file mode 100644 index 000000000..aaf3d7404 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml @@ -0,0 +1,210 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: kube-flannel + labels: + k8s-app: flannel + pod-security.kubernetes.io/enforce: privileged +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-flannel +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: flannel + name: flannel + namespace: kube-flannel +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-flannel + labels: + tier: node + k8s-app: flannel + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "EnableNFTables": false, + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-flannel + labels: + tier: node + app: flannel + k8s-app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni-plugin + image: docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel2 + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: docker.io/flannel/flannel:v0.25.6 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: docker.io/flannel/flannel:v0.25.6 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + - --iface=eth1 + resources: + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni-plugin + hostPath: + path: /opt/cni/bin + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml b/cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml new file mode 100644 index 000000000..6b5a9f50b --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml @@ -0,0 +1,186 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - namespaces + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - gateway.networking.k8s.io + resources: + - gatewayclasses + - gatewayclasses/status + - gateways + - gateways/status + - tcproutes + - udproutes + verbs: ["get", "watch", "list", "patch", "update"] + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + - apiGroups: + - bgppeer.loxilb.io + resources: + - bgppeerservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicydefinedsets.loxilb.io + resources: + - bgppolicydefinedsetsservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicydefinition.loxilb.io + resources: + - bgppolicydefinitionservices + verbs: + - get + - 
watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicyapply.loxilb.io + resources: + - bgppolicyapplyservices + verbs: + - get + - watch + - list + - create + - update + - delete + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: kube-loxilb-app +spec: + replicas: 1 + selector: + matchLabels: + app: kube-loxilb-app + template: + metadata: + labels: + app: kube-loxilb-app + spec: + #hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + #- --loxiURL=http://192.168.80.10:11111 + - --cidrPools=defaultPool=123.123.123.205/32 + #- --setBGP=64512 + #- --listenBGPPort=1791 + - --setRoles=0.0.0.0 + #- --zone=az1 + #- --monitor + #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml b/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml new file mode 100644 index 000000000..e8de10b86 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml @@ -0,0 +1,70 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 192.168.80.250 + bindPort: 6443 +nodeRegistration: + imagePullPolicy: IfNotPresent + name: master + taints: null + kubeletExtraArgs: + node-ip: 192.168.80.250 +--- +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +kind: ClusterConfiguration +apiServer: + timeoutForControlPlane: 4m0s + certSANs: + - 192.168.80.250 +controlPlaneEndpoint: 192.168.80.250:6443 +clusterName: kubernetes +controllerManager: {} +dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kubernetesVersion: v1.29.2 +networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.245.0.0/18 +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 5 +clusterCIDR: "" +configSyncPeriod: 15m0s +#featureGates: "SupportIPVSProxyMode=true" +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" diff 
--git a/cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml b/cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml new file mode 100644 index 000000000..3bcfce436 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + spec: + hostNetwork: true + hostPID: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + #- key: "node-role.kubernetes.io/master" + #operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + # - key: "node-role.kubernetes.io/master" + # operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + initContainers: + - name: mkllb-cgroup + command: + - sh + - -ec + - | + ls /usr/local/sbin/mkllb_cgroup && chmod 777 /usr/local/sbin/mkllb_cgroup; + cp -f /usr/local/sbin/mkllb_cgroup /hbin/mkllb_cgroup; + nsenter --cgroup=/hproc/1/ns/cgroup --mount=/hproc/1/ns/mnt /bin/mkllb_cgroup; + echo done; + rm /hbin/mkllb_cgroup; + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + volumeMounts: + - name: hproc + mountPath: /hproc + - name: hbin + mountPath: /hbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: IfNotPresent + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--localsockpolicy" ] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + volumeMounts: + - name: llb-cgroup + mountPath: /opt/loxilb/cgroup + securityContext: + privileged: true + runAsUser: 0 + capabilities: + add: + - SYS_ADMIN + volumes: + - name: hproc + hostPath: + path: /proc + type: Directory + - name: hbin + hostPath: + path: /bin + type: Directory + - name: llb-cgroup + hostPath: + path: /opt/loxilb/cgroup + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml b/cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml new file mode 100644 index 000000000..e386d728d --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + #namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + annotations: + k8s.v1.cni.cncf.io/networks: vlan5 + spec: + #hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + #- key: "node-role.kubernetes.io/master" + #operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + #- key: 
"node-role.kubernetes.io/master" + # operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + #command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico" ] + command: [ "/root/loxilb-io/loxilb/loxilb" ] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + #namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml b/cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml new file mode 100644 index 000000000..199d2a406 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-fullnat-test + ports: + - port: 57004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-fullnat-test + labels: + what: sctp-fullnat-test +spec: + containers: + - name: sctp-fullnat-test + #image: loxilbio/sctp-darn:latest + image: ghcr.io/loxilb-io/alpine-socat:latest + imagePullPolicy: Always + #command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml b/cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml new file mode 100644 index 000000000..b4b736962 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-onearm-test + ports: + - port: 56004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-onearm-test + labels: + what: sctp-onearm-test +spec: + containers: + - name: sctp-onearm-test + image: ghcr.io/loxilb-io/alpine-socat:latest + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/settings.yaml b/cicd/k8s-flannel-incluster-multus/yaml/settings.yaml new file mode 100644 index 000000000..9f57a1998 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/settings.yaml @@ -0,0 +1,45 @@ +--- +# cluster_name is used to group the nodes in a folder within VirtualBox: +cluster_name: Kubernetes Cluster +# 
Uncomment to set environment variables for services such as crio and kubelet. +# For example, configure the cluster to pull images via a proxy. +# environment: | +# HTTP_PROXY=http://my-proxy:8000 +# HTTPS_PROXY=http://my-proxy:8000 +# NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03 +# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf. +network: + iloxilb_ip: 192.168.80.253 + oloxilb_ip: 192.168.90.253 + # Worker IPs are simply incremented from the control IP. + control_ip: 192.168.80.250 + dns_servers: + - 8.8.8.8 + - 1.1.1.1 + pod_cidr: 10.244.0.0/16 + service_cidr: 10.245.0.0/18 +nodes: + control: + cpu: 2 + memory: 4096 + workers: + count: 2 + cpu: 1 + memory: 2048 +# Mount additional shared folders from the host into each virtual machine. +# Note that the project directory is automatically mounted at /vagrant. +# shared_folders: +# - host_path: ../images +# vm_path: /vagrant/images +software: + loxilb: + box: + name: sysnet4admin/Ubuntu-k8s + version: 0.7.1 + cluster: + box: bento/ubuntu-22.04 + version: 202401.31.0 + calico: 3.26.0 + # To skip the dashboard installation, set its version to an empty value or comment it out: + kubernetes: 1.29.2 + os: xUbuntu_22.04 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml b/cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml new file mode 100644 index 000000000..3303ac35e --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-fullnat-test + ports: + - port: 57002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-fullnat-test + labels: + what: tcp-fullnat-test +spec: + containers: + - name: tcp-fullnat-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml b/cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml new file mode 100644 index 000000000..87d317015 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" + loxilb.io/zoneselect: "az1" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-onearm-test + ports: + - port: 56002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-onearm-test + labels: + what: tcp-onearm-test +spec: + containers: + - name: tcp-onearm-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml b/cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml new file mode 100644 index 000000000..67b729019 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-fullnat-test + ports: + - port: 57003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-fullnat-test + labels: + what: udp-fullnat-test +spec: 
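+  # The container below runs a small UDP echo server on 33333; it backs the
+  # udp-lb-fullnat Service above on port 57003. As a quick manual probe, assuming
+  # socat on the client host and the kube-loxilb pool VIP 123.123.123.205 being
+  # assigned to this service:
+  #   echo hello | socat -t2 - udp:123.123.123.205:57003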
+ containers: + - name: udp-fullnat-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml b/cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml new file mode 100644 index 000000000..833187e73 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-onearm-test + ports: + - port: 56003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-onearm-test + labels: + what: udp-onearm-test +spec: + containers: + - name: udp-onearm-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-nat64/kube-loxilb.yaml b/cicd/k8s-nat64/kube-loxilb.yaml index 6a1beee6d..9c1bb0e0a 100644 --- a/cicd/k8s-nat64/kube-loxilb.yaml +++ b/cicd/k8s-nat64/kube-loxilb.yaml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/microk8s-incluster/kube-loxilb.yml b/cicd/microk8s-incluster/kube-loxilb.yml index ef86b0f16..99732f870 100644 --- a/cicd/microk8s-incluster/kube-loxilb.yml +++ b/cicd/microk8s-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get From 0b3e78743773b0c8cc21f392dc7b2ae6574f89af Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Mon, 23 Sep 2024 11:38:09 +0900 Subject: [PATCH 12/34] gh-87: CICD added - Client-to-LoxiLB IPsec with HA in K8s --- cicd/k8s-calico-ipsec-ha/README | 19 + cicd/k8s-calico-ipsec-ha/Vagrantfile | 95 ++++ .../k8s-calico-ipsec-ha/bird_config/bird.conf | 254 +++++++++++ cicd/k8s-calico-ipsec-ha/config.sh | 63 +++ cicd/k8s-calico-ipsec-ha/configs/config | 19 + cicd/k8s-calico-ipsec-ha/configs/join.sh | 1 + .../host_ipsec_config/charon.conf | 408 ++++++++++++++++++ .../host_ipsec_config/ipsec.conf | 129 ++++++ .../host_ipsec_config/ipsec.secrets | 4 + cicd/k8s-calico-ipsec-ha/host_validation.sh | 28 ++ cicd/k8s-calico-ipsec-ha/host_validation2.sh | 49 +++ .../host_validation2_with_sctp.sh | 81 ++++ .../host_validation_with_sctp.sh | 37 ++ .../llb1_ipsec_config/charon.conf | 376 ++++++++++++++++ .../llb1_ipsec_config/ipsec.conf | 77 ++++ .../llb1_ipsec_config/ipsec.secrets | 3 + .../llb2_ipsec_config/charon.conf | 376 ++++++++++++++++ .../llb2_ipsec_config/ipsec.conf | 82 ++++ .../llb2_ipsec_config/ipsec.secrets | 3 + .../node_scripts/common.sh | 93 ++++ cicd/k8s-calico-ipsec-ha/node_scripts/host.sh | 29 ++ .../node_scripts/loxilb1.sh | 23 + .../node_scripts/loxilb2.sh | 26 ++ .../node_scripts/master.sh | 60 +++ .../node_scripts/worker.sh | 18 + cicd/k8s-calico-ipsec-ha/rmconfig.sh | 7 + cicd/k8s-calico-ipsec-ha/validation.sh | 121 ++++++ .../validation_with_sctp.sh | 121 ++++++ cicd/k8s-calico-ipsec-ha/yaml/kube-loxilb.yml | 135 ++++++ .../yaml/kubeadm-config.yaml | 69 +++ cicd/k8s-calico-ipsec-ha/yaml/loxilb-peer.yml | 75 ++++ .../k8s-calico-ipsec-ha/yaml/sctp_default.yml | 35 ++ .../k8s-calico-ipsec-ha/yaml/sctp_fullnat.yml | 35 ++ cicd/k8s-calico-ipsec-ha/yaml/settings.yaml | 45 ++ cicd/k8s-calico-ipsec-ha/yaml/tcp_default.yml | 33 ++ cicd/k8s-calico-ipsec-ha/yaml/tcp_fullnat.yml | 33 ++ cicd/k8s-calico-ipsec-ha/yaml/udp_fullnat.yml | 30 ++ 37 files changed, 3092 
insertions(+) create mode 100644 cicd/k8s-calico-ipsec-ha/README create mode 100644 cicd/k8s-calico-ipsec-ha/Vagrantfile create mode 100644 cicd/k8s-calico-ipsec-ha/bird_config/bird.conf create mode 100755 cicd/k8s-calico-ipsec-ha/config.sh create mode 100644 cicd/k8s-calico-ipsec-ha/configs/config create mode 100755 cicd/k8s-calico-ipsec-ha/configs/join.sh create mode 100644 cicd/k8s-calico-ipsec-ha/host_ipsec_config/charon.conf create mode 100644 cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.conf create mode 100644 cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.secrets create mode 100755 cicd/k8s-calico-ipsec-ha/host_validation.sh create mode 100755 cicd/k8s-calico-ipsec-ha/host_validation2.sh create mode 100755 cicd/k8s-calico-ipsec-ha/host_validation2_with_sctp.sh create mode 100755 cicd/k8s-calico-ipsec-ha/host_validation_with_sctp.sh create mode 100644 cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/charon.conf create mode 100644 cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.conf create mode 100644 cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.secrets create mode 100644 cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/charon.conf create mode 100644 cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.conf create mode 100644 cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.secrets create mode 100644 cicd/k8s-calico-ipsec-ha/node_scripts/common.sh create mode 100755 cicd/k8s-calico-ipsec-ha/node_scripts/host.sh create mode 100644 cicd/k8s-calico-ipsec-ha/node_scripts/loxilb1.sh create mode 100644 cicd/k8s-calico-ipsec-ha/node_scripts/loxilb2.sh create mode 100644 cicd/k8s-calico-ipsec-ha/node_scripts/master.sh create mode 100644 cicd/k8s-calico-ipsec-ha/node_scripts/worker.sh create mode 100755 cicd/k8s-calico-ipsec-ha/rmconfig.sh create mode 100755 cicd/k8s-calico-ipsec-ha/validation.sh create mode 100755 cicd/k8s-calico-ipsec-ha/validation_with_sctp.sh create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/kube-loxilb.yml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/loxilb-peer.yml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/sctp_default.yml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/sctp_fullnat.yml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/settings.yaml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/tcp_default.yml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/tcp_fullnat.yml create mode 100644 cicd/k8s-calico-ipsec-ha/yaml/udp_fullnat.yml
diff --git a/cicd/k8s-calico-ipsec-ha/README b/cicd/k8s-calico-ipsec-ha/README new file mode 100644 index 000000000..2edcbf3e1 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/README @@ -0,0 +1,19 @@ +## Test Case Description + +This scenario demonstrates LoxiLB with IPSec in HA mode (clustering). The setup has 2 LoxiLB nodes and a K8s cluster (1 Master Node & 2 Worker Nodes) with Calico CNI in IPVS mode. LoxiLB runs as an external service LB, and workloads are spawned on all the cluster nodes. + +The client connects to LoxiLB over an L3 network through IPSec tunnels. The client and LoxiLB do eBGP peering across these tunnels, while the cluster nodes and LoxiLB do iBGP. LoxiLB advertises the Service CIDR or Virtual IP to the client and the cluster nodes. + +The Service CIDR is also a virtual range, distinct from the K8s cluster network. + +In scenarios where LoxiLB runs outside of the cluster in HA mode, it is advised to create LB services in fullnat mode for ease of connectivity, as sketched below.
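+
+Below is a minimal sketch of such a service (the manifests actually used by this
+test live under yaml/, e.g. yaml/tcp_fullnat.yml; the name, selector and ports
+here are illustrative). A fullnat service is an ordinary LoadBalancer Service
+selected by the loxilb annotations:
+
+  apiVersion: v1
+  kind: Service
+  metadata:
+    name: tcp-lb-fullnat
+    annotations:
+      loxilb.io/liveness: "yes"
+      loxilb.io/lbmode: "fullnat"
+  spec:
+    loadBalancerClass: loxilb.io/loxilb
+    externalTrafficPolicy: Local
+    selector:
+      what: tcp-fullnat-test
+    ports:
+      - port: 57002
+        targetPort: 80
+    type: LoadBalancer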
+ +Please follow this link for a detailed explanation of a similar scenario (without IPSec): https://www.loxilb.io/post/k8s-deploying-hitless-and-ha-load-balancing + +If you wish to create this scenario in your lab, install Vagrant and follow the steps below: + +1. Run ./config.sh to set up the K8s cluster, the client and the LoxiLB nodes. + +2. Run ./validation.sh for the TCP HA test, or ./validation_with_sctp.sh for the TCP & SCTP HA test. Test results are displayed at the end. + +3. Run ./rmconfig.sh to clean up the setup.
diff --git a/cicd/k8s-calico-ipsec-ha/Vagrantfile b/cicd/k8s-calico-ipsec-ha/Vagrantfile new file mode 100644 index 000000000..f2a5f620a --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/Vagrantfile @@ -0,0 +1,95 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require "yaml" +settings = YAML.load_file "yaml/settings.yaml" + +workers = settings["nodes"]["workers"]["count"] +loxilbs = (ENV['LOXILBS'] || "2").to_i + +Vagrant.configure("2") do |config| + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + config.vm.define "host" do |host| + host.vm.hostname = 'host1' + host.vm.box = settings["software"]["cluster"]["box"] + host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0" + host.vm.provision :shell, :path => "node_scripts/host.sh" + host.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 2048] + vbox.customize ["modifyvm", :id, "--cpus", 1] + end + end + + (1..loxilbs).each do |node_number| + config.vm.define "llb#{node_number}" do |loxilb| + loxilb.vm.box = settings["software"]["loxilb"]["box"]["name"] + loxilb.vm.box_version = settings["software"]["loxilb"]["box"]["version"] + loxilb.vm.hostname = "llb#{node_number}" + ip = node_number + 251 + loxilb.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + loxilb.vm.network :private_network, ip: "192.168.90.#{ip}", :netmask => "255.255.255.0" + loxilb.vm.provision :shell, :path => "node_scripts/loxilb#{node_number}.sh" + loxilb.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + end + + config.vm.define "master" do |master| + master.vm.box = settings["software"]["cluster"]["box"] + master.vm.hostname = 'master' + master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0" + master.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + master.vm.provision "shell", + env: { + "CALICO_VERSION" => settings["software"]["calico"], + "CONTROL_IP" => settings["network"]["control_ip"], + "POD_CIDR" => settings["network"]["pod_cidr"], + "SERVICE_CIDR" => settings["network"]["service_cidr"] + }, + path: "node_scripts/master.sh" + + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + + (1..workers).each do |node_number| + config.vm.define "worker#{node_number}" do |worker| + worker.vm.box = settings["software"]["cluster"]["box"] + worker.vm.hostname
= "worker#{node_number}" + ip = node_number + 200 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + worker.vm.provision "shell", path: "node_scripts/worker.sh" + + worker.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + end +end diff --git a/cicd/k8s-calico-ipsec-ha/bird_config/bird.conf b/cicd/k8s-calico-ipsec-ha/bird_config/bird.conf new file mode 100644 index 000000000..c4e7ef827 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/bird_config/bird.conf @@ -0,0 +1,254 @@ +# This is a basic configuration file, which contains boilerplate options and +# some basic examples. It allows the BIRD daemon to start but will not cause +# anything else to happen. +# +# Please refer to the BIRD User's Guide documentation, which is also available +# online at http://bird.network.cz/ in HTML format, for more information on +# configuring BIRD and adding routing protocols. + +# Configure logging +#log syslog all; +log "/var/log/bird.log" { debug, trace, info, remote, warning, error, auth, fatal, bug }; + +# Set router ID. It is a unique identification of your router, usually one of +# IPv4 addresses of the router. It is recommended to configure it explicitly. +router id 192.168.90.9; + +# Turn on global debugging of all protocols (all messages or just selected classes) +# debug protocols all; +# debug protocols { events, states }; + +# Turn on internal watchdog +# watchdog warning 5 s; +# watchdog timeout 30 s; + +# You can define your own constants +# define my_asn = 65000; +# define my_addr = 198.51.100.1; + +# Tables master4 and master6 are defined by default +# ipv4 table master4; +# ipv6 table master6; + +# Define more tables, e.g. for policy routing or as MRIB +# ipv4 table mrib4; +# ipv6 table mrib6; + +# The Device protocol is not a real routing protocol. It does not generate any +# routes and it only serves as a module for getting information about network +# interfaces from the kernel. It is necessary in almost any configuration. +protocol device { +} + +# The direct protocol is not a real routing protocol. It automatically generates +# direct routes to all network interfaces. Can exist in as many instances as you +# wish if you want to populate multiple routing tables with direct routes. +protocol direct { + #disabled; # Disable by default + ipv4; # Connect to default IPv4 table + #ipv6; # ... and to default IPv6 table +} + +# The Kernel protocol is not a real routing protocol. Instead of communicating +# with other routers in the network, it performs synchronization of BIRD +# routing tables with the OS kernel. One instance per table. +protocol kernel { + ipv4 { # Connect protocol to IPv4 table by channel +# table master4; # Default IPv4 table is master4 +# import all; # Import to table, default is import all + export all; # Export to protocol. 
default is export none + }; +# learn; # Learn alien routes from the kernel +# kernel table 10; # Kernel table to synchronize with (default: main) + merge paths on; +} + +# Another instance for IPv6, skipping default options +protocol kernel { + ipv6 { export all; }; +} + +# Static routes (Again, there can be multiple instances, for different address +# families and to disable/enable various groups of static routes on the fly). +protocol static { + ipv4; # Again, IPv4 channel with default options + +# route 0.0.0.0/0 via 198.51.100.10; +# route 192.0.2.0/24 blackhole; +# route 10.0.0.0/8 unreachable; +# route 10.2.0.0/24 via "eth0"; +# # Static routes can be defined with optional attributes +# route 10.1.1.0/24 via 198.51.100.3 { rip_metric = 3; }; +# route 10.1.2.0/24 via 198.51.100.3 { ospf_metric1 = 100; }; +# route 10.1.3.0/24 via 198.51.100.4 { ospf_metric2 = 100; }; +} + +# Pipe protocol connects two routing tables. Beware of loops. +# protocol pipe { +# table master4; # No ipv4/ipv6 channel definition like in other protocols +# peer table mrib4; +# import all; # Direction peer table -> table +# export all; # Direction table -> peer table +# } + +# RIP example, both RIP and RIPng are supported +# protocol rip { +# ipv4 { +# # Export direct, static routes and ones from RIP itself +# import all; +# export where source ~ [ RTS_DEVICE, RTS_STATIC, RTS_RIP ]; +# }; +# interface "eth*" { +# update time 10; # Default period is 30 +# timeout time 60; # Default timeout is 180 +# authentication cryptographic; # No authentication by default +# password "hello" { algorithm hmac sha256; }; # Default is MD5 +# }; +# } + +# OSPF example, both OSPFv2 and OSPFv3 are supported +# protocol ospf v3 { +# ipv6 { +# import all; +# export where source = RTS_STATIC; +# }; +# area 0 { +# interface "eth*" { +# type broadcast; # Detected by default +# cost 10; # Interface metric +# hello 5; # Default hello perid 10 is too long +# }; +# interface "tun*" { +# type ptp; # PtP mode, avoids DR selection +# cost 100; # Interface metric +# hello 5; # Default hello perid 10 is too long +# }; +# interface "dummy0" { +# stub; # Stub interface, just propagate it +# }; +# }; +#} + +# Define simple filter as an example for BGP import filter +# See https://gitlab.labs.nic.cz/labs/bird/wikis/BGP_filtering for more examples +# filter rt_import +# { +# if bgp_path.first != 64496 then accept; +# if bgp_path.len > 64 then accept; +# if bgp_next_hop != from then accept; +# reject; +# } + +# BGP example, explicit name 'uplink1' is used instead of default 'bgp1' +# protocol bgp uplink1 { +# description "My BGP uplink"; +# local 198.51.100.1 as 65000; +# neighbor 198.51.100.10 as 64496; +# hold time 90; # Default is 240 +# password "secret"; # Password used for MD5 authentication +# +# ipv4 { # regular IPv4 unicast (1/1) +# import filter rt_import; +# export where source ~ [ RTS_STATIC, RTS_BGP ]; +# }; +# +# ipv6 { # regular IPv6 unicast (2/1) +# import filter rt_import; +# export filter { # The same as 'where' expression above +# if source ~ [ RTS_STATIC, RTS_BGP ] +# then accept; +# else reject; +# }; +# }; +# +# ipv4 multicast { # IPv4 multicast topology (1/2) +# table mrib4; # explicit IPv4 table +# import filter rt_import; +# export all; +# }; +# +# ipv6 multicast { # IPv6 multicast topology (2/2) +# table mrib6; # explicit IPv6 table +# import filter rt_import; +# export all; +# }; +#} + +# Template example. Using templates to define IBGP route reflector clients. 
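+#
+# (A note on the live configuration at the end of this file: the my_routes1 and
+# my_routes2 static protocols announce the client prefix 30.30.30.0/24 with a
+# next hop on each IPSec tunnel, and the llb1/llb2 eBGP sessions below export
+# one of them each. In the opposite direction, the service routes imported from
+# both LoxiLB peers are merged into a single ECMP route by the 'merge paths on'
+# option of the kernel protocol above, which is what host_validation.sh later
+# counts via 'birdc show route'.)
+#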
+# template bgp rr_clients { +# local 10.0.0.1 as 65000; +# neighbor as 65000; +# rr client; +# rr cluster id 1.0.0.1; +# +# ipv4 { +# import all; +# export where source = RTS_BGP; +# }; +# +# ipv6 { +# import all; +# export where source = RTS_BGP; +# }; +# } +# +# protocol bgp client1 from rr_clients { +# neighbor 10.0.1.1; +# } +# +# protocol bgp client2 from rr_clients { +# neighbor 10.0.2.1; +# } +# +# protocol bgp client3 from rr_clients { +# neighbor 10.0.3.1; +# } +# + +protocol static my_routes1 { + ipv4; + route 30.30.30.0/24 via 77.77.100.1; +} + +protocol static my_routes2 { + ipv4; + route 30.30.30.0/24 via 77.77.101.1; +} + +filter export_my_routes1 { + if proto = "my_routes1" then { + accept; + } + reject; +} + +filter export_my_routes2 { + if proto = "my_routes2" then { + accept; + } + reject; +} + + +protocol bgp llb1 { + local as 64512; + #neighbor 192.168.90.252 as 64511; + neighbor 77.77.100.254 as 64511; + + ipv4 { + import all; + export filter export_my_routes1; + }; +} + +protocol bgp llb2 { + local as 64512; + #neighbor 192.168.90.253 as 64511; + neighbor 77.77.101.254 as 64511; + + ipv4 { + import all; + export filter export_my_routes2; + }; +} +
diff --git a/cicd/k8s-calico-ipsec-ha/config.sh b/cicd/k8s-calico-ipsec-ha/config.sh new file mode 100755 index 000000000..be48bd3bc --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/config.sh @@ -0,0 +1,63 @@ +#!/bin/bash +#VMs=$(vagrant global-status | grep -i virtualbox) +#while IFS= read -a VMs; do +# read -a vm <<< "$VMs" +# cd ${vm[4]} 2>&1>/dev/null +# echo "Destroying ${vm[1]}" +# vagrant destroy -f ${vm[1]} +# cd - 2>&1>/dev/null +#done <<< "$VMs" + +vagrant up + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + break; + fi + echo "Will try after 10s" + sleep 10 +done + +sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1 + +#Create fullnat Service +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_default.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_default.yml' 2> /dev/null +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + break; + fi + echo "Will try after 10s" + sleep 10 +done +if [[ $fin == 0 ]]; then + echo "Cluster is not ready" + exit 1 +fi +echo "Cluster is ready"
diff --git a/cicd/k8s-calico-ipsec-ha/configs/config b/cicd/k8s-calico-ipsec-ha/configs/config new file mode 100644 index 000000000..c92d9659b --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/configs/config @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data:
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJU3hkSVAzQkFxYjh3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBNU1qRXdPREE1TXpGYUZ3MHpOREE1TVRrd09ERTBNekZhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURGVkRob2k4ZmNHcHVXQ0ZDK1QveDVhT2pHZkZmZ2h5TWt1WTM3QWY1dDBWMitxYklPY09OWnZLeWYKTm8wNUJWZUU3UjNHVEs5TkVMRFI3Y05WclNIdldXQ3k0Qnhmb3hyNjNIRS9HQzMzTU1OczFaTmlYZk41SmRTNwp2QXdCbkJ1SFhqeHpUVkd1VnJjMWpmZWoyM1dvYkh1UHVYbUtsaG4wYm93Q3oyR3JjSURwTGNlUFljTXh0ZXdkCm5ITXJ5SDkyQWVMNVNzVFdlWDYwazlDV1VUREJoM3hKSTB0NVFCRWx6elNQb0FJcUlWWkhrVXZkUm9QWjQ2Q3EKTllCZEpoYU1MdlJVRDVhU1pRMjRPSjhnblp3WW1LK29oejV2VlRvamwvUGFzMlcybStQOXRIM216dXhjK0xEaApEUFFIVXJaQlZTMCtyUzFneDNDcHVLdWFmREtmQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSc0NRTlJPR2xabkdxeXB0OU94Q3haQm5KRkRUQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVJUYjllWlJmWgpsMWFmRExtTEZzbTdnM1lQU3R6bnIyVi9RS3VXNTVUa2pvaWlOUC9kMzY5Y2U4QlJWTHBXV1VOdURRRUhWS21aCjhLSnJxbXZOMzBFaDVqM05OV1MxR1liamlTTmdnNXFiTHFUL3FWb3Z1SzAvL0NSRVEyekNtT0JIaGM5VkhJdFgKS1lmZk1TWnl3czI0WGNKYno0aEdEazFpbk9tRS9xWnFQeEJFTXNpMmZmSFBXTnRoTGh5RitvTkhucU5hN3F6dwpkRmE1RlgyTVpjN25xVUQ4TW56VnJuV2ZRRTBmT3VDVEVDZUJnSjFHYU1pK1p5SmVGU2lpc1NDbjg2enAwR0lICnlrZjhJckU1UStQV1Q0dTNxbndobjN3QzNBQ3ZkRFF0cWpIYzF1SVpIZXlXdE9TQWtQM1UyZWZPSDM1c2tKalYKcWo4MFUzTjBlcEVkCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: https://192.168.80.250:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLRENDQWhDZ0F3SUJBZ0lIRUF3aCtWNUZ6ekFOQmdrcWhraUc5dzBCQVFzRkFEQVZNUk13RVFZRFZRUUQKRXdwcmRXSmxjbTVsZEdWek1CNFhEVEkwTURreU1UQTRNRGt6TVZvWERUSTFNRGt5TVRBNE1UUXpORm93UERFZgpNQjBHQTFVRUNoTVdhM1ZpWldGa2JUcGpiSFZ6ZEdWeUxXRmtiV2x1Y3pFWk1CY0dBMVVFQXhNUWEzVmlaWEp1ClpYUmxjeTFoWkcxcGJqQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxhbFhzMTAKbzc4WGQyT3dWTGdITHBVSTlMR2piNnhvN3ByVDRoV25kc01BRmgvUVNYTXRaYnZJRmZPeU9ERGtWTWFhYTBXagpVU3BXalo0b2luZHVGTE5CdElRaEdZUXQzTGRYbG1jeGJpZjFQKzkwRFkxOWtZbXllU05BL0lEVHRyNHV6U0FLCmt4VjVZcFM4eFk2U1pFWExkYzlEUDNHU2tBZGhaWlRFekpOZUIyckVpMzlITzJ1R2NZVDYzaktTM09TNE4vTksKSHh2aVRmRVRPWlNKbmEvZkgvWThpWUkwVlN0blN1bTVtY284ZnA5bFhaN0lRMW1NWGtoQ1NjWFNhcldpdFM2VQpRNFI2LzlUUldBd1h5cHRuOWVYWHNZS3pkb1VrMVlhdEJWMTE4MDdIeVpqU0diOFVSOWxWUjlURER1Q3k0WXZDCjg2YmIwbFEzb2xXZEJmY0NBd0VBQWFOV01GUXdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CTUdBMVVkSlFRTU1Bb0cKQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVYkFrRFVUaHBXWnhxc3FiZgpUc1FzV1FaeVJRMHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQWpjcFllUnRQU01lVk9YL3BRT2RweEY3NXVICktZbnpqU1l5VzgrSE4zdG93SGszbE1MVFJZYURVdzdLU2N1OTlSVHJxTTdaYmdLcDFMRjhWNUtkZmIzeklMaDkKTytWdEdlRGZZbWxIc0FaMS9XNWpUdkJHY3dGYTR1U1l4SW9BRzYrMkQxQ2tUM3hVc2h4QXowaXA4M2xLUWpCWQpDRTF6aVYyYUI3QXZjbXJZblRTSW9KWVFQdW5GeWFWV3Q0aEppYmVYZ0pMOFVaY2ZmeUx5RHhCOENubjdwcWdrCk1Gb004cm9mWkxxR1JMd29kVGhwQWozVGxhYW02NUo3MGh3Zys5N09JVHB2d1FkblhHRmkvWlQzbElFUTlEL0QKYmJ6N3g5Q0hKUVpBallWUURYNy8raGluTnMzMHhnMnloOVFiOGhzeWZSNGNVVXA0dFgzbFhDYkVWK1k9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHFWZXpYU2p2eGQzWTdCVXVBY3VsUWowc2FOdnJHanVtdFBpRmFkMnd3QVdIOUJKCmN5MWx1OGdWODdJNE1PUlV4cHByUmFOUktsYU5uaWlLZDI0VXMwRzBoQ0VaaEMzY3QxZVdaekZ1Si9VLzczUU4KalgyUmliSjVJMEQ4Z05PMnZpN05JQXFURlhsaWxMekZqcEprUmN0MXowTS9jWktRQjJGbGxNVE1rMTRIYXNTTApmMGM3YTRaeGhQcmVNcExjNUxnMzgwb2ZHK0pOOFJNNWxJbWRyOThmOWp5SmdqUlZLMmRLNmJtWnlqeCtuMlZkCm5zaERXWXhlU0VKSnhkSnF0YUsxTHBSRGhIci8xTkZZREJmS20yZjE1ZGV4Z3JOMmhTVFZocTBGWFhYelRzZkoKbU5JWnZ4UkgyVlZIMU1NTzRMTGhpOEx6cHR2U1ZEZWlWWjBGOXdJREFRQUJBb0lCQURlNHQvV0NjanBvVkZvcgpmNDB0VTc3UUhxYmJMOFN2dHF1eG5MWHlnejcybGNsYTZDMmxXZll5RXRCb1BOQmR1S2Rta1BlTDNjVkc1cW1yCndLS3pDTW12Yi8wbmcvdFphbzdjdjl2M3JwMnVLclZJK0tHRC81Uzh5RXhERlVYUVVWNHdOUkJqYVlBUnFrdXEKVTdNdFlqNFExbjVIQytJQVRzU1JxRVg4ekZUZ3hPV2Z2eE0zeGE2MEJybFY0UlZxWDBONGtRNVE0R2pRSDg3dApUbG9YWWZIdFU4OWlyemJJMVdmQWhLQjkyakROWnFhUWNKbmJac0phd0hyc2tZU1pxT0xWWG4zK3dDdTFHVitYClFGSkVSU1FNdjZwUGtzaE4xMmp2eFdPblkxdWhWR2xTcTRQOHY4czI1cTNmSHZ2MWdDTEpOYXFTbmV2VUNnMEIKZm9wblZuRUNnWUVBMEN4SGx1cWRSRjJIalZ5WUNzT0ViT0RHZjF6RSt1cUsvemRiWndwWnFmQVhmM1owT1gxdAptNHpYSHNpdDBCQVVkb2FqcEhyQzMwMHM0TG5YUE9jWUNBaUlLSFo4MFBwTmNadzRjd0Z3ZWgyYnZRVzhTWHZsCitkcUk1NTBETVh3b0daU25EaWZ2dk11UzBnaGlwVkI0M3BkeWxndTRqL200aHRRRU9HYUxxVzBDZ1lFQTRKdTMKcWpRalFkS3hKMVc2d2VBLzAzcEhqUjdVcE5tR2hTZG9ONldLS3VRdGVObHR1d09zcmZ4cWsxQ3Jram9ZK3pkcgp0N3A5NUlaemVyZlA5dWpHM092REwxWUdHK0NqaXJySFdTQ2VEVkYvZERod0RTVi9rUU83L0duL3g1blprTU10CjI3SythYWN2bUFTbStwRlg4RkNCcW1FZzVHYURLL0JSTFAwblVuTUNnWUJNRE1FaWl4ZGxFRjRpOEg3Qy8rWnkKK0RMTCtKSWxzR1dURXlBYkpwYlhGRlVoUlo3MEdiUmZMVGF4a2xLZFBpS2JvbGhLRUdiOHVPNzZNaXBGbjQ4Ugo4RHY5dkR1aEJMQWlIeFlvUFpCZnJFMW00cCtFb1BURk1HYnZabGJ5VHc4L09TVkdjS1NPYmpoMjdvVkxLM2pjCnFLczkwWTVkV3JkazJqT29meDNxaVFLQmdRQ2k3ckxrS0NBdlhweWVFU1dDQUorc1ZEYlZXemtjUUtQQnNkMTcKQ0gzYk1Wei9IcExvSi9rNjJUR2luZEZvS1BiNFBiTEpPK0taUlNNK01Wc09ITHd0aEdVNTBHaHNEcC8wUWh2YwpQcm9JcFVjVTB1QU4yVmJacG1EVzhUblgzSFlqK2tJbzdsZ01vbmYzQ2VEclFVWU9rUWdPeFppcnR1V09tU3o1CnVzYXNtd0tCZ0RySFc4bS9RM2RDUlEvYkpFVjVSNG9XY3JZTDFWUzQ1T2pXU1FhSXZtNEV2K3IxL3EwR0FobWkKcWUvM21sazdCMURHdWhOZERDRjJ1QThYMUd3Y2hSSzd1MCtIUytsSytWNkhHZU0vbWcrLzB2Zk5YbDJBMGRaMwpXNE1iSVp1bWhmaExCckVZWFZOR0RGMThwK3RUSDJSY0NvNDRkRkVHR2hucW4wUXFLdmNVCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/cicd/k8s-calico-ipsec-ha/configs/join.sh b/cicd/k8s-calico-ipsec-ha/configs/join.sh new file mode 100755 index 000000000..e9b891f48 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/configs/join.sh @@ -0,0 +1 @@ +kubeadm join 192.168.80.250:6443 --token q7uek8.tofg1q4ikqrr9dex --discovery-token-ca-cert-hash sha256:637dfc7589f4acea45a58d14ce25f7d3d538873f60b6f6ba859b44b0ddb90ccf diff --git a/cicd/k8s-calico-ipsec-ha/host_ipsec_config/charon.conf b/cicd/k8s-calico-ipsec-ha/host_ipsec_config/charon.conf new file mode 100644 index 000000000..5b55fbee6 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/host_ipsec_config/charon.conf @@ -0,0 +1,408 @@ +# Options for the charon IKE daemon. +charon { + + # Deliberately violate the IKE standard's requirement and allow the use of + # private algorithm identifiers, even if the peer implementation is unknown. + # accept_private_algs = no + + # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode. + # accept_unencrypted_mainmode_messages = no + + # Maximum number of half-open IKE_SAs for a single peer IP. 
+ # block_threshold = 5 + + # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP + # should be saved under a unique file name derived from the public key of + # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or + # /etc/swanctl/x509crl (vici), respectively. + # cache_crls = no + + # Whether relations in validated certificate chains should be cached in + # memory. + # cert_cache = yes + + # Whether to use DPD to check if the current path still works after any + # changes to interfaces/addresses. + # check_current_path = no + + # Send the Cisco FlexVPN vendor ID payload (IKEv2 only). + # cisco_flexvpn = no + + # Send Cisco Unity vendor ID payload (IKEv1 only). + # cisco_unity = no + + # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed. + # close_ike_on_child_failure = no + + # Number of half-open IKE_SAs that activate the cookie mechanism. + # cookie_threshold = 10 + + # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only). + # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. + # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Violate RFC 5998 and use EAP-only authentication even if the peer did not + # send an EAP_ONLY_AUTHENTICATION notify during IKE_AUTH. + # force_eap_only_authentication = no + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. + # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. + # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. 
+ # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. + # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + # install_virtual_ip = yes + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. + # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Number of seconds the keep alive interval may be exceeded before a DPD is + # sent instead of a NAT keep alive (0 to disable). This is only useful if a + # clock is used that includes time spent suspended (e.g. CLOCK_BOOTTIME). + # keep_alive_dpd_margin = 0s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. + # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. + # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). + # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. + # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). 
+ # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. + # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). + # spi_mask = 0x0000000000000000 + + # The upper limit for SPIs requested from the kernel for IPsec SAs. + # spi_max = 0xcfffffff + + # The lower limit for SPIs requested from the kernel for IPsec SAs. + # spi_min = 0xc0000000 + + # Number of worker threads in charon. + # threads = 16 + + # Name of the user the daemon changes to after startup. + # user = + + crypto_test { + + # Benchmark crypto algorithms and order them by efficiency. + # bench = no + + # Buffer size used for crypto benchmark. + # bench_size = 1024 + + # Time in ms during which crypto algorithm performance is measured. + # bench_time = 50 + + # Test crypto algorithms during registration (requires test vectors + # provided by the test-vectors plugin). + # on_add = no + + # Test crypto algorithms on each crypto primitive instantiation. + # on_create = no + + # Strictly require at least one test vector to enable an algorithm. + # required = no + + # Whether to test RNG with TRUE quality; requires a lot of entropy. + # rng_true = no + + } + + host_resolver { + + # Maximum number of concurrent resolver threads (they are terminated if + # unused). + # max_threads = 3 + + # Minimum number of resolver threads to keep around. + # min_threads = 0 + + } + + leak_detective { + + # Includes source file names and line numbers in leak detective output. + # detailed = yes + + # Threshold in bytes for leaks to be reported (0 to report all). + # usage_threshold = 10240 + + # Threshold in number of allocations for leaks to be reported (0 to + # report all). + # usage_threshold_count = 0 + + } + + processor { + + # Section to configure the number of reserved threads per priority class + # see JOB PRIORITY MANAGEMENT in strongswan.conf(5). 
+ priority_threads { + + } + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is started. + start-scripts { + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is terminated. + stop-scripts { + + } + + tls { + + # List of TLS encryption ciphers. + # cipher = + + # List of TLS key exchange groups. + # ke_group = + + # List of TLS key exchange methods. + # key_exchange = + + # List of TLS MAC algorithms. + # mac = + + # Whether to include CAs in a server's CertificateRequest message. + # send_certreq_authorities = yes + + # List of TLS signature schemes. + # signature = + + # List of TLS cipher suites. + # suites = + + # Maximum TLS version to negotiate. + # version_max = 1.2 + + # Minimum TLS version to negotiate. + # version_min = 1.2 + + } + + x509 { + + # Discard certificates with unsupported or unknown critical extensions. + # enforce_critical = yes + + } + +} + diff --git a/cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.conf b/cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.conf new file mode 100644 index 000000000..e4bfeba63 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.conf @@ -0,0 +1,129 @@ +#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu) + +# ipsec.conf - strongSwan IPsec configuration file + +# basic configuration + +config setup + charondebug="cfg 2, ike 3" +# strictcrlpolicy=yes +# uniqueids = no + +# Add connections here. + +# Sample VPN connections + +#conn sample-self-signed +# leftsubnet=10.1.0.0/16 +# leftcert=selfCert.der +# leftsendcert=never +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightcert=peerCert.der +# auto=start + +#conn sample-with-ca-cert +# leftsubnet=10.1.0.0/16 +# leftcert=myCert.pem +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightid="C=CH, O=Linux strongSwan CN=peer name" +# auto=start + + +conn host-to-llb1 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + leftsubnet=30.30.30.1,77.77.100.1 + rightsubnet=20.20.20.1,77.77.100.254 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=192.168.90.9 + right=192.168.90.252 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=100 + auto=start + +conn host-to-llb2 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! 
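+    # (Each comma-separated entry above is one IKE proposal of the form
+    # encryption-integrity-DH-group; the trailing "!" makes the proposal
+    # list strict, so peers offering anything else are rejected.)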
+    # Lifetime : 28800 seconds
+    ikelifetime=28800s
+    # Phase 1 Negotiation Mode  : main
+    aggressive=no
+    # Protocol                 : esp
+    # Encryption Algorithm     : aes-128-cbc
+    # Authentication Algorithm : hmac-sha1-96
+    # Perfect Forward Secrecy  : Diffie-Hellman Group 2
+    esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024!
+    # Lifetime : 3600 seconds
+    lifetime=3600s
+    # Mode : tunnel
+    type=tunnel
+    # DPD Interval : 10
+    dpddelay=10s
+    # DPD Retries : 3
+    dpdtimeout=30s
+    # Tuning Parameters for AWS Virtual Private Gateway:
+    keyexchange=ikev2
+    #keyingtries=%forever
+    rekey=yes
+    reauth=no
+    dpdaction=restart
+    closeaction=restart
+    #left=%defaultroute
+    #leftsubnet=0.0.0.0/0,::/0
+    #rightsubnet=0.0.0.0/0,::/0
+    #leftsubnet=192.168.10.175
+    #rightsubnet=192.168.10.200
+    #leftupdown=/etc/strongswan/ipsec-vti.sh
+    #left=7.7.101.1
+    #right=7.7.101.254
+    leftsubnet=30.30.30.1,77.77.101.1
+    rightsubnet=20.20.20.1,77.77.101.254
+    #leftupdown=/etc/strongswan/ipsec-vti.sh
+    left=192.168.90.9
+    right=192.168.90.253
+    installpolicy=yes
+    compress=no
+    mobike=no
+    #VTI Key
+    mark=101
+    auto=start
diff --git a/cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.secrets b/cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.secrets
new file mode 100644
index 000000000..bf7d0f83f
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/host_ipsec_config/ipsec.secrets
@@ -0,0 +1,4 @@
+#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu)
+
+192.168.90.9 192.168.90.252 : PSK "loxilb@1234!"
+192.168.90.9 192.168.90.253 : PSK "loxilb@1234!"
diff --git a/cicd/k8s-calico-ipsec-ha/host_validation.sh b/cicd/k8s-calico-ipsec-ha/host_validation.sh
new file mode 100755
index 000000000..efd5aa638
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/host_validation.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+extIP=$(cat /vagrant/extIP)
+
+code=0
+
+echo Service IP: $extIP
+
+numECMP=$(birdc show route | grep $extIP -A 3 | grep via | wc -l)
+
+birdc show route | grep $extIP -A 3
+
+if [ $numECMP == "2" ]; then
+    echo "Host route [OK]"
+else
+    echo "Host route [NOK]"
+fi
+echo -e "\n*********************************************"
+echo "Testing Service"
+echo "*********************************************"
+
+# iperf client accessing fullnat service
+stdbuf -oL nohup iperf -c 20.20.20.1 -p 56002 -t 100 -i 1 -b 100M -B 30.30.30.1 &> iperff.out &
+
+# iperf client accessing default service
+stdbuf -oL nohup iperf -c 20.20.20.1 -p 56003 -t 100 -i 1 -b 100M -B 30.30.30.1 &> iperfd.out &
+
+echo "iperf client started"
+echo "phase-1 done"
diff --git a/cicd/k8s-calico-ipsec-ha/host_validation2.sh b/cicd/k8s-calico-ipsec-ha/host_validation2.sh
new file mode 100755
index 000000000..3ee6116cd
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/host_validation2.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+code=0
+for((i=0;i<50;i++))
+do
+    echo -e "\n --- Host Client status after HA ---\n"
+    echo -e "\niperf fullnat"
+    tail -n 1 iperff.out
+
+    echo -e "\niperf default"
+    tail -n 1 iperfd.out
+
+    ifin1=$(tail -n 5 iperfd.out | grep "0.0000-100" | xargs | cut -d ' ' -f 7)
+    ifin2=$(tail -n 5 iperff.out | grep "0.0000-100" | xargs | cut -d ' ' -f 7)
+
+    if [[ ! -z $ifin1 ]]; then
+        iperfd_res=1
+        echo "iperf(default) done."
+    fi
+    if [[ ! -z $ifin2 ]]; then
+        iperff_res=1
+        echo "iperf(fullnat) done."
+    fi
+
+    if [[ $iperff_res == 1 && $iperfd_res == 1 ]]; then
+        echo "iperf done."
+        break
+    fi
+    sleep 5
+done
+
+
+pkill iperf
+echo -e "\n\n**********************************************************\n\n"
+if [[ $iperff_res == 1 ]]; then
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(fullnat)\t[OK]"
+else
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(fullnat)\t[FAILED]"
+    code=1
+fi
+
+if [[ $iperfd_res == 1 ]]; then
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(default)\t[OK]"
+else
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(default)\t[FAILED]"
+    code=1
+fi
+echo -e "\n\n**********************************************************"
+echo $code > /vagrant/status.txt
+exit $code
diff --git a/cicd/k8s-calico-ipsec-ha/host_validation2_with_sctp.sh b/cicd/k8s-calico-ipsec-ha/host_validation2_with_sctp.sh
new file mode 100755
index 000000000..e1ed2501d
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/host_validation2_with_sctp.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+code=0
+for((i=0;i<50;i++))
+do
+    echo -e "\n --- Host Client status after HA ---\n"
+    echo -e "\niperf fullnat"
+    tail -n 1 iperff.out
+
+    echo -e "\niperf default"
+    tail -n 1 iperfd.out
+
+    ifin1=$(tail -n 5 iperfd.out | grep "0.0000-100" | xargs | cut -d ' ' -f 7)
+    ifin2=$(tail -n 5 iperff.out | grep "0.0000-100" | xargs | cut -d ' ' -f 7)
+
+    if [[ ! -z $ifin1 ]]; then
+        iperfd_res=1
+        echo "iperf(default) done."
+    fi
+    if [[ ! -z $ifin2 ]]; then
+        iperff_res=1
+        echo "iperf(fullnat) done."
+    fi
+
+    echo -e "\nsctp_test fullnat"
+    tail -n 30 sdf.out | grep "Client: Sending packets"
+
+    echo -e "\nsctp_test default"
+    tail -n 30 sdd.out | grep "Client: Sending packets"
+
+    sfin1=`tail -n 100 sdd.out | grep "Client: Sending packets.(70000/70000)"`
+    sfin2=`tail -n 100 sdf.out | grep "Client: Sending packets.(70000/70000)"`
+    if [[ ! -z $sfin1 ]]; then
+        sdd_res=1
+        echo "sctp_test(default) done."
+    fi
+    if [[ ! -z $sfin2 ]]; then
+        sdf_res=1
+        echo "sctp_test(fullnat) done."
+    fi
+
+    if [[ $sdd_res == 1 && $sdf_res == 1 && $iperff_res == 1 && $iperfd_res == 1 ]]; then
+        echo "iperf and sctp_test done."
+        break
+    fi
+    sleep 5
+done
+
+
+pkill iperf
+pkill sctp_test
+echo -e "\n\n**********************************************************\n\n"
+if [[ $iperff_res == 1 ]]; then
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(fullnat)\t[OK]"
+else
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(fullnat)\t[FAILED]"
+    code=1
+fi
+
+if [[ $iperfd_res == 1 ]]; then
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(default)\t[OK]"
+else
+    echo -e "K8s-calico-ipsec-ha TCP\t\t(default)\t[FAILED]"
+    code=1
+fi
+
+if [[ $sdf_res == 1 ]]; then
+    echo -e "K8s-calico-ipsec-ha SCTP\t(fullnat)\t[OK]"
+else
+    echo -e "K8s-calico-ipsec-ha SCTP\t(fullnat)\t[FAILED]"
+    code=1
+fi
+
+if [[ $sdd_res == 1 ]]; then
+    echo -e "K8s-calico-ipsec-ha SCTP\t(default)\t[OK]"
+else
+    echo -e "K8s-calico-ipsec-ha SCTP\t(default)\t[FAILED]"
+    code=1
+fi
+echo -e "\n\n**********************************************************"
+echo $code > /vagrant/status.txt
+exit $code
diff --git a/cicd/k8s-calico-ipsec-ha/host_validation_with_sctp.sh b/cicd/k8s-calico-ipsec-ha/host_validation_with_sctp.sh
new file mode 100755
index 000000000..9994a5b3f
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/host_validation_with_sctp.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+extIP=$(cat /vagrant/extIP)
+
+code=0
+
+echo Service IP: $extIP
+
+numECMP=$(birdc show route | grep $extIP -A 3 | grep via | wc -l)
+
+birdc show route | grep $extIP -A 3
+
+if [ $numECMP == "2" ]; then
+    echo "Host route [OK]"
+else
+    echo "Host route [NOK]"
+fi
+echo -e "\n*********************************************"
+echo "Testing Service"
+echo "*********************************************"
+
+# iperf client accessing fullnat service
+stdbuf -oL nohup iperf -c 20.20.20.1 -p 56002 -t 100 -i 1 -b 100M -B 30.30.30.1 &> iperff.out &
+
+# iperf client accessing default service
+stdbuf -oL nohup iperf -c 20.20.20.1 -p 56003 -t 100 -i 1 -b 100M -B 30.30.30.1 &> iperfd.out &
+
+echo "iperf client started"
+
+sleep 1
+
+stdbuf -oL nohup sctp_test -H 30.30.30.1 -h 20.20.20.1 -p 56004 -s -m 1400 -x 70000 &> sdf.out &
+stdbuf -oL nohup sctp_test -H 30.30.30.1 -h 20.20.20.1 -p 56005 -s -m 1400 -x 70000 &> sdd.out &
+
+echo "sctp_test client started"
+
+echo "phase-1 done"
+exit 0
diff --git a/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/charon.conf b/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/charon.conf
new file mode 100644
index 000000000..926ae24aa
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/charon.conf
@@ -0,0 +1,376 @@
+# Options for the charon IKE daemon.
+charon {
+
+    # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode.
+    # accept_unencrypted_mainmode_messages = no
+
+    # Maximum number of half-open IKE_SAs for a single peer IP.
+    # block_threshold = 5
+
+    # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP
+    # should be saved under a unique file name derived from the public key of
+    # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or
+    # /etc/swanctl/x509crl (vici), respectively.
+    # cache_crls = no
+
+    # Whether relations in validated certificate chains should be cached in
+    # memory.
+    # cert_cache = yes
+
+    # Send Cisco Unity vendor ID payload (IKEv1 only).
+    # cisco_unity = no
+
+    # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed.
+    # close_ike_on_child_failure = no
+
+    # Number of half-open IKE_SAs that activate the cookie mechanism.
+    # cookie_threshold = 10
+
+    # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only).
+ # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. + # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Compliance with the errata for RFC 4753. + # ecp_x_coordinate_only = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. + # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. + # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. + # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. + # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + install_virtual_ip = no + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. + # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. 
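+    # (With this enabled, a plugin is only loaded when its own
+    # charon.plugins.<name>.load setting in strongswan.conf requests it.)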
+ # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. + # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). + # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. + # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). + # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. 
+ # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). + # spi_mask = 0x0000000000000000 + + # The upper limit for SPIs requested from the kernel for IPsec SAs. + # spi_max = 0xcfffffff + + # The lower limit for SPIs requested from the kernel for IPsec SAs. + # spi_min = 0xc0000000 + + # Number of worker threads in charon. + # threads = 16 + + # Name of the user the daemon changes to after startup. + # user = + + crypto_test { + + # Benchmark crypto algorithms and order them by efficiency. + # bench = no + + # Buffer size used for crypto benchmark. + # bench_size = 1024 + + # Time in ms during which crypto algorithm performance is measured. + # bench_time = 50 + + # Test crypto algorithms during registration (requires test vectors + # provided by the test-vectors plugin). + # on_add = no + + # Test crypto algorithms on each crypto primitive instantiation. + # on_create = no + + # Strictly require at least one test vector to enable an algorithm. + # required = no + + # Whether to test RNG with TRUE quality; requires a lot of entropy. + # rng_true = no + + } + + host_resolver { + + # Maximum number of concurrent resolver threads (they are terminated if + # unused). + # max_threads = 3 + + # Minimum number of resolver threads to keep around. + # min_threads = 0 + + } + + leak_detective { + + # Includes source file names and line numbers in leak detective output. + # detailed = yes + + # Threshold in bytes for leaks to be reported (0 to report all). + # usage_threshold = 10240 + + # Threshold in number of allocations for leaks to be reported (0 to + # report all). + # usage_threshold_count = 0 + + } + + processor { + + # Section to configure the number of reserved threads per priority class + # see JOB PRIORITY MANAGEMENT in strongswan.conf(5). + priority_threads { + + } + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is started. + start-scripts { + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is terminated. + stop-scripts { + + } + + tls { + + # List of TLS encryption ciphers. + # cipher = + + # List of TLS key exchange methods. + # key_exchange = + + # List of TLS MAC algorithms. + # mac = + + # List of TLS cipher suites. + # suites = + + } + + x509 { + + # Discard certificates with unsupported or unknown critical extensions. + # enforce_critical = yes + + } + +} + diff --git a/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.conf b/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.conf new file mode 100644 index 000000000..f9973bb19 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.conf @@ -0,0 +1,77 @@ +#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu) + +# ipsec.conf - strongSwan IPsec configuration file + +# basic configuration + +config setup + charondebug="cfg 2, ike 3" +# strictcrlpolicy=yes +# uniqueids = no + +# Add connections here. 
+ +# Sample VPN connections + +#conn sample-self-signed +# leftsubnet=10.1.0.0/16 +# leftcert=selfCert.der +# leftsendcert=never +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightcert=peerCert.der +# auto=start + +#conn sample-with-ca-cert +# leftsubnet=10.1.0.0/16 +# leftcert=myCert.pem +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightid="C=CH, O=Linux strongSwan CN=peer name" +# auto=start + +conn llb1-to-host + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + leftsubnet=20.20.20.1,77.77.100.254 + rightsubnet=30.30.30.1,77.77.100.1 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=192.168.90.252 + right=192.168.90.9 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=100 + auto=start diff --git a/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.secrets b/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.secrets new file mode 100644 index 000000000..5cec0326f --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/llb1_ipsec_config/ipsec.secrets @@ -0,0 +1,3 @@ +#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu) + +192.168.90.252 192.168.90.9 : PSK "loxilb@1234!" diff --git a/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/charon.conf b/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/charon.conf new file mode 100644 index 000000000..926ae24aa --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/charon.conf @@ -0,0 +1,376 @@ +# Options for the charon IKE daemon. +charon { + + # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode. + # accept_unencrypted_mainmode_messages = no + + # Maximum number of half-open IKE_SAs for a single peer IP. + # block_threshold = 5 + + # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP + # should be saved under a unique file name derived from the public key of + # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or + # /etc/swanctl/x509crl (vici), respectively. + # cache_crls = no + + # Whether relations in validated certificate chains should be cached in + # memory. + # cert_cache = yes + + # Send Cisco Unity vendor ID payload (IKEv1 only). + # cisco_unity = no + + # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed. + # close_ike_on_child_failure = no + + # Number of half-open IKE_SAs that activate the cookie mechanism. + # cookie_threshold = 10 + + # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only). + # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. 
+ # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Compliance with the errata for RFC 4753. + # ecp_x_coordinate_only = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. + # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. + # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. + # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. + # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + install_virtual_ip = no + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. + # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. + # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. 
+ # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). + # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. + # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). + # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. + # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). 
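+    # (For illustration: spi_mask = 0xf000000000000000 together with
+    # spi_label = 0x3000000000000000 would force the top nibble of every
+    # locally generated IKE SPI to 0x3.)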
+    # spi_mask = 0x0000000000000000
+
+    # The upper limit for SPIs requested from the kernel for IPsec SAs.
+    # spi_max = 0xcfffffff
+
+    # The lower limit for SPIs requested from the kernel for IPsec SAs.
+    # spi_min = 0xc0000000
+
+    # Number of worker threads in charon.
+    # threads = 16
+
+    # Name of the user the daemon changes to after startup.
+    # user =
+
+    crypto_test {
+
+        # Benchmark crypto algorithms and order them by efficiency.
+        # bench = no
+
+        # Buffer size used for crypto benchmark.
+        # bench_size = 1024
+
+        # Time in ms during which crypto algorithm performance is measured.
+        # bench_time = 50
+
+        # Test crypto algorithms during registration (requires test vectors
+        # provided by the test-vectors plugin).
+        # on_add = no
+
+        # Test crypto algorithms on each crypto primitive instantiation.
+        # on_create = no
+
+        # Strictly require at least one test vector to enable an algorithm.
+        # required = no
+
+        # Whether to test RNG with TRUE quality; requires a lot of entropy.
+        # rng_true = no
+
+    }
+
+    host_resolver {
+
+        # Maximum number of concurrent resolver threads (they are terminated if
+        # unused).
+        # max_threads = 3
+
+        # Minimum number of resolver threads to keep around.
+        # min_threads = 0
+
+    }
+
+    leak_detective {
+
+        # Includes source file names and line numbers in leak detective output.
+        # detailed = yes
+
+        # Threshold in bytes for leaks to be reported (0 to report all).
+        # usage_threshold = 10240
+
+        # Threshold in number of allocations for leaks to be reported (0 to
+        # report all).
+        # usage_threshold_count = 0
+
+    }
+
+    processor {
+
+        # Section to configure the number of reserved threads per priority class
+        # see JOB PRIORITY MANAGEMENT in strongswan.conf(5).
+        priority_threads {
+
+        }
+
+    }
+
+    # Section containing a list of scripts (name = path) that are executed when
+    # the daemon is started.
+    start-scripts {
+
+    }
+
+    # Section containing a list of scripts (name = path) that are executed when
+    # the daemon is terminated.
+    stop-scripts {
+
+    }
+
+    tls {
+
+        # List of TLS encryption ciphers.
+        # cipher =
+
+        # List of TLS key exchange methods.
+        # key_exchange =
+
+        # List of TLS MAC algorithms.
+        # mac =
+
+        # List of TLS cipher suites.
+        # suites =
+
+    }
+
+    x509 {
+
+        # Discard certificates with unsupported or unknown critical extensions.
+        # enforce_critical = yes
+
+    }
+
+}
+
diff --git a/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.conf b/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.conf
new file mode 100644
index 000000000..98b7eb4c6
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.conf
@@ -0,0 +1,82 @@
+#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu)
+
+# ipsec.conf - strongSwan IPsec configuration file
+
+# basic configuration
+
+config setup
+    charondebug="cfg 2, ike 3"
+#   strictcrlpolicy=yes
+#   uniqueids = no
+
+# Add connections here.
+
+# Sample VPN connections
+
+#conn sample-self-signed
+#      leftsubnet=10.1.0.0/16
+#      leftcert=selfCert.der
+#      leftsendcert=never
+#      right=192.168.0.2
+#      rightsubnet=10.2.0.0/16
+#      rightcert=peerCert.der
+#      auto=start
+
+#conn sample-with-ca-cert
+#      leftsubnet=10.1.0.0/16
+#      leftcert=myCert.pem
+#      right=192.168.0.2
+#      rightsubnet=10.2.0.0/16
+#      rightid="C=CH, O=Linux strongSwan CN=peer name"
+#      auto=start
+
+conn llb2-to-host
+    leftauth=psk
+    rightauth=psk
+    # Encryption Algorithm     : aes-128-cbc
+    # Authentication Algorithm : sha1
+    # Perfect Forward Secrecy  : Diffie-Hellman Group 2
+    ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024!
+ # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + #leftsubnet=192.168.10.200 + #rightsubnet=192.168.10.175 + #leftupdown=/etc/strongswan/ipsec-vti.sh + #left=7.7.101.254 + #right=7.7.101.1 + leftsubnet=20.20.20.1,77.77.101.254 + rightsubnet=30.30.30.1,77.77.101.1 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=192.168.90.253 + right=192.168.90.9 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=101 + auto=start diff --git a/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.secrets b/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.secrets new file mode 100644 index 000000000..a13a6efd2 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/llb2_ipsec_config/ipsec.secrets @@ -0,0 +1,3 @@ +#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu) + +192.168.90.253 192.168.90.9 : PSK "loxilb@1234!" diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh new file mode 100644 index 000000000..c01ad688f --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh @@ -0,0 +1,93 @@ +#!/bin/bash +# +# Common setup for all servers (Control Plane and Nodes) + +set -euxo pipefail + +# Variable Declaration + +# DNS Setting +if [ ! 
-d /etc/systemd/resolved.conf.d ]; then
+    sudo mkdir /etc/systemd/resolved.conf.d/
+fi
+cat <<EOF | sudo tee /etc/systemd/resolved.conf.d/dns_servers.conf
+[Resolve]
+DNS=${DNS_SERVERS}
+EOF
+
+sudo systemctl restart systemd-resolved
+
+# disable swap
+sudo swapoff -a
+
+# keeps the swap off during reboot
+(crontab -l 2>/dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true
+sudo apt-get update -y
+# Install CRI-O Runtime
+
+VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"
+CRIO_VERSION=1.27
+# Create the .conf file to load the modules at bootup
+cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
+overlay
+br_netfilter
+EOF
+
+sudo modprobe overlay
+sudo modprobe br_netfilter
+
+# Set up required sysctl params, these persist across reboots.
+cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
+net.bridge.bridge-nf-call-iptables  = 1
+net.ipv4.ip_forward                 = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+EOF
+
+sudo sysctl --system
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_22.04/ /
+EOF
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$CRIO_VERSION.list
+deb http://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$CRIO_VERSION/xUbuntu_22.04/ /
+EOF
+
+curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$CRIO_VERSION/xUbuntu_22.04/Release.key | sudo apt-key add -
+curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_22.04/Release.key | sudo apt-key add -
+
+sudo apt-get update
+sudo apt-get install -y cri-o cri-o-runc
+
+cat >> /etc/default/crio << EOF
+${ENVIRONMENT}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl enable crio --now
+
+echo "CRI runtime installed successfully"
+
+sudo apt-get update
+sudo apt-get install -y apt-transport-https ca-certificates curl gpg
+curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
+
+sudo apt-get update -y
+sudo apt-get install -y kubelet kubectl kubeadm
+sudo apt-get update -y
+sudo apt-get install -y jq
+sudo apt-get install -y ipvsadm
+
+local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')"
+cat > /etc/default/kubelet << EOF
+KUBELET_EXTRA_ARGS=--node-ip=$local_ip
+${ENVIRONMENT}
+EOF
diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/host.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/host.sh
new file mode 100755
index 000000000..deaaf1438
--- /dev/null
+++ b/cicd/k8s-calico-ipsec-ha/node_scripts/host.sh
@@ -0,0 +1,29 @@
+# Install Bird to work with k3s
+sudo apt-get update
+sudo apt-get -y install bird2 lksctp-tools iperf
+sudo apt-get install -y iputils-ping curl vim iptables strongswan strongswan-swanctl
+
+sudo ip addr add 30.30.30.1/32 dev lo
+
+sudo ip link add vti100 type vti key 100 remote 192.168.90.252 local 192.168.90.9
+sudo ip link set vti100 up
+sudo ip addr add 77.77.100.1/24 remote 77.77.100.254/24 dev vti100
+sudo sysctl -w "net.ipv4.conf.vti100.disable_policy=1"
+
+sudo ip link add vti101 type vti key 101 remote 192.168.90.253 local 192.168.90.9
+sudo ip link set vti101 up
+sudo ip addr add 77.77.101.1/24 remote 77.77.101.254/24 dev vti101
+sudo sysctl -w "net.ipv4.conf.vti101.disable_policy=1"
+
+sudo cp /vagrant/host_ipsec_config/ipsec.conf /etc/
+sudo cp /vagrant/host_ipsec_config/ipsec.secrets /etc/
+sudo cp /vagrant/host_ipsec_config/charon.conf /etc/strongswan.d/
+sudo systemctl restart strongswan-starter
+
+sudo cp -f /vagrant/bird_config/bird.conf /etc/bird/bird.conf
+if [ ! 
-f /var/log/bird.log ]; then + sudo touch /var/log/bird.log +fi +sudo chown bird:bird /var/log/bird.log +sudo service bird restart +echo "Host is up" diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/loxilb1.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/loxilb1.sh new file mode 100644 index 000000000..d76762c31 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/node_scripts/loxilb1.sh @@ -0,0 +1,23 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +apt-get install -y iputils-ping curl vim iptables strongswan strongswan-swanctl +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest -b --cluster=192.168.80.253 --self=0 +docker cp loxilb:/usr/local/sbin/loxicmd ./ +#docker exec -dt loxilb /root/loxilb-io/loxilb/loxilb -b --cluster=192.168.80.253 --self=0 +dexec="docker exec -dt" +$dexec loxilb ip link add vti100 type vti key 100 remote 192.168.90.9 local 192.168.90.252 +$dexec loxilb ip link set vti100 up +$dexec loxilb ip addr add 77.77.100.254/24 remote 77.77.100.1/24 dev vti100 +$dexec loxilb sysctl -w "net.ipv4.conf.vti100.disable_policy=1" + +sudo cp /vagrant/llb1_ipsec_config/ipsec.conf /etc/ +sudo cp /vagrant/llb1_ipsec_config/ipsec.secrets /etc/ +sudo cp /vagrant/llb1_ipsec_config/charon.conf /etc/strongswan.d/ +sudo systemctl restart strongswan-starter + diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/loxilb2.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/loxilb2.sh new file mode 100644 index 000000000..d14946669 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/node_scripts/loxilb2.sh @@ -0,0 +1,26 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +apt-get install -y iputils-ping curl vim iptables strongswan strongswan-swanctl +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest -b --cluster=192.168.80.252 --self=1 +docker cp loxilb:/usr/local/sbin/loxicmd ./ +#docker exec -dt loxilb /root/loxilb-io/loxilb/loxilb -b --cluster=192.168.80.252 --self=1 + +dexec="docker exec -dt" +$dexec loxilb ip link add vti101 type vti key 101 remote 192.168.90.9 local 192.168.90.253 +$dexec loxilb ip link set vti101 up +$dexec loxilb ip addr add 77.77.101.254/24 remote 77.77.101.1/24 dev vti101 +$dexec loxilb sysctl -w "net.ipv4.conf.vti101.disable_policy=1" + + +sudo cp /vagrant/llb2_ipsec_config/ipsec.conf /etc/ +sudo cp /vagrant/llb2_ipsec_config/ipsec.secrets /etc/ +sudo cp /vagrant/llb2_ipsec_config/charon.conf /etc/strongswan.d/ +sudo systemctl restart strongswan-starter + + diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh new file mode 100644 index 000000000..a5cdecaac --- /dev/null +++ 
b/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# +# Setup for Control Plane (Master) servers + +set -euxo pipefail + +NODENAME=$(hostname -s) + +sudo kubeadm config images pull + +echo "Preflight Check Passed: Downloaded All Required Images" + +#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml + +mkdir -p "$HOME"/.kube +sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config +sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config + +# Save Configs to shared /Vagrant location + +# For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration. + +config_path="/vagrant/configs" + +if [ -d $config_path ]; then + rm -f $config_path/* +else + mkdir -p $config_path +fi + +cp -i /etc/kubernetes/admin.conf $config_path/config +touch $config_path/join.sh +chmod +x $config_path/join.sh + +kubeadm token create --print-join-command > $config_path/join.sh + +# Install Calico Network Plugin + +curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O + +kubectl apply -f calico.yaml +kubectl patch configmap/calico-config -n kube-system --type merge \ + -p '{"data":{"veth_mtu": "8900"}}' +kubectl rollout restart daemonset calico-node -n kube-system + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +EOF + +# Install Metrics Server + +kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml + +# Install loxilb +kubectl apply -f /vagrant/yaml/kube-loxilb.yml +kubectl apply -f /vagrant/yaml/loxilb-peer.yml diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/worker.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/worker.sh new file mode 100644 index 000000000..a5754170b --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/node_scripts/worker.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Setup for Node servers + +set -euxo pipefail + +config_path="/vagrant/configs" + +/bin/bash $config_path/join.sh -v + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +NODENAME=$(hostname -s) +kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker +EOF diff --git a/cicd/k8s-calico-ipsec-ha/rmconfig.sh b/cicd/k8s-calico-ipsec-ha/rmconfig.sh new file mode 100755 index 000000000..4c990e5e0 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/rmconfig.sh @@ -0,0 +1,7 @@ +#!/bin/bash +vagrant destroy -f worker2 +vagrant destroy -f worker1 +vagrant destroy -f master +vagrant destroy -f llb1 +vagrant destroy -f llb2 +vagrant destroy -f host diff --git a/cicd/k8s-calico-ipsec-ha/validation.sh b/cicd/k8s-calico-ipsec-ha/validation.sh new file mode 100755 index 000000000..29d6c289a --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/validation.sh @@ -0,0 +1,121 @@ +#!/bin/bash +source ../common.sh +echo k8s-calico-ipsec-ha + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +for((i=0; i<120; i++)) +do + extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-default") + read -a strarr <<< "$extLB" + len=${#strarr[*]} + if 
[[ $((len)) -lt 6 ]]; then + echo "Can't find tcp-lb service" + sleep 1 + continue + fi + if [[ ${strarr[3]} != *"none"* ]]; then + extIP="$(cut -d'-' -f2 <<<${strarr[3]})" + break + fi + echo "No external LB allocated" + sleep 1 +done + +## Any routing updates ?? +#sleep 30 + +echo Service IP : $extIP +echo $extIP > extIP +echo -e "\nEnd Points List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nPod List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nLB List" +echo -e "\n---- LLB1 ----" +echo "******************************************************************************" +vagrant ssh llb1 -c 'sudo ./loxicmd get lb -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\n---- LLB2 ----" +vagrant ssh llb2 -c 'sudo ./loxicmd get lb -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\nEP List" +echo -e "\n---- LLB1 ----" +echo "******************************************************************************" +vagrant ssh llb1 -c 'sudo ./loxicmd get ep -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\n---- LLB2 ----" +vagrant ssh llb2 -c 'sudo ./loxicmd get ep -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\nTEST RESULTS" +echo "******************************************************************************" + +master="llb1" +backup="llb2" + +state=$(curl -sX 'GET' 'http://192.168.80.252:11111/netlox/v1/config/cistate/all' -H 'accept: application/json') + +if [[ $state == *"BACKUP"* ]]; then + master="llb2" + backup="llb1" +fi + +echo -e "\n MASTER\t: $master" +echo -e " BACKUP\t: $backup\n" + +vagrant ssh host -c 'sudo /vagrant/host_validation.sh' 2> /dev/null + +sleep 15 +echo -e "phase-2 begins..\n" + +count=1 +sync=0 +while [[ $count -le 10 ]] ; do +echo -e "\nStatus at MASTER:$master\n" +vagrant ssh $master -c "sudo ./loxicmd get ct | grep est" 2> /dev/null + +echo -e "\nStatus at BACKUP:$backup\n" +vagrant ssh $backup -c "sudo ./loxicmd get ct | grep est" 2> /dev/null + +nres1=$(curl -sX 'GET' 'http://192.168.80.252:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) +nres2=$(curl -sX 'GET' 'http://192.168.80.253:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) + +if [[ ! -z $nres1 && $nres1 != 0 && $nres1 == $nres2 ]]; then + echo -e "\nConnections sync successful!!!\n" + sync=1 + break; +fi +echo -e "\nConnections sync pending.. 
Let's wait a little more..\n" +count=$(( $count + 1 )) +sleep 2 +done + +if [[ $sync == 0 ]]; then + echo -e "\nConnection Sync failed\n" + vagrant ssh host -c 'sudo pkill iperf; sudo pkill sctp_test; sudo rm -rf *.out' + exit 1 +fi + +echo "Restarting MASTER:$master.." +vagrant ssh $master -c 'sudo docker restart loxilb' 2> /dev/null + +sleep 30 + +sudo rm extIP +vagrant ssh host -c 'sudo /vagrant/host_validation2.sh' 2> /dev/null +code=`cat status.txt` +rm status.txt +exit $code diff --git a/cicd/k8s-calico-ipsec-ha/validation_with_sctp.sh b/cicd/k8s-calico-ipsec-ha/validation_with_sctp.sh new file mode 100755 index 000000000..1a539c856 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/validation_with_sctp.sh @@ -0,0 +1,121 @@ +#!/bin/bash +source ../common.sh +echo k8s-calico-ipsec-ha + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +for((i=0; i<120; i++)) +do + extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-default") + read -a strarr <<< "$extLB" + len=${#strarr[*]} + if [[ $((len)) -lt 6 ]]; then + echo "Can't find tcp-lb service" + sleep 1 + continue + fi + if [[ ${strarr[3]} != *"none"* ]]; then + extIP="$(cut -d'-' -f2 <<<${strarr[3]})" + break + fi + echo "No external LB allocated" + sleep 1 +done + +## Any routing updates ?? +#sleep 30 + +echo Service IP : $extIP +echo $extIP > extIP +echo -e "\nEnd Points List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nPod List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nLB List" +echo -e "\n---- LLB1 ----" +echo "******************************************************************************" +vagrant ssh llb1 -c 'sudo ./loxicmd get lb -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\n---- LLB2 ----" +vagrant ssh llb2 -c 'sudo ./loxicmd get lb -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\nEP List" +echo -e "\n---- LLB1 ----" +echo "******************************************************************************" +vagrant ssh llb1 -c 'sudo ./loxicmd get ep -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\n---- LLB2 ----" +vagrant ssh llb2 -c 'sudo ./loxicmd get ep -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\nTEST RESULTS" +echo "******************************************************************************" + +master="llb1" +backup="llb2" + +state=$(curl -sX 'GET' 'http://192.168.80.252:11111/netlox/v1/config/cistate/all' -H 'accept: application/json') + +if [[ $state == *"BACKUP"* ]]; then + master="llb2" + backup="llb1" +fi + +echo -e "\n MASTER\t: $master" +echo -e " BACKUP\t: $backup\n" + +vagrant ssh host -c 'sudo 
/vagrant/host_validation_with_sctp.sh' 2> /dev/null + +sleep 15 +echo -e "phase-2 begins..\n" + +count=1 +sync=0 +while [[ $count -le 20 ]] ; do +echo -e "\nStatus at MASTER:$master\n" +vagrant ssh $master -c "sudo ./loxicmd get ct | grep est" 2> /dev/null + +echo -e "\nStatus at BACKUP:$backup\n" +vagrant ssh $backup -c "sudo ./loxicmd get ct | grep est" 2> /dev/null + +nres1=$(curl -sX 'GET' 'http://192.168.80.252:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) +nres2=$(curl -sX 'GET' 'http://192.168.80.253:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) + +if [[ ! -z $nres1 && $nres1 != 0 && $nres1 == $nres2 ]]; then + echo -e "\nConnections sync successful!!!\n" + sync=1 + break; +fi +echo -e "\nConnections sync pending.. Let's wait a little more..\n" +count=$(( $count + 1 )) +sleep 2 +done + +if [[ $sync == 0 ]]; then + echo -e "\nConnection Sync failed\n" + vagrant ssh host -c 'sudo pkill iperf; sudo pkill sctp_test; sudo rm -rf *.out' + exit 1 +fi + +echo "Restarting MASTER:$master.." +vagrant ssh $master -c 'sudo docker restart loxilb' 2> /dev/null + +sleep 30 + +sudo rm extIP +vagrant ssh host -c 'sudo /vagrant/host_validation2_with_sctp.sh' 2> /dev/null +code=`cat status.txt` +rm status.txt +exit $code diff --git a/cicd/k8s-calico-ipsec-ha/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipsec-ha/yaml/kube-loxilb.yml new file mode 100644 index 000000000..5cfe9f0e7 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/kube-loxilb.yml @@ -0,0 +1,135 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - namespaces + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + labels: + app: loxilb + spec: + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
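+        # (Together with the NoExecute toleration below, this keeps the
+        # kube-loxilb pod scheduled even on tainted or draining nodes.)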
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 + - --cidrPools=defaultPool=20.20.20.1/32 + #- --monitor + - --setBGP=64511 + - --extBGPPeers=77.77.100.1:64512,77.77.101.1:64512 + - --setRoles=0.0.0.0 + - --listenBGPPort=1791 #Mandatory to mention if running with Calico CNI + #- --monitor + #- --setBGP + - --setLBMode=2 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml new file mode 100644 index 000000000..245a62553 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml @@ -0,0 +1,69 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 192.168.80.250 + bindPort: 6443 +nodeRegistration: + imagePullPolicy: IfNotPresent + name: master + taints: null +--- +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +kind: ClusterConfiguration +apiServer: + timeoutForControlPlane: 4m0s + certSANs: + - 192.168.80.250 +controlPlaneEndpoint: 192.168.80.250:6443 +clusterName: kubernetes +controllerManager: {} +dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kubernetesVersion: v1.29.2 +networking: + dnsDomain: cluster.local + podSubnet: 172.16.1.0/16 + serviceSubnet: 172.17.1.0/18 +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 5 +clusterCIDR: "" +configSyncPeriod: 15m0s +#featureGates: "SupportIPVSProxyMode=true" +mode: ipvs +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" diff --git a/cicd/k8s-calico-ipsec-ha/yaml/loxilb-peer.yml b/cicd/k8s-calico-ipsec-ha/yaml/loxilb-peer.yml new file mode 100644 index 000000000..023f04f21 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/loxilb-peer.yml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-peer + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-peer-app + template: + metadata: + name: loxilb-peer + labels: + app: loxilb-peer-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Make sure loxilb gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: "node-role.kubernetes.io/master" +# operator: DoesNotExist +# - key: "node-role.kubernetes.io/control-plane" +# operator: DoesNotExist + containers: + - name: loxilb-peer-app + image: "ghcr.io/loxilb-io/loxilb:latest" + command: [ "/root/loxilb-io/loxilb/loxilb", "--peer" ] + ports: + - containerPort: 11111 + - containerPort: 1791 + - containerPort: 50051 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-peer-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-peer-app + ports: + - name: loxilb-peer-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-peer-bgp + port: 1791 + targetPort: 1791 + protocol: TCP + - name: loxilb-peer-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP + + diff --git a/cicd/k8s-calico-ipsec-ha/yaml/sctp_default.yml b/cicd/k8s-calico-ipsec-ha/yaml/sctp_default.yml new file mode 100644 index 000000000..324087241 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/sctp_default.yml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-default + annotations: + loxilb.io/lbmode: "default" + loxilb.io/liveness: "yes" + loxilb.io/probetype: "ping" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: sctp-default-test + ports: + - port: 56005 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-default-test + labels: + what: sctp-default-test +spec: + containers: + - name: sctp-default-test + image: loxilbio/sctp-darn:latest + imagePullPolicy: Always + #command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 6 ; done"] + #command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + command: ["sctp_test","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - containerPort: 9999 diff --git a/cicd/k8s-calico-ipsec-ha/yaml/sctp_fullnat.yml b/cicd/k8s-calico-ipsec-ha/yaml/sctp_fullnat.yml new file mode 100644 index 000000000..ff69e7058 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/sctp_fullnat.yml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-fullnat + annotations: + loxilb.io/lbmode: "fullnat" + loxilb.io/liveness: "yes" + loxilb.io/probetype: "ping" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: sctp-fullnat-test + ports: + - port: 56004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-fullnat-test + labels: + what: sctp-fullnat-test +spec: + containers: + - name: sctp-fullnat-test + image: loxilbio/sctp-darn:latest + imagePullPolicy: Always + #command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 6 ; done"] + #command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + command: ["sctp_test","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - containerPort: 9999 diff --git a/cicd/k8s-calico-ipsec-ha/yaml/settings.yaml b/cicd/k8s-calico-ipsec-ha/yaml/settings.yaml new file mode 100644 index 000000000..492519dce --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/settings.yaml @@ -0,0 +1,45 @@ +--- +# cluster_name is used to group the nodes in a folder within VirtualBox: +cluster_name: Kubernetes Cluster +# Uncomment to set environment variables for services such 
as crio and kubelet. +# For example, configure the cluster to pull images via a proxy. +# environment: | +# HTTP_PROXY=http://my-proxy:8000 +# HTTPS_PROXY=http://my-proxy:8000 +# NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03 +# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf. +network: + iloxilb_ip: 192.168.80.253 + oloxilb_ip: 192.168.90.253 + # Worker IPs are simply incremented from the control IP. + control_ip: 192.168.80.250 + dns_servers: + - 8.8.8.8 + - 1.1.1.1 + pod_cidr: 172.16.1.0/16 + service_cidr: 172.17.1.0/18 +nodes: + control: + cpu: 2 + memory: 4096 + workers: + count: 2 + cpu: 1 + memory: 2048 +# Mount additional shared folders from the host into each virtual machine. +# Note that the project directory is automatically mounted at /vagrant. +# shared_folders: +# - host_path: ../images +# vm_path: /vagrant/images +software: + loxilb: + box: + name: sysnet4admin/Ubuntu-k8s + version: 0.7.1 + cluster: + box: bento/ubuntu-22.04 + version: 202401.31.0 + calico: 3.26.0 + # To skip the dashboard installation, set its version to an empty value or comment it out: + kubernetes: 1.29.2 + os: xUbuntu_22.04 diff --git a/cicd/k8s-calico-ipsec-ha/yaml/tcp_default.yml b/cicd/k8s-calico-ipsec-ha/yaml/tcp_default.yml new file mode 100644 index 000000000..2742671bb --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/tcp_default.yml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-default + annotations: + loxilb.io/lbmode: "default" + #loxilb.io/liveness: "yes" + #loxilb.io/probetype: "ping" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-default-test + ports: + - port: 56003 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-default-test + labels: + what: tcp-default-test +spec: + containers: + - name: tcp-default-test + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 diff --git a/cicd/k8s-calico-ipsec-ha/yaml/tcp_fullnat.yml b/cicd/k8s-calico-ipsec-ha/yaml/tcp_fullnat.yml new file mode 100644 index 000000000..0125b69b7 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/tcp_fullnat.yml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" + loxilb.io/probetype: "ping" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-fullnat-test + ports: + - port: 56002 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-fullnat-test + labels: + what: tcp-fullnat-test +spec: + containers: + - name: tcp-fullnat-test + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 diff --git a/cicd/k8s-calico-ipsec-ha/yaml/udp_fullnat.yml b/cicd/k8s-calico-ipsec-ha/yaml/udp_fullnat.yml new file mode 100644 index 000000000..833187e73 --- /dev/null +++ b/cicd/k8s-calico-ipsec-ha/yaml/udp_fullnat.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-onearm-test + ports: + - port: 56003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-onearm-test + labels: + what: udp-onearm-test +spec: + containers: + - name: 
udp-onearm-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 From 269ede3078707918d43ca20c24066f474953e6d1 Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Mon, 23 Sep 2024 11:40:51 +0900 Subject: [PATCH 13/34] gh-87: CICD added - Client-to-LoxiLB IPsec with HA in K8s --- .github/workflows/k8s-calico-ipsec-ha.yml | 36 +++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/k8s-calico-ipsec-ha.yml diff --git a/.github/workflows/k8s-calico-ipsec-ha.yml b/.github/workflows/k8s-calico-ipsec-ha.yml new file mode 100644 index 000000000..24d0f9201 --- /dev/null +++ b/.github/workflows/k8s-calico-ipsec-ha.yml @@ -0,0 +1,36 @@ +name: K8s-Calico-Cluster-IPSec-HA-Sanity-CI +on: + schedule: + # Runs "At 19:00 UTC every day-of-week" + - cron: '0 19 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k8s-calico-cluster-ipsec-ha' +jobs: + test-runner: + name: k8s-calico-cluster-ipsec-ha-sanity + runs-on: [self-hosted, large] + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run the test + run: | + cd cicd/k8s-calico-ipsec-ha + ./config.sh + ./validation_with_sctp.sh + cd - + + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/k8s-calico-ipsec-ha || true + ./rmconfig.sh + cd - From 5e534f32fa568ed595874a38e8fcb216dc2c3ca3 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Mon, 23 Sep 2024 16:49:00 +0900 Subject: [PATCH 14/34] gh-87 Fix for k8s cicd failing due to cri-o dependencies --- cicd/k8s-calico-incluster/configs/config | 19 ---- cicd/k8s-calico-incluster/configs/join.sh | 1 - .../node_scripts/common.sh | 81 ++++++++++------- .../node_scripts/master.sh | 7 +- .../yaml/kubeadm-config.yaml | 5 ++ cicd/k8s-calico-ipvs/configs/config | 19 ---- cicd/k8s-calico-ipvs/configs/join.sh | 1 - cicd/k8s-calico-ipvs/node_scripts/common.sh | 81 ++++++++++------- cicd/k8s-calico-ipvs/node_scripts/master.sh | 7 +- cicd/k8s-calico-ipvs/yaml/kubeadm-config.yaml | 5 ++ .../node_scripts/common.sh | 81 ++++++++++------- .../node_scripts/master.sh | 7 +- .../yaml/kubeadm-config.yaml | 5 ++ cicd/k8s-calico-ipvs2/node_scripts/common.sh | 81 ++++++++++------- cicd/k8s-calico-ipvs2/node_scripts/master.sh | 7 +- .../k8s-calico-ipvs2/yaml/kubeadm-config.yaml | 5 ++ .../node_scripts/common.sh | 81 ++++++++++------- .../node_scripts/master.sh | 7 +- .../yaml/kubeadm-config.yaml | 5 ++ cicd/k8s-calico-ipvs3/node_scripts/common.sh | 81 ++++++++++------- cicd/k8s-calico-ipvs3/node_scripts/master.sh | 7 +- .../k8s-calico-ipvs3/yaml/kubeadm-config.yaml | 5 ++ .../node_scripts/common.sh | 90 ++++++++++++------- .../node_scripts/master.sh | 9 +- cicd/k8s-calico/node_scripts/common.sh | 78 ++++++++++------ cicd/k8s-calico/node_scripts/master.sh | 9 +- 26 files changed, 449 insertions(+), 335 deletions(-) delete mode 100644 cicd/k8s-calico-incluster/configs/config delete mode 100755 cicd/k8s-calico-incluster/configs/join.sh delete mode 100644 cicd/k8s-calico-ipvs/configs/config delete mode 100755 cicd/k8s-calico-ipvs/configs/join.sh diff --git a/cicd/k8s-calico-incluster/configs/config b/cicd/k8s-calico-incluster/configs/config deleted file mode 100644 index fc66b8aed..000000000 --- a/cicd/k8s-calico-incluster/configs/config +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1EY3pNREV6TURVek1sb1hEVE16TURjeU56RXpNRFV6TWxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTVNyClltV0M3Q0lSYlZFZy92M0FZUk5rRTZnUk5CQ2k3MThCMitHbUllVkQ4c2d5aXoxdWprdDZnbDcwQXhIRDkwSlUKcnVzSnFTc2ZvdDZ3YWJodU5MR3pMdy9ZK0xwZlRNMG5pRmorM3NlVlZiTExQOWxlRUx0Y2R5MnNIWDRQSU5KNApHcmNWM0lETjYrNGZOUWZkT1pjcGtIMjVkMmFKa01sM1YrdTFUbExTK0VSckRhQnNpOTJESXFkb0wxdlhwbm8xCjh6TnpYV2J3M1EyQ1dldWlOaW11eHNIWDM0MlpzRnNJY2FwYWhqa0MxVFZCbkNVOVowSXJSR2pVaW4rbkwvRVcKQUp2SHhCVEVMWkFmd1VkcG10ODBIcGFGVDNZMlcxYW1VWmR2b2w1V0RUaE83T3R4eGpUeTVrSXAwVlhIL1Q2WApRalRLb0RIUERsUWVNc01aQ1BjQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFWFMrWUs1ampsOWNSc3hPQW9qNktMWTRkVGpNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2h1MFhnZ1d1MkdqZStId3BTQwpSaE5kbUVwWnVPSjcyVlhrUGlsTUxJeHZ6Rys1NG1PbWZPOFNjRTJqaitTVG90VXk2N1hvMklMU1loNHpydFRXCitPcnFYMmJqaC9rZTRhOU5MSTJORjlmc1Z6ZS9YVElRQ0Uwbnlkb21lOC91b0s2b29DRGc1bm5FM1Y0NXFiRE0KdVJ4VGU5dUkvV1J1ZHVuSW95MXNPTHh2My9yZE1DeVZvRkljdm9ZazlES2NBU1F5Z09CRE1uaEc4RHBrZE44Ngo5eW01SDdYMVBvNkZVVCt0TCtKOHlmRFRhc0VXRDhRODRuVmRVckE3TGdtNnZYbmFGeTNCQ3dJUXZGRjNhbTlPCnZ3ZzJ5bzdPZ1RmdU9YUWpjVHZNWmpmNUp4OGlKQXc4WkN1aGkxVlpjQitpWnNDb2I1cUxHdENnbWxNMlNpTmMKaTVnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - server: https://192.168.80.10:6443 - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: kubernetes-admin - name: kubernetes-admin@kubernetes -current-context: kubernetes-admin@kubernetes -kind: Config -preferences: {} -users: -- name: kubernetes-admin - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJWVF5Tkszb3lBa2N3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBM016QXhNekExTXpKYUZ3MHlOREEzTWpreE16QTFNelZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXJ2QXR6OHhxd1RBYUxjWk4KRmI4R1A3VlVranlESFRRRy95R3Q5WXVleUNIeUE5RG9pRFF6dkRnSStwSlFqMmx3QXhUVjR5N1k2U1VZM1BiTgpKd01Kd2F3VG1HZUowVmpuWThpbFF4RHAxdk5sM0k0bGc0VVFDanZUb0k0Y2doejM3Wk1yMVB3MmRVeHBwUGkxCjVHSjA0bTVVbUJPZWJrc1dOOWRpWk5FYmYxUWRHaENwZHFyRHAwMWRqNER2MFZFbEhsdDBzT0FmYkdvS2EreDEKTHlwckVvamJWQkE2NGVRRVFRWGJCcXlGZHpweTdPbWJCSG1vVnhVRXBFQ0dFb2JzRVV5eFFoRysxRmxnd01ZYQpzTkRtQnRDcW42SzVoMUMzV20wYzRmdElEM2pwYmUybzhLbVQrekdLYWJCYmJUY1kxZWJrRjBHWHcwcXY2WWNjCmIybEVtd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSRjB2bUN1WTQ1ZlhFYk1UZ0tJK2lpMk9IVQo0ekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBYVk3WllSVTJtT3JvcExGVGRuUjc5Q2ovQlI1UW4rTHU2cVErCkQ5ZVJ2WmxGQjB1TFl6OFNicWpwR1lCTmVvWU55bXlsd1NKZnl0V3p5WVpDQXRhQmNZZUNqbld2elhVQi82YVgKeTduaGJFVWRUUkVRRXZEc2M4eUxOVkx1OThEcjd1OUVIUWxZWm9NM2pIZFF6dFlQNW00M1JHVWxJTW1jN05NZgpsSk1tK1RvTmZkUHo0VlpqNmJjQ3VYbmtGdnZPc0VsUXNMViswZHVHQkpDM2JFZGNmR01najh6Qm1ML3QvWXIzCitMYWNpeFpQeVVCRjdKVzBNOUp0dFpzQ2hXbWZraHBHYm5qRElncXNnK1lzRldvempBMkMxcS9hUyttdUd2YjkKZ2JkVTZvOXA5alZmR0tEbFVDa2JYbDVId01YS09PZ0RQV3pVWFp0UEdTUVJpcjE0Ync9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcnZBdHo4eHF3VEFhTGNaTkZiOEdQN1ZVa2p5REhUUUcveUd0OVl1ZXlDSHlBOURvCmlEUXp2RGdJK3BKUWoybHdBeFRWNHk3WTZTVVkzUGJOSndNSndhd1RtR2VKMFZqblk4aWxReERwMXZObDNJNGwKZzRVUUNqdlRvSTRjZ2h6MzdaTXIxUHcyZFV4cHBQaTE1R0owNG01VW1CT2Via3NXTjlkaVpORWJmMVFkR2hDcApkcXJEcDAxZGo0RHYwVkVsSGx0MHNPQWZiR29LYSt4MUx5cHJFb2piVkJBNjRlUUVRUVhiQnF5RmR6cHk3T21iCkJIbW9WeFVFcEVDR0VvYnNFVXl4UWhHKzFGbGd3TVlhc05EbUJ0Q3FuNks1aDFDM1dtMGM0ZnRJRDNqcGJlMm8KOEttVCt6R0thYkJiYlRjWTFlYmtGMEdYdzBxdjZZY2NiMmxFbXdJREFRQUJBb0lCQUN1M3hoc2FJTXVxczhBZwp3SDdnd0RVSG9kengxbXBqNkNPMlRQMENLV29tWVk3bWxGWUZoYkJSNkp5R0dDL2V6NmxWZWFaT3ZOSjIvT0dyCm85Vk9BeEF0YXJBNW44MTdoRWdCaXB0YURMWTFHWTJtMEdVdnliUmxBeHdxcDZFMGtCa0ZJSDBYa3B4NXZpVUcKS3A2cXBEODZCMVlDQVNQYkMvQmttU2hNd2F4dDlNMkYzeVZNRExnN2RpYXlZZUx1MHhtNXd4VXVwUmVkU1hYdgpPcHppWE5tdGZGR01QUkRVWXdNUGoycUNzNlZUdHErQlhoOUVWQVU3OGlkOU50bU5KQ2M5Zk1MLzUzekg3OVlhCnJjb2VXZFRMNlNRYVB6YUlSWEx6Mm90VG5nVHJ2RnlNY2lrTWdVVVZ5M3ZndlpySUFRd3J4elQ5TEJXYWhVRkwKMFVRd0gzRUNnWUVBNUNXTC9jRGxaTGxGYUp5ZTFMNnhZK1ZOT2lnQStBVHVQWDdtczdjV2t0Slk4YjErQ3IzcwpUYTRmTmlpYUM1Zk9RT0RkcmhNdS9GUzBCcHVjRk44OVVCL2xaZSsrbStJY0tpVDRZM0lHTmYyKzBDT3Z0ZGFmCkkrZ2lIaW5JTnV2T3Fkek83TW5WUEc0Q2NubUJHU3NybnovTnI1TFJnREF1SWh6NEhhUGxTdFVDZ1lFQXhFdXEKSkl4c3RvMGVKWDdORnNUMW9TUFl5a3FTWnh4clB4TFdpdHFCSzQvV0NTMW1HWTJUemRkS3hGaTlnVWdBaitmNApWSmg0aXlrTXdKVWtJOUQ0YllPR2JqdW1XTXlMKzRZWm5zbFBIS2FwcVBkQkJiM0UzVlJTK1hyOHJxaEhxVEhpCms2ME9RN1Qya0Z6SWlySy9teWlMb2J1YnYxKzlVVytoL2xOekthOENnWUJhalh5Tzd5MGRXVnZ2TlpybEhmc1MKaDBTcnZJMEY1QThiWVc3NERjZHI1d2xlaWJPcFY5Q2UxR21XK1c2TEEybmQzbUtlWVFiWktGVjcrZTl0YVYzUQptNWhWYVY3aVNGQ2RlYWNNOFlqOWpRVmJYNDZ5UWNsUVd5YVBpazNwWHBiY1hNUFV3QmRlc050UHpHSXROekZOCk4rblBzaHB0SXJKczM4cXJHUTQ5TVFLQmdRQ0hYVTVsaWRqbVFvYUpnTm5aVzlXdlc5TUNIVTY4Z0dLTXltYmMKdGpYaFhuMVJNdGQzdzZRcmpNM29mUEdpRjQ4YnJmSVlGRlQ4VWtDVEJjWTRWTUVjZEZqZDU1Q2RKK0ZZZ0c5bQppcGhkdjZpNzlsWUdxWWo2d0UyLzhVb1MvOFQ3TG9WN0pSbnpJdlh0TTY2dnh2aE8vVFRkUVV6ME9nZUtBeHVKCkVPOFh6UUtCZ0FnOUpTTFBrRDV6NzdtcWN3Tk9pa1VBa1RUek52ZWxsRW9yK2RVWHRyVUlKdmtKT2xBbmZJekMKMlpRM3p4YzRpTVd1a3hHc2Z2VzFIZngxOUdBNzBVWXZkNFVab09mYjVRYWtBaGh2WUh6VEdlNnZ1VXBoZS9KTQo5QXdwQ3YzcEg5TW1VWk5wbzlhcWhQTGNnUzd5Uy9Xc1pVbFlpUzNrRUtYOUhOcUtiMHVsCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/cicd/k8s-calico-incluster/configs/join.sh b/cicd/k8s-calico-incluster/configs/join.sh deleted file mode 100755 index 17a251f0e..000000000 --- a/cicd/k8s-calico-incluster/configs/join.sh +++ /dev/null @@ -1 +0,0 @@ -kubeadm join 192.168.80.10:6443 --token wxki6c.cifh2d82k592rpwf --discovery-token-ca-cert-hash sha256:f581308b2a8fb3647d7e1297d2dac741529bb84c711d3ae9193ab4574fcb3aae diff --git a/cicd/k8s-calico-incluster/node_scripts/common.sh b/cicd/k8s-calico-incluster/node_scripts/common.sh index c01ad688f..ded6b8b5d 100644 --- a/cicd/k8s-calico-incluster/node_scripts/common.sh +++ b/cicd/k8s-calico-incluster/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o 
/etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-incluster/node_scripts/master.sh b/cicd/k8s-calico-incluster/node_scripts/master.sh index bcc757853..13db17a17 100644 --- a/cicd/k8s-calico-incluster/node_scripts/master.sh +++ b/cicd/k8s-calico-incluster/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml b/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml index 245a62553..20a7207ac 100644 --- a/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd diff --git a/cicd/k8s-calico-ipvs/configs/config b/cicd/k8s-calico-ipvs/configs/config deleted file mode 100644 index 752bd5ddf..000000000 --- a/cicd/k8s-calico-ipvs/configs/config +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1Ea3dNVEF5TkRZek9Gb1hEVE16TURneU9UQXlORFl6T0Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSzhTCkZwZTh0UVlQOVdvVTZYNVAwanBuSHMxM1hYQ3NnVk03QjJDRG5NWGpFeEExbm9xWElybk1YbFlmVTZ2blhLbSsKUDRqRGFUM0puK1hxaHFLUml3dmkra01lL1FUSVJiUThwVmVtUnBXaDB0dWNPbTJmeno0UkQxeS83ZmJHd0VGWgpCM3BRNm9CSVdXR0F3VERISDdoU0RlU0czdnhFRzdMdVNnUElxYWM1MEpkVWYrVXJ5SUlzdTlnYkJZN2NGcjhzCm03YmFTUkF5NkZyWlRveTBPRW1sT2NWWTBWYUpZdWpPdmVkTk5KRkw1RTlCenArbjA0UVcwVjRRd0NDaUIwQ3EKRjJTdWo4aDRSZWk0VTE5WmkvN2FyYyt5MTZUbURXem00Y0FRa05DWEY4dExYRTlMb3hUVEgvdGJFUHhORUk0KwpDT1BGZ0puVjd4R0l2QVh5UEVzQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJTHBkU2YrYzhZS0tlb0ZQVk1pUFZQWVREaUdNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS2JtYnBzTlF4UlFvTjRuMnUxOQo5Ump0WTJvMmUvSFA3NCthdlVKRitqM3FqRGFSdnhBS3JFRVNLMGJ1WmpFNzdQcmNtRnB3UFRmQ2wxazRDV2hyCnhPM1U0a2tXamQrWXlGc3BVYjJIaUE5R3ZhTXczWDBxaWtDRGZjNGxIaExXOWtRMWNpUHNyMmhPQURyZ2hNVUgKWWxEM2RBSnRuZzRXVm5TU2NiODk3Vm9DUU56NExlNi9WaWszRXA2ZG9pb1ZQbTZsNW9JNlU1ODJPMlorSGkwYgo4bzgxK3QxRVBvZ01tQkJWMVdWSzd4UkVLRFJuMmgzQjY4NHc0TGgyNkdyQ1NZVGlwRGhjUGJmRFJvS2o4aEFZClM0YWFRSDJLZ2kvdnkvY0VPVXc1Sk9VdmZBWUlmQ0R1d3k4K1NlOUc5NStaZStoTGdUcHkvZ3hTMHNhN20xTVQKM3FRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - server: https://192.168.80.10:6443 - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: kubernetes-admin - name: kubernetes-admin@kubernetes -current-context: kubernetes-admin@kubernetes -kind: Config -preferences: {} -users: -- name: kubernetes-admin - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJTjd6VjBzQnlrQUF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBNU1ERXdNalEyTXpoYUZ3MHlOREE0TXpFd01qUTJOREZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTdEekJqTkU5UlpMemcvK3UKMVRjTzN2R21ybS9jT1Y1VEVvZVpvZTlTMzFPa2VxektQTElGZGFvcXBDY2VLN3V2WTg5WEFWV1JVNm4rV080egpCWFllakZ3Y3lzS2lPM3pFQ29ERm9NNEQvRllzZ3ZvWUtNWXFGWmdlaUdvS3UzYzhqVGhjL3lkajVRdG1MOW5uCkl4ekxPdHc4aitKZ3dJN3RLODduNG4rdUZtb204ZXViRUwzZkNqdi9uN212VE5GNU9NK0FOYyszZnpjYkhTUFIKVFZ1K1lGZGZUT2tXc0piaWFKVURacWtIZUtKem82Y1NHOUNqN29acjArSFViWjRUdk1MZkhwVWtCMktKZy9HWAo1SXlyQzJFU2w3TkI4NkdwN1duTmFveE83NVphVmgxZnJscGYxSXN1UWxsYkFhdWdJamk2bm1ESXNsQk5GcEJjCm9uWUw3UUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTQzZYVW4vblBHQ2lucUJUMVRJajFUMkV3NApoakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBUXloclNzOWxLM2hXOW1EMUc1MWJJSk1TbGtKNmFpdEFJUXRXCnFodzBUQVNPZFl0dlRFV1NUdjNQSk50MndwcUNoQ1JVaU8xWTRpZ29tZjYwOHhmY0o5K2RTSUV5aTZGYkh4dHYKb2RZcDN0UzhtTXBGNkJPVUhMY2xBVndTYjBNNFpHS3kvVVRtWGhlU0oyczFrc21CRTlaOWFLTjRTOWZNSUd1SQpkME44RGdKZ3BXN3RIK29tTEgyUDEzZUpHS2VXMTVROGhxNThiSytpMGxqZy9rY1p5UndJM0VWM2lJbTlCNEJSCko2OXcvRTNiNitETkJOM0dkSFpwM1pFbG8xNGNtU1ltTjduaC9EdThjYUtWRkJLeUxEeXJ3Q3NXM2dwU0ROL0sKc1hUc0NqY0RNNVBCMEhrRG1UVFNiNStKaWE0bnZPcllkenZudkFtNWRETFhVSlFlN1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBN0R6QmpORTlSWkx6Zy8rdTFUY08zdkdtcm0vY09WNVRFb2Vab2U5UzMxT2tlcXpLClBMSUZkYW9xcENjZUs3dXZZODlYQVZXUlU2bitXTzR6QlhZZWpGd2N5c0tpTzN6RUNvREZvTTREL0ZZc2d2b1kKS01ZcUZaZ2VpR29LdTNjOGpUaGMveWRqNVF0bUw5bm5JeHpMT3R3OGorSmd3STd0Szg3bjRuK3VGbW9tOGV1YgpFTDNmQ2p2L243bXZUTkY1T00rQU5jKzNmemNiSFNQUlRWdStZRmRmVE9rV3NKYmlhSlVEWnFrSGVLSnpvNmNTCkc5Q2o3b1pyMCtIVWJaNFR2TUxmSHBVa0IyS0pnL0dYNUl5ckMyRVNsN05CODZHcDdXbk5hb3hPNzVaYVZoMWYKcmxwZjFJc3VRbGxiQWF1Z0lqaTZubURJc2xCTkZwQmNvbllMN1FJREFRQUJBb0lCQUFyeWdkR2h2S0lsdmkwbQp3eFpVVjljVEFiTmhzYVhpN2h5VXRoVGYvMG9rR1NJcU1iRUFXdXBwK1ZIa0VpemFwTFVPWGF6TkowL21OOGd0Ck9hWU9KRHBDNW42cTZGT3pZMjVOSzF0WlVLdjMzbFl2ZXNFZzljQk1iVlhLL0RaVnZ6T1lJZzhjNXk4dENRNDgKbmM1dHZpazdIWDlaY1R4Ykl6aDlmUmRzN1VkU2l6QVljbWFOUlhyWVdwanZobGVxeS9icml6cmR6R2kwR3JxUgpGMFpSK2lqaVIzMU03Z0VrTy9LVmpROUpqUW5RSm04Rmc1Z05zN1hOd0s4ZFRMZ1NaNTI4OWdpaVVLYVdzaFZ5CnZGcUpVWXZQSkJFZlo1TnFZT2tDSEtVa1JiWDRvMGgyZ05PV2EvdUZhTTZkeXlqOWk3d0o4UjhzeHhpUGxYS2UKeXluYUNPRUNnWUVBN3NjSzRTSDk2UFI0UURrVmlhQXVwU0RPaWo4ODFzN1FXa2VNRlV3RjdEUS9HTVFYN05PZQpBVmRNT1EvM0IrSEdPK1JLSWVpM0NiZ2wzdjljbzJBWUZWanBTdHozUEhxVXMvZ0FoWjNuM0hOaGhBanZwcDk3CkJXUGhycmZmSVo4T0ZIK1NueGRCS0tMUTBPSWdMUURQWndoMzFGQ3d6dXdUU1ZPUnhrOTNUbFVDZ1lFQS9VYlAKVWVvT21zajhzV0kvWklxUjFRdDhSUlNVZE1BMGorTXQzL2JHL0tzQkVETk8wZHJDcTdtVHJGZitGcElmdDFxTgpXdk92cWtVVzY2djU4OU1EWFdDRGlOT1ZpUkNjTm5qb0VaUWpLQVREZDBVNFlRTTdtcm4vclhoYUtQNjg3MWdlCmxENThJTng1V0x6UHYwSjNMWlpkN041WUNOV2JWckNEaVY5MEx6a0NnWUEya0VWc0xOaFk0NFNYS0hSRGZ0Y3AKNU5WTndpV2s4SjJzQTYxL01HQXFHY1pSWW40VklFWjdCL2ZqRWtMaENqYkNlT2gzMXpYOGdwZ2szVFhPSDZkaApPWEFXSzJoVDZhOFJjUnF0YmVnTitFL1FYRHBuV1FwRHNROWhYYU1maTdrcjlmc0xYOFVFQkRDeml2alBUK0FWCksxbzJxam05RHpWWkREL0RrV1V5QVFLQmdRREdrS0V4STBMWDd3TE5QcTFjY1piclk5bkZmdUQwdDB0K0V5bUEKRU1Ub2lsaUhEdktZYTkxN0xENnVPejRsQytKNXFUQnhRZU5TcGwvVjNEcFdBZlQ5WEJGRFVENUgreEc5VXdUOQo0eG04NGg4c2ZzUTRxb1FzUmU1QlhiMnhyaVVKc0JncE9PT3dENm5DL0NRVFdsUjlGUW9HRGpzT2tnajY2ZC8xCjd6UHpZUUtCZ1FEZSt1dnFPQ2F3dk9GQ2J2N3IyUXdIN3pHLzJMdXJqMHd0UENYd09IbXFiWWpvSnhEZGlyd2oKc0JsOXUxTzNLVkR5TmJGS2NHOUFyVmlEU3JOUFkrNEdiZCtqYTlheHFVUGZHU3pBWDlwRUdDcmdacHRrK0d0Ngo5SFBsbkFsYXpzTWgyUGhxSlN4U3ZRQ2h3QXk0cmZMdlltMVhkYjhaYnkrVndCTlZVL21obXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/cicd/k8s-calico-ipvs/configs/join.sh b/cicd/k8s-calico-ipvs/configs/join.sh deleted file mode 100755 index 0cde2a766..000000000 --- a/cicd/k8s-calico-ipvs/configs/join.sh +++ /dev/null @@ -1 +0,0 @@ -kubeadm join 192.168.80.10:6443 --token erzh6n.ysnbfgbxinfum5ps --discovery-token-ca-cert-hash sha256:43c9a9c2b22f053d87a0e11df980c04f8171778049609a62d29db794e30ece03 diff --git a/cicd/k8s-calico-ipvs/node_scripts/common.sh b/cicd/k8s-calico-ipvs/node_scripts/common.sh index c01ad688f..ded6b8b5d 100644 --- a/cicd/k8s-calico-ipvs/node_scripts/common.sh +++ b/cicd/k8s-calico-ipvs/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL 
https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-ipvs/node_scripts/master.sh b/cicd/k8s-calico-ipvs/node_scripts/master.sh index 9e65ff335..eaa7097cb 100644 --- a/cicd/k8s-calico-ipvs/node_scripts/master.sh +++ b/cicd/k8s-calico-ipvs/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-ipvs/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs/yaml/kubeadm-config.yaml index 79d05055f..2848ae7a0 100644 --- a/cicd/k8s-calico-ipvs/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-ipvs/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd diff --git a/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/common.sh b/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/common.sh index 34035effc..545950530 100644 --- a/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/common.sh +++ b/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/common.sh @@ -18,6 +18,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/master.sh b/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/master.sh index 41793b5fa..2d3eb1efd 100644 --- 
a/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/master.sh +++ b/cicd/k8s-calico-ipvs2-ha-ka-sync/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kubeadm-config.yaml index 245a62553..20a7207ac 100644 --- a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd diff --git a/cicd/k8s-calico-ipvs2/node_scripts/common.sh b/cicd/k8s-calico-ipvs2/node_scripts/common.sh index c01ad688f..ded6b8b5d 100644 --- a/cicd/k8s-calico-ipvs2/node_scripts/common.sh +++ b/cicd/k8s-calico-ipvs2/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-ipvs2/node_scripts/master.sh b/cicd/k8s-calico-ipvs2/node_scripts/master.sh index 41793b5fa..2d3eb1efd 100644 --- a/cicd/k8s-calico-ipvs2/node_scripts/master.sh +++ b/cicd/k8s-calico-ipvs2/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init 
--ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml index 245a62553..20a7207ac 100644 --- a/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh index c01ad688f..ded6b8b5d 100644 --- a/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh index a5cdecaac..d4b836aef 100644 --- a/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml 
b/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml index 245a62553..20a7207ac 100644 --- a/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd diff --git a/cicd/k8s-calico-ipvs3/node_scripts/common.sh b/cicd/k8s-calico-ipvs3/node_scripts/common.sh index c01ad688f..ded6b8b5d 100644 --- a/cicd/k8s-calico-ipvs3/node_scripts/common.sh +++ b/cicd/k8s-calico-ipvs3/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-ipvs3/node_scripts/master.sh b/cicd/k8s-calico-ipvs3/node_scripts/master.sh index 41793b5fa..2d3eb1efd 100644 --- a/cicd/k8s-calico-ipvs3/node_scripts/master.sh +++ b/cicd/k8s-calico-ipvs3/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml index 245a62553..20a7207ac 100644 --- a/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration 
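+# Assumption: the explicit systemd cgroup driver is set so the kubelet
+# matches how docker/cri-dockerd is configured on these VMs; a
+# cgroupfs/systemd mismatch would keep the kubelet from starting.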
+apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd diff --git a/cicd/k8s-calico-ubuntu22/node_scripts/common.sh b/cicd/k8s-calico-ubuntu22/node_scripts/common.sh index c6cf0f19a..91fdd6650 100644 --- a/cicd/k8s-calico-ubuntu22/node_scripts/common.sh +++ b/cicd/k8s-calico-ubuntu22/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg -echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y sudo apt-get install -y kubelet kubectl kubeadm diff --git a/cicd/k8s-calico-ubuntu22/node_scripts/master.sh b/cicd/k8s-calico-ubuntu22/node_scripts/master.sh index 215a84cf4..ad35ae8e1 100644 --- a/cicd/k8s-calico-ubuntu22/node_scripts/master.sh +++ b/cicd/k8s-calico-ubuntu22/node_scripts/master.sh @@ -6,11 +6,7 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - -sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap --cri-socket /var/run/cri-dockerd.sock mkdir -p "$HOME"/.kube sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config @@ -32,7 +28,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico/node_scripts/common.sh b/cicd/k8s-calico/node_scripts/common.sh index 5f629eb14..400dd11c9 100644 --- a/cicd/k8s-calico/node_scripts/common.sh +++ b/cicd/k8s-calico/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update 
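+# Assumption: the --no-tty flag added to gpg below is needed because
+# provisioning runs non-interactively (no controlling terminal), and gpg
+# would otherwise try to open /dev/tty while dearmoring the repo key.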
sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico/node_scripts/master.sh b/cicd/k8s-calico/node_scripts/master.sh index 215a84cf4..ad35ae8e1 100755 --- a/cicd/k8s-calico/node_scripts/master.sh +++ b/cicd/k8s-calico/node_scripts/master.sh @@ -6,11 +6,7 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - -sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap --cri-socket /var/run/cri-dockerd.sock mkdir -p "$HOME"/.kube sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config @@ -32,7 +28,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin From 24911abd381e139281afa3788438a4f9732a9088 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Mon, 23 Sep 2024 17:29:17 +0900 Subject: [PATCH 15/34] gh-87 Fix for k8s cicd failing due to cri-o dependencies --- .../node_scripts/common.sh | 81 +++++++++++-------- .../node_scripts/master.sh | 10 +-- .../node_scripts/worker.sh | 10 +-- .../yaml/kubeadm-config.yaml | 5 ++ 4 files changed, 62 insertions(+), 44 deletions(-) diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh index c01ad688f..ded6b8b5d 100755 --- a/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo 
apt-get update -y diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh index 43d431e53..c971e90a3 100755 --- a/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh @@ -6,12 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo sed -i 's#10.85.0.0/16#10.244.0.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist - -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -35,7 +29,9 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh + sudo -i -u vagrant bash << EOF whoami diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh index 0fd5eaee9..08d1ebd35 100755 --- a/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh @@ -4,11 +4,11 @@ set -euxo pipefail -if [[ $(hostname -s) == "worker1" ]]; then - sudo sed -i 's#10.85.0.0/16#10.244.1.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist -else - sudo sed -i 's#10.85.0.0/16#10.244.2.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist -fi +#if [[ $(hostname -s) == "worker1" ]]; then +# sudo sed -i 's#10.85.0.0/16#10.244.1.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist +#else +# sudo sed -i 's#10.85.0.0/16#10.244.2.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist +#fi config_path="/vagrant/configs" diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml b/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml index e8de10b86..fccd933c0 100644 --- a/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml +++ b/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml @@ -16,6 +16,7 @@ nodeRegistration: taints: null kubeletExtraArgs: node-ip: 192.168.80.250 + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -68,3 +69,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd From 39ec30cca9007c377c48b4389cb4e024224f21f3 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Tue, 24 Sep 2024 00:18:55 +0900 Subject: [PATCH 16/34] gh-87 Fix for k8s flannel incluster with multus cicd --- cicd/k8s-flannel-incluster-multus/config.sh | 4 ++++ cicd/k8s-flannel-incluster-multus/node_scripts/master.sh | 1 + cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh | 4 ---- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cicd/k8s-flannel-incluster-multus/config.sh b/cicd/k8s-flannel-incluster-multus/config.sh index 5970158c4..c3f8aa0d6 100755 --- a/cicd/k8s-flannel-incluster-multus/config.sh +++ b/cicd/k8s-flannel-incluster-multus/config.sh @@ -40,6 +40,10 @@ sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1 #vagrant ssh master -c 'kubectl apply -f 
/vagrant/yaml/udp_fullnat.yml' 2> /dev/null #vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null +#Create multus services +vagrant ssh master -c 'kubectl apply -f /vagrant/multus/multus-pod.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/multus/multus-service.yml' 2> /dev/null + for((i=1; i<=60; i++)) do fin=1 diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh index c971e90a3..25df78cbc 100755 --- a/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh @@ -63,3 +63,4 @@ kubectl wait pod --all --for=condition=Ready --namespace=kube-flannel --timeout= kubectl apply -f /vagrant/multus/multus-vlan.yml sleep 60 kubectl apply -f /vagrant/yaml/loxilb.yaml +kubectl apply -f /vagrant/yaml/kube-loxilb.yaml diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh index 08d1ebd35..bb97d8f75 100755 --- a/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh @@ -24,10 +24,6 @@ kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s >> /dev/null 2>&1 || true kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=240s >> /dev/null 2>&1 || true kubectl wait pod --all --for=condition=Ready --namespace=kube-flannel --timeout=240s >> /dev/null 2>&1 || true -kubectl apply -f /vagrant/yaml/kube-loxilb.yaml -kubectl apply -f /vagrant/multus/multus-pod.yml -sleep 60 -kubectl apply -f /vagrant/multus/multus-service.yml EOF From 4f8e288d2b2c0c8537137fbd79bcf2c383e3503d Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Tue, 24 Sep 2024 17:49:39 +0900 Subject: [PATCH 17/34] gh-87 : Updating scripts inline with PR #808 --- .../node_scripts/common.sh | 81 +++++++++++-------- .../node_scripts/master.sh | 7 +- .../yaml/kubeadm-config.yaml | 5 ++ 3 files changed, 56 insertions(+), 37 deletions(-) diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh index c01ad688f..ded6b8b5d 100644 --- a/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh +++ b/cicd/k8s-calico-ipsec-ha/node_scripts/common.sh @@ -15,6 +15,7 @@ cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true sudo apt-get update -y -# Install CRI-O Runtime -VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" -CRIO_VERSION=1.27 -# Create the .conf file to load the modules at bootup -cat <> /etc/default/crio << EOF -${ENVIRONMENT} -EOF -sudo systemctl daemon-reload -sudo systemctl enable crio --now - -echo "CRI runtime installed successfully" - -sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --no-tty --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y diff --git a/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh b/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh index 
a5cdecaac..d4b836aef 100644 --- a/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh +++ b/cicd/k8s-calico-ipsec-ha/node_scripts/master.sh @@ -6,10 +6,6 @@ set -euxo pipefail NODENAME=$(hostname -s) -sudo kubeadm config images pull - -echo "Preflight Check Passed: Downloaded All Required Images" - #sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml @@ -33,7 +29,8 @@ cp -i /etc/kubernetes/admin.conf $config_path/config touch $config_path/join.sh chmod +x $config_path/join.sh -kubeadm token create --print-join-command > $config_path/join.sh +join_cmd=`kubeadm token create --print-join-command` +echo $join_cmd "--cri-socket /var/run/cri-dockerd.sock" > $config_path/join.sh # Install Calico Network Plugin diff --git a/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml index 245a62553..20a7207ac 100644 --- a/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml +++ b/cicd/k8s-calico-ipsec-ha/yaml/kubeadm-config.yaml @@ -14,6 +14,7 @@ nodeRegistration: imagePullPolicy: IfNotPresent name: master taints: null + criSocket: unix:///var/run/cri-dockerd.sock --- apiVersion: kubeadm.k8s.io/v1beta3 certificatesDir: /etc/kubernetes/pki @@ -67,3 +68,7 @@ metricsBindAddress: 127.0.0.1:10249 nodePortAddresses: null oomScoreAdj: -999 portRange: "" +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd From 10159cd9973c0e3575a69c90970e51de99a3bd0a Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Thu, 26 Sep 2024 16:13:00 +0900 Subject: [PATCH 18/34] sctp: enhancements for handling crc32 --- loxilb-ebpf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/loxilb-ebpf b/loxilb-ebpf index dc6cb12e8..696b6d18b 160000 --- a/loxilb-ebpf +++ b/loxilb-ebpf @@ -1 +1 @@ -Subproject commit dc6cb12e876c873241558f1bbb9cf3e353965bc5 +Subproject commit 696b6d18b29bca0a032f4353a814c8330ba89a8e From 0a40530241dc2c3f582776ab8a175dccd9faa8c2 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Thu, 26 Sep 2024 16:36:11 +0900 Subject: [PATCH 19/34] sctp: enhancements for handling crc32 --- options/options.go | 2 +- pkg/loxinet/loxinet.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/options/options.go b/options/options.go index e334eb4a2..e139cec39 100644 --- a/options/options.go +++ b/options/options.go @@ -22,7 +22,7 @@ var Opts struct { LogLevel string `long:"loglevel" description:"One of debug,info,error,warning,notice,critical,emergency,alert" default:"debug"` CPUProfile string `long:"cpuprofile" description:"Enable cpu profiling and specify file to use" default:"none" env:"CPUPROF"` Prometheus bool `short:"p" long:"prometheus" description:"Run prometheus thread"` - CSumDisable bool `long:"disable-csum" description:"Disable checksum update(experimental)"` + CRC32SumDisable bool `long:"disable-crc32" description:"Disable crc32 checksum update(experimental)"` PassiveEPProbe bool `long:"passive-probe" description:"Enable passive liveness probes(experimental)"` RssEnable bool `long:"rss-enable" description:"Enable rss optimization(experimental)"` EgrHooks bool `long:"egr-hooks" description:"Enable eBPF egress hooks(experimental)"` diff --git a/pkg/loxinet/loxinet.go b/pkg/loxinet/loxinet.go index 65cb4586d..68386c64e 100644 --- a/pkg/loxinet/loxinet.go +++ 
b/pkg/loxinet/loxinet.go @@ -242,7 +242,7 @@ func loxiNetInit() { mh.self = opts.Opts.ClusterSelf mh.rssEn = opts.Opts.RssEnable mh.eHooks = opts.Opts.EgrHooks - mh.sumDis = opts.Opts.CSumDisable + mh.sumDis = opts.Opts.CRC32SumDisable mh.pProbe = opts.Opts.PassiveEPProbe mh.lSockPolicy = opts.Opts.LocalSockPolicy mh.sockMapEn = opts.Opts.SockMapSupport From 034e96e55b29612c719c27d8b4b7a2d5613f171f Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Fri, 27 Sep 2024 19:54:38 +0900 Subject: [PATCH 20/34] loxilb-io/kube-loxilb#184 Clear inactive endpoints on LB endpoint update --- common/common.go | 4 +- loxilb-ebpf | 2 +- pkg/loxinet/apiclient.go | 8 +- pkg/loxinet/dpbroker.go | 18 +-- pkg/loxinet/dpebpf_linux.go | 20 +-- pkg/loxinet/loxinettest.go | 4 +- pkg/loxinet/rules.go | 255 +++++++++++++++++++----------------- 7 files changed, 162 insertions(+), 149 deletions(-) diff --git a/common/common.go b/common/common.go index 5c24ce377..244a8c8e6 100644 --- a/common/common.go +++ b/common/common.go @@ -511,9 +511,9 @@ const ( type LBOp int32 const ( - // LBOPAdd - Add te LB rule (replace if existing) + // LBOPAdd - Add the LB rule (replace if existing) LBOPAdd LBOp = iota - // LBModeOneArm - Attach End-Points + // LBOPAttach - Attach End-Points LBOPAttach // LBOPDetach - Detach End-Points LBOPDetach diff --git a/loxilb-ebpf b/loxilb-ebpf index 696b6d18b..9e7d51b12 160000 --- a/loxilb-ebpf +++ b/loxilb-ebpf @@ -1 +1 @@ -Subproject commit 696b6d18b29bca0a032f4353a814c8330ba89a8e +Subproject commit 9e7d51b121b5f49a6cd477bf521ab63c3f9587d7 diff --git a/pkg/loxinet/apiclient.go b/pkg/loxinet/apiclient.go index 69bde3640..5c6feb4df 100644 --- a/pkg/loxinet/apiclient.go +++ b/pkg/loxinet/apiclient.go @@ -331,7 +331,7 @@ func (na *NetAPIStruct) NetLbRuleAdd(lm *cmn.LbRuleMod) (int, error) { mh.mtx.Lock() defer mh.mtx.Unlock() var ips []string - ret, err := mh.zr.Rules.AddNatLbRule(lm.Serv, lm.SecIPs[:], lm.Eps[:]) + ret, err := mh.zr.Rules.AddLbRule(lm.Serv, lm.SecIPs[:], lm.Eps[:]) if err == nil && lm.Serv.Bgp { if mh.bgp != nil { ips = append(ips, lm.Serv.ServIP) @@ -354,8 +354,8 @@ func (na *NetAPIStruct) NetLbRuleDel(lm *cmn.LbRuleMod) (int, error) { mh.mtx.Lock() defer mh.mtx.Unlock() - ips := mh.zr.Rules.GetNatLbRuleSecIPs(lm.Serv) - ret, err := mh.zr.Rules.DeleteNatLbRule(lm.Serv) + ips := mh.zr.Rules.GetLBRuleSecIPs(lm.Serv) + ret, err := mh.zr.Rules.DeleteLbRule(lm.Serv) if lm.Serv.Bgp { if mh.bgp != nil { ips = append(ips, lm.Serv.ServIP) @@ -372,7 +372,7 @@ func (na *NetAPIStruct) NetLbRuleGet() ([]cmn.LbRuleMod, error) { if na.BgpPeerMode { return nil, errors.New("running in bgp only mode") } - ret, err := mh.zr.Rules.GetNatLbRule() + ret, err := mh.zr.Rules.GetLBRule() return ret, err } diff --git a/pkg/loxinet/dpbroker.go b/pkg/loxinet/dpbroker.go index 457314b64..81b35b353 100644 --- a/pkg/loxinet/dpbroker.go +++ b/pkg/loxinet/dpbroker.go @@ -285,8 +285,8 @@ const ( DpE2EHTTPS ) -// NatDpWorkQ - work queue entry for nat related operation -type NatDpWorkQ struct { +// LBDpWorkQ - work queue entry for lb related operation +type LBDpWorkQ struct { Work DpWorkT Status *DpStatusT ZoneNum int @@ -431,8 +431,8 @@ type DpHookInterface interface { DpNextHopDel(*NextHopDpWorkQ) int DpRouteAdd(*RouteDpWorkQ) int DpRouteDel(*RouteDpWorkQ) int - DpNatLbRuleAdd(*NatDpWorkQ) int - DpNatLbRuleDel(*NatDpWorkQ) int + DpLBRuleAdd(*LBDpWorkQ) int + DpLBRuleDel(*LBDpWorkQ) int DpFwRuleAdd(w *FwDpWorkQ) int DpFwRuleDel(w *FwDpWorkQ) int DpStat(*StatDpWorkQ) int @@ -685,11 +685,11 @@ func (dp *DpH) 
DpWorkOnRoute(rtWq *RouteDpWorkQ) DpRetT { } // DpWorkOnNatLb - routine to work on a NAT lb work queue request -func (dp *DpH) DpWorkOnNatLb(nWq *NatDpWorkQ) DpRetT { +func (dp *DpH) DpWorkOnNatLb(nWq *LBDpWorkQ) DpRetT { if nWq.Work == DpCreate { - return dp.DpHooks.DpNatLbRuleAdd(nWq) + return dp.DpHooks.DpLBRuleAdd(nWq) } else if nWq.Work == DpRemove { - return dp.DpHooks.DpNatLbRuleDel(nWq) + return dp.DpHooks.DpLBRuleDel(nWq) } return DpWqUnkErr @@ -808,7 +808,7 @@ func DpWorkSingle(dp *DpH, m interface{}) DpRetT { ret = dp.DpWorkOnNextHop(mq) case *RouteDpWorkQ: ret = dp.DpWorkOnRoute(mq) - case *NatDpWorkQ: + case *LBDpWorkQ: ret = dp.DpWorkOnNatLb(mq) case *UlClDpWorkQ: ret = dp.DpWorkOnUlCl(mq) @@ -875,7 +875,7 @@ func (dp *DpH) DpMapGetCt4() []cmn.CtInfo { for _, dCti := range r { servName = "-" mh.mtx.Lock() - rule := mh.zr.Rules.GetNatLbRuleByID(dCti.RuleID) + rule := mh.zr.Rules.GetLBRuleByID(dCti.RuleID) mh.mtx.Unlock() if rule != nil { servName = rule.name diff --git a/pkg/loxinet/dpebpf_linux.go b/pkg/loxinet/dpebpf_linux.go index 2bb55e8a8..36b855870 100644 --- a/pkg/loxinet/dpebpf_linux.go +++ b/pkg/loxinet/dpebpf_linux.go @@ -947,8 +947,8 @@ func (e *DpEbpfH) DpRouteDel(w *RouteDpWorkQ) int { return DpRouteMod(w) } -// DpNatLbRuleMod - routine to work on a ebpf nat-lb change request -func DpNatLbRuleMod(w *NatDpWorkQ) int { +// DpLBRuleMod - routine to work on a ebpf lb change request +func DpLBRuleMod(w *LBDpWorkQ) int { key := new(natKey) @@ -1088,9 +1088,9 @@ func DpNatLbRuleMod(w *NatDpWorkQ) int { return EbpfErrWqUnk } -// DpNatLbRuleAdd - routine to work on a ebpf nat-lb add request -func (e *DpEbpfH) DpNatLbRuleAdd(w *NatDpWorkQ) int { - ec := DpNatLbRuleMod(w) +// DpLBRuleAdd - routine to work on a ebpf lb add request +func (e *DpEbpfH) DpLBRuleAdd(w *LBDpWorkQ) int { + ec := DpLBRuleMod(w) if ec != 0 { *w.Status = DpCreateErr } else { @@ -1099,9 +1099,9 @@ func (e *DpEbpfH) DpNatLbRuleAdd(w *NatDpWorkQ) int { return ec } -// DpNatLbRuleDel - routine to work on a ebpf nat-lb delete request -func (e *DpEbpfH) DpNatLbRuleDel(w *NatDpWorkQ) int { - return DpNatLbRuleMod(w) +// DpLBRuleDel - routine to work on a ebpf lb delete request +func (e *DpEbpfH) DpLBRuleDel(w *LBDpWorkQ) int { + return DpLBRuleMod(w) } // DpStat - routine to work on a ebpf map statistics request @@ -1956,7 +1956,7 @@ func dpCTMapNotifierWorker(cti *DpCtInfo) { if addOp { // Need to completely initialize the cti mh.mtx.Lock() - r := mh.zr.Rules.GetNatLbRuleByID(uint32(act.rid)) + r := mh.zr.Rules.GetLBRuleByID(uint32(act.rid)) mh.mtx.Unlock() if r == nil { return @@ -2213,7 +2213,7 @@ func (e *DpEbpfH) DpCtAdd(w *DpCtInfo) int { serv.BlockNum = w.BlockNum mh.mtx.Lock() - r := mh.zr.Rules.GetNatLbRuleByServArgs(serv) + r := mh.zr.Rules.GetLBRuleByServArgs(serv) mh.mtx.Unlock() if r == nil || len(w.PVal) == 0 || len(w.PKey) == 0 || w.CState != "est" { diff --git a/pkg/loxinet/loxinettest.go b/pkg/loxinet/loxinettest.go index bd5201bb2..238a31c18 100644 --- a/pkg/loxinet/loxinettest.go +++ b/pkg/loxinet/loxinettest.go @@ -289,12 +289,12 @@ func TestLoxinet(t *testing.T) { Weight: 2, }, } - _, err = mh.zr.Rules.AddNatLbRule(lbServ, nil, lbEps[:]) + _, err = mh.zr.Rules.AddLbRule(lbServ, nil, lbEps[:]) if err != nil { t.Errorf("failed to add nat lb rule for 10.10.10.1\n") } - _, err = mh.zr.Rules.DeleteNatLbRule(lbServ) + _, err = mh.zr.Rules.DeleteLbRule(lbServ) if err != nil { t.Errorf("failed to delete nat lb rule for 10.10.10.1\n") } diff --git a/pkg/loxinet/rules.go b/pkg/loxinet/rules.go 
index 73a173456..b046f2636 100644 --- a/pkg/loxinet/rules.go +++ b/pkg/loxinet/rules.go @@ -78,7 +78,7 @@ const ( // constants const ( - MaxNatEndPoints = 32 + MaxLBEndPoints = 32 DflLbaInactiveTries = 2 // Default number of inactive tries before LB arm is turned off MaxDflLbaInactiveTries = 100 // Max number of inactive tries before LB arm is turned off DflLbaCheckTimeout = 10 // Default timeout for checking LB arms @@ -209,28 +209,28 @@ type epHost struct { opts epHostOpts } -type ruleNatEp struct { +type ruleLBEp struct { xIP net.IP rIP net.IP xPort uint16 weight uint8 inActTries int - inActive bool + inActiveEP bool noService bool chkVal bool stat ruleStat - foldEndPoints []ruleNatEp + foldEndPoints []ruleLBEp foldRuleKey string } -type ruleNatSIP struct { +type ruleLBSIP struct { sIP net.IP } -type ruleNatActs struct { +type ruleLBActs struct { mode cmn.LBMode sel cmn.EpSelect - endPoints []ruleNatEp + endPoints []ruleLBEp } type ruleFwOpt struct { @@ -284,7 +284,7 @@ type ruleEnt struct { pTO uint32 act ruleAct privIP net.IP - secIP []ruleNatSIP + secIP []ruleLBSIP stat ruleStat name string secMode cmn.LBSec @@ -682,7 +682,7 @@ func (a *ruleAct) String() string { } switch na := a.action.(type) { - case *ruleNatActs: + case *ruleLBActs: if na.mode == cmn.LBModeOneArm { ks += fmt.Sprintf("%s", "onearm:") } else if na.mode == cmn.LBModeHostOneArm { @@ -693,7 +693,7 @@ func (a *ruleAct) String() string { for _, nf := range n.foldEndPoints { ks += fmt.Sprintf("feip-%s,fep-%d,fw-%d,", nf.xIP.String(), nf.xPort, nf.weight) - if nf.inActive || nf.noService { + if nf.inActiveEP || nf.noService { ks += fmt.Sprintf("dead|") } else { ks += fmt.Sprintf("alive|") @@ -702,7 +702,7 @@ func (a *ruleAct) String() string { } else { ks += fmt.Sprintf("eip-%s,ep-%d,w-%d,", n.xIP.String(), n.xPort, n.weight) - if n.inActive || n.noService { + if n.inActiveEP || n.noService { ks += fmt.Sprintf("dead|") } else { ks += fmt.Sprintf("alive|") @@ -740,11 +740,11 @@ func (R *RuleH) Rules2Json() ([]byte, error) { return nil, errors.New("malformed service proto") } t.ServPort = data.tuples.l4Dst.val - t.Sel = data.act.action.(*ruleNatActs).sel - t.Mode = data.act.action.(*ruleNatActs).mode + t.Sel = data.act.action.(*ruleLBActs).sel + t.Mode = data.act.action.(*ruleLBActs).mode // Make Endpoints - tmpEp := data.act.action.(*ruleNatActs).endPoints + tmpEp := data.act.action.(*ruleLBActs).endPoints for _, ep := range tmpEp { eps = append(eps, cmn.LbEndPointArg{ EpIP: ep.xIP.String(), @@ -768,8 +768,8 @@ func (R *RuleH) Rules2Json() ([]byte, error) { return bret, nil } -// GetNatLbRule - get all rules and pack them into a cmn.LbRuleMod slice -func (R *RuleH) GetNatLbRule() ([]cmn.LbRuleMod, error) { +// GetLBRule - get all rules and pack them into a cmn.LbRuleMod slice +func (R *RuleH) GetLBRule() ([]cmn.LbRuleMod, error) { var res []cmn.LbRuleMod for _, data := range R.tables[RtLB].eMap { @@ -790,8 +790,8 @@ func (R *RuleH) GetNatLbRule() ([]cmn.LbRuleMod, error) { return []cmn.LbRuleMod{}, errors.New("malformed service proto") } ret.Serv.ServPort = data.tuples.l4Dst.val - ret.Serv.Sel = data.act.action.(*ruleNatActs).sel - ret.Serv.Mode = data.act.action.(*ruleNatActs).mode + ret.Serv.Sel = data.act.action.(*ruleLBActs).sel + ret.Serv.Mode = data.act.action.(*ruleLBActs).mode ret.Serv.Monitor = data.hChk.actChk ret.Serv.InactiveTimeout = data.iTO ret.Serv.Bgp = data.bgp @@ -815,14 +815,14 @@ func (R *RuleH) GetNatLbRule() ([]cmn.LbRuleMod, error) { data.DP(DpStatsGetImm) // Make Endpoints - tmpEp := 
data.act.action.(*ruleNatActs).endPoints + tmpEp := data.act.action.(*ruleLBActs).endPoints for _, ep := range tmpEp { state := "active" if ep.noService { state = "inactive" } - if ep.inActive { + if ep.inActiveEP { continue } @@ -863,7 +863,7 @@ func validateXlateEPWeights(servEndPoints []cmn.LbEndPointArg) (int, error) { return 0, nil } -func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleNatEp, doAddOp bool, liveCheckEn bool) { +func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleLBEp, doAddOp bool, liveCheckEn bool) { var hopts epHostOpts pType := "" pPort := uint16(0) @@ -905,14 +905,14 @@ func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleNatEp, doAddOp bool, li hopts.probePort = pPort } - if mh.pProbe == true || liveCheckEn { + if mh.pProbe || liveCheckEn { hopts.probeActivated = true } epKey := makeEPKey(nep.xIP.String(), pType, pPort) if doAddOp { - if nep.inActive != true { + if !nep.inActiveEP { R.AddEPHost(false, nep.xIP.String(), epKey, hopts) } } else { @@ -921,8 +921,8 @@ func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleNatEp, doAddOp bool, li } } -// GetNatLbRuleByID - Get a NAT rule by its identifier -func (R *RuleH) GetNatLbRuleByID(ruleID uint32) *ruleEnt { +// GetLBRuleByID - Get a LB rule by its identifier +func (R *RuleH) GetLBRuleByID(ruleID uint32) *ruleEnt { if ruleID < RtMaximumLbs { return R.tables[RtLB].rArr[ruleID] } @@ -930,8 +930,8 @@ func (R *RuleH) GetNatLbRuleByID(ruleID uint32) *ruleEnt { return nil } -// GetNatLbRuleByServArgs - Get a NAT rule by its service args -func (R *RuleH) GetNatLbRuleByServArgs(serv cmn.LbServiceArg) *ruleEnt { +// GetLBRuleByServArgs - Get a LB rule by its service args +func (R *RuleH) GetLBRuleByServArgs(serv cmn.LbServiceArg) *ruleEnt { var ipProto uint8 service := "" if tk.IsNetIPv4(serv.ServIP) { @@ -965,8 +965,8 @@ func (R *RuleH) GetNatLbRuleByServArgs(serv cmn.LbServiceArg) *ruleEnt { return R.tables[RtLB].eMap[rt.ruleKey()] } -// GetNatLbRuleSecIPs - Get secondary IPs for SCTP NAT rule by its service args -func (R *RuleH) GetNatLbRuleSecIPs(serv cmn.LbServiceArg) []string { +// GetLBRuleSecIPs - Get secondary IPs for SCTP LB rule by its service args +func (R *RuleH) GetLBRuleSecIPs(serv cmn.LbServiceArg) []string { var ipProto uint8 var ips []string service := "" @@ -1006,7 +1006,7 @@ func (R *RuleH) electEPSrc(r *ruleEnt) bool { addrRslv := false switch na := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: { for idx := range na.endPoints { np := &na.endPoints[idx] @@ -1122,7 +1122,7 @@ func (R *RuleH) syncEPHostState2Rule(rule *ruleEnt, checkNow bool) bool { rChg := false if checkNow || time.Duration(time.Now().Sub(rule.sT).Seconds()) >= time.Duration(R.cfg.RuleInactChkTime) { switch na := rule.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: if rule.tuples.l4Prot.val == 6 { sType = HostProbeConnectTCP } else if rule.tuples.l4Prot.val == 17 { @@ -1142,14 +1142,14 @@ func (R *RuleH) syncEPHostState2Rule(rule *ruleEnt, checkNow bool) bool { if np.noService == false { np.noService = true rChg = true - tk.LogIt(tk.LogDebug, "nat lb-rule service-down ep - %s:%s\n", sType, n.xIP.String()) + tk.LogIt(tk.LogDebug, "lb-rule service-down ep - %s:%s\n", sType, n.xIP.String()) } } else { if n.noService { np.noService = false np.inActTries = 0 rChg = true - tk.LogIt(tk.LogDebug, "nat lb-rule service-up ep - %s:%s\n", sType, n.xIP.String()) + tk.LogIt(tk.LogDebug, "lb-rule service-up ep - %s:%s\n", sType, n.xIP.String()) } } } @@ -1166,7 +1166,7 @@ func (R *RuleH) foldRecursiveEPs(r 
*ruleEnt) { for _, tr := range R.tables[RtLB].eMap { switch atr := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: for i := range atr.endPoints { rep := &atr.endPoints[i] service := "" @@ -1184,15 +1184,15 @@ func (R *RuleH) foldRecursiveEPs(r *ruleEnt) { l4dst := rule16Tuple{rep.xPort, 0xffff} rtk := ruleTuples{l3Dst: l3dst, l4Prot: l4prot, l4Dst: l4dst, pref: r.tuples.pref} if rtk.ruleKey() == tr.tuples.ruleKey() { - rep.foldEndPoints = tr.act.action.(*ruleNatActs).endPoints + rep.foldEndPoints = tr.act.action.(*ruleLBActs).endPoints rep.foldRuleKey = tr.tuples.ruleKey() } } } switch at := tr.act.action.(type) { - case *ruleNatActs: - if r.act.action.(*ruleNatActs).sel != at.sel || r.act.action.(*ruleNatActs).sel == cmn.LbSelPrio { + case *ruleLBActs: + if r.act.action.(*ruleLBActs).sel != at.sel || r.act.action.(*ruleLBActs).sel == cmn.LbSelPrio { continue } fold := false @@ -1214,13 +1214,13 @@ func (R *RuleH) foldRecursiveEPs(r *ruleEnt) { l4dst := rule16Tuple{ep.xPort, 0xffff} rtk := ruleTuples{l3Dst: l3dst, l4Prot: l4prot, l4Dst: l4dst, pref: r.tuples.pref} if r.tuples.ruleKey() == rtk.ruleKey() { - ep.foldEndPoints = r.act.action.(*ruleNatActs).endPoints + ep.foldEndPoints = r.act.action.(*ruleLBActs).endPoints ep.foldRuleKey = r.tuples.ruleKey() fold = true } if fold { tr.DP(DpCreate) - tk.LogIt(tk.LogDebug, "nat lb-rule folded - %d:%s-%s\n", tr.ruleNum, tr.tuples.String(), tr.act.String()) + tk.LogIt(tk.LogDebug, "lb-rule folded - %d:%s-%s\n", tr.ruleNum, tr.tuples.String(), tr.act.String()) } } } @@ -1233,7 +1233,7 @@ func (R *RuleH) unFoldRecursiveEPs(r *ruleEnt) { selPolicy := cmn.LbSelRr switch at := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: selPolicy = at.sel } @@ -1242,7 +1242,7 @@ func (R *RuleH) unFoldRecursiveEPs(r *ruleEnt) { continue } switch atr := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: for i := range atr.endPoints { rep := &atr.endPoints[i] if rep.foldRuleKey == tr.tuples.ruleKey() { @@ -1252,7 +1252,7 @@ func (R *RuleH) unFoldRecursiveEPs(r *ruleEnt) { } } switch at := tr.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: if selPolicy != at.sel || selPolicy == cmn.LbSelPrio { continue } @@ -1262,7 +1262,7 @@ func (R *RuleH) unFoldRecursiveEPs(r *ruleEnt) { ep.foldEndPoints = nil ep.foldRuleKey = "" tr.DP(DpCreate) - tk.LogIt(tk.LogDebug, "nat lb-rule unfolded - %d:%s-%s\n", tr.ruleNum, tr.tuples.String(), tr.act.String()) + tk.LogIt(tk.LogDebug, "lb-rule unfolded - %d:%s-%s\n", tr.ruleNum, tr.tuples.String(), tr.act.String()) } } } @@ -1281,8 +1281,9 @@ func (R *RuleH) addVIPSys(r *ruleEnt) { } } -func getLBArms(oldEps []ruleNatEp, newEps []ruleNatEp, oper cmn.LBOp) (bool, []ruleNatEp) { - var retEps []ruleNatEp +func getLBConsolidatedEPs(oldEps []ruleLBEp, newEps []ruleLBEp, oper cmn.LBOp) (bool, []ruleLBEp, []ruleLBEp) { + var retEps []ruleLBEp + var delEps []ruleLBEp ruleChg := false found := false @@ -1292,9 +1293,9 @@ func getLBArms(oldEps []ruleNatEp, newEps []ruleNatEp, oper cmn.LBOp) (bool, []r eEp.xPort == nEp.xPort { e := &oldEps[i] n := &newEps[j] - if eEp.inActive && oper != cmn.LBOPDetach { + if eEp.inActiveEP && oper != cmn.LBOPDetach { ruleChg = true - e.inActive = false + e.inActiveEP = false } if e.weight != nEp.weight { ruleChg = true @@ -1311,20 +1312,30 @@ func getLBArms(oldEps []ruleNatEp, newEps []ruleNatEp, oper cmn.LBOp) (bool, []r // Remove LB arms from an existing LB if oper == cmn.LBOPDetach { if !found { - return false, oldEps + return false, oldEps, delEps } for i := 
range oldEps { e := &oldEps[i] if !e.chkVal { retEps = append(retEps, *e) + } else { + e.chkVal = false + delEps = append(delEps, *e) } } - return true, retEps + return true, retEps, delEps + } + + for i := range oldEps { + e := &oldEps[i] + if !e.chkVal && !e.inActiveEP { + delEps = append(delEps, *e) + } } retEps = oldEps - // Attach LB arms to an existing LB + // Attach LB endpoints to an existing LB for i, nEp := range newEps { n := &newEps[i] if !nEp.chkVal { @@ -1338,19 +1349,20 @@ func getLBArms(oldEps []ruleNatEp, newEps []ruleNatEp, oper cmn.LBOp) (bool, []r e := &retEps[i] if !eEp.chkVal && oper == cmn.LBOPAdd { ruleChg = true - e.inActive = true + e.inActiveEP = true } e.chkVal = false } - return ruleChg, retEps + + return ruleChg, retEps, delEps } -// AddNatLbRule - Add a service LB nat rule. The service details are passed in serv argument, +// AddLbRule - Add a service LB rule. The service details are passed in serv argument, // and end-point information is passed in the slice servEndPoints. On success, // it will return 0 and nil error, else appropriate return code and error string will be set -func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, servEndPoints []cmn.LbEndPointArg) (int, error) { - var natActs ruleNatActs - var nSecIP []ruleNatSIP +func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, servEndPoints []cmn.LbEndPointArg) (int, error) { + var lBActs ruleLBActs + var nSecIP []ruleLBSIP var ipProto uint8 var privIP net.IP @@ -1415,7 +1427,7 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, } // Currently support a maximum of MAX_NAT_EPS - if len(servEndPoints) <= 0 || len(servEndPoints) > MaxNatEndPoints { + if len(servEndPoints) <= 0 || len(servEndPoints) > MaxLBEndPoints { return RuleEpCountErr, errors.New("endpoints-range error") } @@ -1456,7 +1468,7 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, if tk.IsNetIPv4(serv.ServIP) && tk.IsNetIPv6(k.SecIP) { return RuleUnknownServiceErr, errors.New("malformed-secIP nat46 error") } - sip := ruleNatSIP{pNetAddr} + sip := ruleLBSIP{pNetAddr} nSecIP = append(nSecIP, sip) } @@ -1467,14 +1479,14 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, }) if serv.Mode == cmn.LBModeHostOneArm && !sNetAddr.IP.IsUnspecified() { - tk.LogIt(tk.LogInfo, "nat lb-rule %s-%v-%s hostarm needs unspec VIP\n", serv.ServIP, serv.ServPort, serv.Proto) + tk.LogIt(tk.LogInfo, "lb-rule %s-%v-%s hostarm needs unspec VIP\n", serv.ServIP, serv.ServPort, serv.Proto) return RuleArgsErr, errors.New("hostarm-args error") } - natActs.sel = serv.Sel - natActs.mode = cmn.LBMode(serv.Mode) + lBActs.sel = serv.Sel + lBActs.mode = cmn.LBMode(serv.Mode) - if natActs.mode == cmn.LBModeOneArm || natActs.mode == cmn.LBModeFullNAT || natActs.mode == cmn.LBModeHostOneArm || serv.Monitor { + if lBActs.mode == cmn.LBModeOneArm || lBActs.mode == cmn.LBModeFullNAT || lBActs.mode == cmn.LBModeHostOneArm || serv.Monitor { activateProbe = true } @@ -1491,16 +1503,16 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, return RuleUnknownServiceErr, errors.New("malformed-service error") } - if natActs.mode == cmn.LBModeDSR && k.EpPort != serv.ServPort { + if lBActs.mode == cmn.LBModeDSR && k.EpPort != serv.ServPort { return RuleUnknownServiceErr, errors.New("malformed-service dsr-port error") } - ep := ruleNatEp{pNetAddr, xNetAddr, k.EpPort, k.Weight, 0, false, false, false, ruleStat{0, 0}, 
nil, ""} - natActs.endPoints = append(natActs.endPoints, ep) + ep := ruleLBEp{pNetAddr, xNetAddr, k.EpPort, k.Weight, 0, false, false, false, ruleStat{0, 0}, nil, ""} + lBActs.endPoints = append(lBActs.endPoints, ep) } - sort.SliceStable(natActs.endPoints, func(i, j int) bool { - a := tk.IPtonl(natActs.endPoints[i].xIP) - b := tk.IPtonl(natActs.endPoints[j].xIP) + sort.SliceStable(lBActs.endPoints, func(i, j int) bool { + a := tk.IPtonl(lBActs.endPoints[i].xIP) + b := tk.IPtonl(lBActs.endPoints[j].xIP) return a < b }) @@ -1515,15 +1527,15 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, if !reflect.DeepEqual(eRule.secIP, nSecIP) { return RuleUnknownServiceErr, errors.New("secIP modify error") } - // If a NAT rule already exists, we try not reschuffle the order of the end-points. + // If a LB rule already exists, we try not reschuffle the order of the end-points. // We will try to append the new end-points at the end, while marking any other end-points // not in the new list as inactive - ruleChg, retEps := getLBArms(eRule.act.action.(*ruleNatActs).endPoints, natActs.endPoints, serv.Oper) + ruleChg, retEps, delEps := getLBConsolidatedEPs(eRule.act.action.(*ruleLBActs).endPoints, lBActs.endPoints, serv.Oper) if eRule.hChk.prbType != serv.ProbeType || eRule.hChk.prbPort != serv.ProbePort || eRule.hChk.prbReq != serv.ProbeReq || eRule.hChk.prbResp != serv.ProbeResp || - eRule.pTO != serv.PersistTimeout || eRule.act.action.(*ruleNatActs).sel != natActs.sel || - eRule.act.action.(*ruleNatActs).mode != natActs.mode { + eRule.pTO != serv.PersistTimeout || eRule.act.action.(*ruleLBActs).sel != lBActs.sel || + eRule.act.action.(*ruleLBActs).mode != lBActs.mode { ruleChg = true } @@ -1536,16 +1548,16 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, } if len(retEps) == 0 { - tk.LogIt(tk.LogDebug, "nat lb-rule %s has no-endpoints: to be deleted\n", eRule.tuples.String()) - return R.DeleteNatLbRule(serv) + tk.LogIt(tk.LogDebug, "lb-rule %s has no-endpoints: to be deleted\n", eRule.tuples.String()) + return R.DeleteLbRule(serv) } - if eRule.act.action.(*ruleNatActs).mode == cmn.LBModeFullProxy && natActs.mode != cmn.LBModeFullProxy || - eRule.act.action.(*ruleNatActs).mode != cmn.LBModeFullProxy && natActs.mode == cmn.LBModeFullProxy { + if eRule.act.action.(*ruleLBActs).mode == cmn.LBModeFullProxy && lBActs.mode != cmn.LBModeFullProxy || + eRule.act.action.(*ruleLBActs).mode != cmn.LBModeFullProxy && lBActs.mode == cmn.LBModeFullProxy { return RuleExistsErr, errors.New("lbrule-exist error: cant modify fullproxy rule mode") } - if eRule.act.action.(*ruleNatActs).mode == cmn.LBModeFullProxy { + if eRule.act.action.(*ruleLBActs).mode == cmn.LBModeFullProxy { eRule.DP(DpRemove) } @@ -1557,25 +1569,26 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, eRule.hChk.prbRetries = serv.ProbeRetries eRule.hChk.prbTimeo = serv.ProbeTimeout eRule.pTO = serv.PersistTimeout - eRule.act.action.(*ruleNatActs).sel = natActs.sel - eRule.act.action.(*ruleNatActs).endPoints = retEps - eRule.act.action.(*ruleNatActs).mode = natActs.mode + eRule.act.action.(*ruleLBActs).sel = lBActs.sel + eRule.act.action.(*ruleLBActs).endPoints = retEps + eRule.act.action.(*ruleLBActs).mode = lBActs.mode // Managed flag can't be modified on the fly // eRule.managed = serv.Managed if !serv.Snat { + R.modNatEpHost(eRule, delEps, false, activateProbe) R.modNatEpHost(eRule, retEps, true, activateProbe) R.electEPSrc(eRule) } eRule.sT = time.Now() 
eRule.iTO = serv.InactiveTimeout - tk.LogIt(tk.LogDebug, "nat lb-rule updated - %s:%s\n", eRule.tuples.String(), eRule.act.String()) + tk.LogIt(tk.LogDebug, "lb-rule updated - %s:%s\n", eRule.tuples.String(), eRule.act.String()) eRule.DP(DpCreate) return 0, nil } else if serv.Oper == cmn.LBOPDetach { - tk.LogIt(tk.LogInfo, "nat lb-rule %s-%v-%s does not exist\n", serv.ServIP, serv.ServPort, serv.Proto) + tk.LogIt(tk.LogInfo, "lb-rule %s-%v-%s does not exist\n", serv.ServIP, serv.ServPort, serv.Proto) return RuleNotExistsErr, errors.New("lbrule not-exists error") } @@ -1606,7 +1619,7 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, r.hChk.prbTimeo = serv.ProbeTimeout r.hChk.actChk = serv.Monitor - r.act.action = &natActs + r.act.action = &lBActs r.ruleNum, err = R.tables[RtLB].Mark.GetCounter() if err != nil { tk.LogIt(tk.LogError, "nat lb-rule - %s:%s hwm error\n", r.tuples.String(), r.act.String()) @@ -1629,14 +1642,14 @@ func (R *RuleH) AddNatLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, if !serv.Snat { R.foldRecursiveEPs(r) - R.modNatEpHost(r, natActs.endPoints, true, activateProbe) + R.modNatEpHost(r, lBActs.endPoints, true, activateProbe) R.electEPSrc(r) if serv.Mode == cmn.LBModeHostOneArm { R.mkHostAssocs(r) } } - tk.LogIt(tk.LogDebug, "nat lb-rule added - %d:%s-%s\n", r.ruleNum, r.tuples.String(), r.act.String()) + tk.LogIt(tk.LogDebug, "lb-rule added - %d:%s-%s\n", r.ruleNum, r.tuples.String(), r.act.String()) R.tables[RtLB].eMap[rt.ruleKey()] = r if r.ruleNum < RtMaximumLbs { @@ -1660,10 +1673,10 @@ func (R *RuleH) deleteVIPSys(r *ruleEnt) { } } -// DeleteNatLbRule - Delete a service LB nat rule. The service details are passed in serv argument. +// DeleteLbRule - Delete a service LB rule. The service details are passed in serv argument. 
// On success, it will return 0 and nil error, else appropriate return code and // error string will be set -func (R *RuleH) DeleteNatLbRule(serv cmn.LbServiceArg) (int, error) { +func (R *RuleH) DeleteLbRule(serv cmn.LbServiceArg) (int, error) { var ipProto uint8 service := "" @@ -1703,9 +1716,9 @@ func (R *RuleH) DeleteNatLbRule(serv cmn.LbServiceArg) (int, error) { defer R.tables[RtLB].Mark.PutCounter(rule.ruleNum) - eEps := rule.act.action.(*ruleNatActs).endPoints + eEps := rule.act.action.(*ruleLBActs).endPoints activatedProbe := false - if rule.act.action.(*ruleNatActs).mode == cmn.LBModeOneArm || rule.act.action.(*ruleNatActs).mode == cmn.LBModeFullNAT || rule.act.action.(*ruleNatActs).mode == cmn.LBModeHostOneArm || rule.hChk.actChk { + if rule.act.action.(*ruleLBActs).mode == cmn.LBModeOneArm || rule.act.action.(*ruleLBActs).mode == cmn.LBModeFullNAT || rule.act.action.(*ruleLBActs).mode == cmn.LBModeHostOneArm || rule.hChk.actChk { activatedProbe = true } if rule.act.actType != RtActSnat { @@ -1720,7 +1733,7 @@ func (R *RuleH) DeleteNatLbRule(serv cmn.LbServiceArg) (int, error) { R.deleteVIPSys(rule) - tk.LogIt(tk.LogDebug, "nat lb-rule deleted %s-%s\n", rule.tuples.String(), rule.act.String()) + tk.LogIt(tk.LogDebug, "lb-rule deleted %s-%s\n", rule.tuples.String(), rule.act.String()) rule.DP(DpRemove) @@ -1901,7 +1914,7 @@ func (R *RuleH) AddFwRule(fwRule cmn.FwRuleArg, fwOptArgs cmn.FwOptArg) (int, er snatEP := []cmn.LbEndPointArg{{EpIP: fwOpts.opt.snatIP, EpPort: fwOpts.opt.snatPort}} - _, err := R.AddNatLbRule(servArg, nil, snatEP) + _, err := R.AddLbRule(servArg, nil, snatEP) if err != nil { tk.LogIt(tk.LogError, "fw-rule - %s:%s (%s) snat create error\n", r.tuples.String(), r.act.String(), err) return RuleArgsErr, errors.New("rule-snat error") @@ -1991,7 +2004,7 @@ func (R *RuleH) DeleteFwRule(fwRule cmn.FwRuleArg) (int, error) { servArg.Name = fmt.Sprintf("%s:%s:%d", "Masq", fwOpts.opt.snatIP, fwOpts.opt.snatPort) } - _, err := R.DeleteNatLbRule(servArg) + _, err := R.DeleteLbRule(servArg) if err != nil { tk.LogIt(tk.LogError, "fw-rule - %s:%s snat delete error\n", rule.tuples.String(), rule.act.String()) } @@ -2423,7 +2436,7 @@ func (R *RuleH) RulesSync() { rChg = R.electEPSrc(rule) rlChg := false switch at := rule.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: if at.mode == cmn.LBModeHostOneArm { rlChg = R.mkHostAssocs(rule) } @@ -2442,7 +2455,7 @@ func (R *RuleH) RulesSync() { rChg = R.syncEPHostState2Rule(rule, false) if rChg { - tk.LogIt(tk.LogDebug, "nat lb-Rule updated %d:%s,%s\n", rule.ruleNum, ruleKeys, ruleActs) + tk.LogIt(tk.LogDebug, "lb-Rule updated %d:%s,%s\n", rule.ruleNum, ruleKeys, ruleActs) rule.DP(DpCreate) } } @@ -2503,7 +2516,7 @@ func (R *RuleH) RuleDestructAll() { } lbs.ServPort = r.tuples.l4Dst.val - R.DeleteNatLbRule(lbs) + R.DeleteLbRule(lbs) } for _, r := range R.tables[RtFw].eMap { fwr.DstIP = r.tuples.l3Dst.addr.String() @@ -2536,7 +2549,7 @@ func (r *ruleEnt) VIP2DP(work DpWorkT) int { portMap := make(map[int]struct{}) if mh.lSockPolicy { switch at := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: for _, ep := range at.endPoints { if _, ok := portMap[int(ep.xPort)]; ok { continue @@ -2544,7 +2557,7 @@ func (r *ruleEnt) VIP2DP(work DpWorkT) int { portMap[int(ep.xPort)] = struct{}{} nVIPWork := new(SockVIPDpWorkQ) nVIPWork.Work = work - if ep.inActive { + if ep.inActiveEP { nVIPWork.Work = DpRemove } nVIPWork.VIP = r.tuples.l3Dst.addr.IP.Mask(r.tuples.l3Dst.addr.Mask) @@ -2558,14 +2571,14 @@ func (r *ruleEnt) 
VIP2DP(work DpWorkT) int { return 0 } -// Nat2DP - Sync state of nat-rule entity to data-path -func (r *ruleEnt) Nat2DP(work DpWorkT) int { +// LB2DP - Sync state of lb-rule entity to data-path +func (r *ruleEnt) LB2DP(work DpWorkT) int { if r.addrRslv { return -1 } - nWork := new(NatDpWorkQ) + nWork := new(LBDpWorkQ) nWork.Work = work nWork.Status = &r.sync @@ -2603,7 +2616,7 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { } switch at := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: switch { case at.sel == cmn.LbSelRr: nWork.EpSel = EpRR @@ -2629,23 +2642,23 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { if at.sel == cmn.LbSelPrio { j := 0 k := 0 - var small [MaxNatEndPoints]int - var neps [MaxNatEndPoints]ruleNatEp + var small [MaxLBEndPoints]int + var neps [MaxLBEndPoints]ruleLBEp for i, ep := range at.endPoints { - if ep.inActive { + if ep.inActiveEP { continue } oEp := &at.endPoints[i] - sw := (int(ep.weight) * MaxNatEndPoints) / 100 + sw := (int(ep.weight) * MaxLBEndPoints) / 100 if sw == 0 { small[k] = i k++ } - for x := 0; x < sw && j < MaxNatEndPoints; x++ { + for x := 0; x < sw && j < MaxLBEndPoints; x++ { neps[j].xIP = oEp.xIP neps[j].rIP = oEp.rIP neps[j].xPort = oEp.xPort - neps[j].inActive = oEp.inActive + neps[j].inActiveEP = oEp.inActiveEP neps[j].weight = oEp.weight if sw == 1 { small[k] = i @@ -2654,18 +2667,18 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { j++ } } - if j < MaxNatEndPoints { + if j < MaxLBEndPoints { v := 0 if k == 0 { k = len(at.endPoints) } - for j < MaxNatEndPoints { + for j < MaxLBEndPoints { idx := small[v%k] oEp := &at.endPoints[idx] neps[j].xIP = oEp.xIP neps[j].rIP = oEp.rIP neps[j].xPort = oEp.xPort - neps[j].inActive = oEp.inActive + neps[j].inActiveEP = oEp.inActiveEP neps[j].weight = oEp.weight j++ v++ @@ -2678,7 +2691,7 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { ep.RIP = e.rIP ep.XPort = e.xPort ep.Weight = e.weight - if e.inActive || e.noService { + if e.inActiveEP || e.noService { ep.InActive = true } nWork.endPoints = append(nWork.endPoints, ep) @@ -2693,7 +2706,7 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { ep.RIP = kf.rIP ep.XPort = kf.xPort ep.Weight = kf.weight - if kf.inActive || kf.noService { + if kf.inActiveEP || kf.noService { ep.InActive = true } @@ -2706,7 +2719,7 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { ep.RIP = k.rIP ep.XPort = k.xPort ep.Weight = k.weight - if k.inActive || k.noService { + if k.inActiveEP || k.noService { ep.InActive = true } @@ -2724,7 +2737,7 @@ func (r *ruleEnt) Nat2DP(work DpWorkT) int { if mode == cmn.LBModeHostOneArm { for locIP := range r.locIPs { if sIP := net.ParseIP(locIP); sIP != nil { - nWork1 := new(NatDpWorkQ) + nWork1 := new(LBDpWorkQ) *nWork1 = *nWork nWork1.ServiceIP = sIP mh.dp.ToDpCh <- nWork1 @@ -2842,7 +2855,7 @@ func (r *ruleEnt) DP(work DpWorkT) int { if work == DpStatsGet || work == DpStatsGetImm { if isNat { switch at := r.act.action.(type) { - case *ruleNatActs: + case *ruleLBActs: numEndPoints := 0 for i := range at.endPoints { nEP := &at.endPoints[i] @@ -2895,7 +2908,7 @@ func (r *ruleEnt) DP(work DpWorkT) int { } if isNat { - return r.Nat2DP(work) + return r.LB2DP(work) } return r.Fw2DP(work) @@ -2922,9 +2935,9 @@ func (R *RuleH) AdvRuleVIPIfL2(IP net.IP, eIP net.IP) error { } if loxinlp.AddAddrNoHook(IP.String()+"/32", "lo") != 0 { - tk.LogIt(tk.LogError, "nat lb-rule vip %s:%s add failed\n", IP.String(), "lo") + tk.LogIt(tk.LogError, "lb-rule vip %s:%s add failed\n", IP.String(), "lo") } else { - tk.LogIt(tk.LogInfo, "nat lb-rule vip %s:%s 
added\n", IP.String(), "lo") + tk.LogIt(tk.LogInfo, "lb-rule vip %s:%s added\n", IP.String(), "lo") } loxinlp.DelNeighNoHook(IP.String(), "") } @@ -2936,16 +2949,16 @@ func (R *RuleH) AdvRuleVIPIfL2(IP net.IP, eIP net.IP) error { case <-rCh: break case <-ctx.Done(): - tk.LogIt(tk.LogInfo, "nat lb-rule vip %s - iface %s : GratARP timeout\n", IP.String(), iface) + tk.LogIt(tk.LogInfo, "lb-rule vip %s - iface %s : GratARP timeout\n", IP.String(), iface) } } } else if ciState != "NOT_DEFINED" { if utils.IsIPHostAddr(IP.String()) { if loxinlp.DelAddrNoHook(IP.String()+"/32", "lo") != 0 { - tk.LogIt(tk.LogError, "nat lb-rule vip %s:%s delete failed\n", IP.String(), "lo") + tk.LogIt(tk.LogError, "lb-rule vip %s:%s delete failed\n", IP.String(), "lo") } else { - tk.LogIt(tk.LogInfo, "nat lb-rule vip %s:%s deleted\n", IP.String(), "lo") + tk.LogIt(tk.LogInfo, "lb-rule vip %s:%s deleted\n", IP.String(), "lo") } } } else { From a1232a6250102a418157ba2de857a9f02bcd5f0d Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Sat, 28 Sep 2024 00:58:30 +0900 Subject: [PATCH 21/34] loxilb-io/kube-loxilb#184 Clear inactive endpoints on LB endpoint update --- pkg/loxinet/rules.go | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/pkg/loxinet/rules.go b/pkg/loxinet/rules.go index b046f2636..a2b1862fe 100644 --- a/pkg/loxinet/rules.go +++ b/pkg/loxinet/rules.go @@ -218,6 +218,7 @@ type ruleLBEp struct { inActiveEP bool noService bool chkVal bool + epCreated bool stat ruleStat foldEndPoints []ruleLBEp foldRuleKey string @@ -877,7 +878,8 @@ func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleLBEp, doAddOp bool, liv } else { hopts.probeDuration = r.hChk.prbTimeo } - for _, nep := range endpoints { + for idx := range endpoints { + nep := &endpoints[idx] if r.tuples.l4Prot.val == 6 { pType = HostProbeConnectTCP pPort = nep.xPort @@ -912,11 +914,25 @@ func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleLBEp, doAddOp bool, liv epKey := makeEPKey(nep.xIP.String(), pType, pPort) if doAddOp { - if !nep.inActiveEP { - R.AddEPHost(false, nep.xIP.String(), epKey, hopts) + if !nep.inActiveEP && !nep.epCreated { + _, err := R.AddEPHost(false, nep.xIP.String(), epKey, hopts) + if err == nil { + nep.epCreated = true + } else { + tk.LogIt(tk.LogError, "add ep-host error %v : %s\n", epKey, err) + } + } else if nep.inActiveEP { + nep.epCreated = false } } else { - R.DeleteEPHost(false, epKey, nep.xIP.String(), hopts.probeType, hopts.probePort) + if nep.epCreated { + _, err := R.DeleteEPHost(false, epKey, nep.xIP.String(), hopts.probeType, hopts.probePort) + if err == nil { + nep.epCreated = false + } else { + tk.LogIt(tk.LogError, "delete ep-host error %v : %s\n", epKey, err) + } + } } } } @@ -1506,7 +1522,7 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, se if lBActs.mode == cmn.LBModeDSR && k.EpPort != serv.ServPort { return RuleUnknownServiceErr, errors.New("malformed-service dsr-port error") } - ep := ruleLBEp{pNetAddr, xNetAddr, k.EpPort, k.Weight, 0, false, false, false, ruleStat{0, 0}, nil, ""} + ep := ruleLBEp{pNetAddr, xNetAddr, k.EpPort, k.Weight, 0, false, false, false, false, ruleStat{0, 0}, nil, ""} lBActs.endPoints = append(lBActs.endPoints, ep) } From be57f53b80a7475088feee90f306874e2da1d520 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Sat, 28 Sep 2024 23:11:57 +0900 Subject: [PATCH 22/34] graceful handling when max endpoint limit is reached --- pkg/loxinet/rules.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) 
diff --git a/pkg/loxinet/rules.go b/pkg/loxinet/rules.go index a2b1862fe..f3aa2e31b 100644 --- a/pkg/loxinet/rules.go +++ b/pkg/loxinet/rules.go @@ -1442,7 +1442,7 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, se return RuleArgsErr, errors.New("malformed-service-pport error") } - // Currently support a maximum of MAX_NAT_EPS + // Currently support a maximum of MaxLBEndPoints if len(servEndPoints) <= 0 || len(servEndPoints) > MaxLBEndPoints { return RuleEpCountErr, errors.New("endpoints-range error") } @@ -1573,8 +1573,13 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, se return RuleExistsErr, errors.New("lbrule-exist error: cant modify fullproxy rule mode") } - if eRule.act.action.(*ruleLBActs).mode == cmn.LBModeFullProxy { + if eRule.act.action.(*ruleLBActs).mode == cmn.LBModeFullProxy || len(retEps) > MaxLBEndPoints { eRule.DP(DpRemove) + if len(retEps) > MaxLBEndPoints { + tk.LogIt(tk.LogInfo, "lb-rule %s-%v-%s reset all end-points (too many)\n", serv.ServIP, serv.ServPort, serv.Proto) + delEps = eRule.act.action.(*ruleLBActs).endPoints + retEps = lBActs.endPoints + } } // Update the rule From d691a82631eb459868839a39a0ba138b9a1f13d1 Mon Sep 17 00:00:00 2001 From: Trekkie <111065900+TrekkieCoder@users.noreply.github.com> Date: Sun, 29 Sep 2024 10:13:23 +0900 Subject: [PATCH 23/34] chore: Updated README --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index c0b15b75b..53de91e02 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,7 @@ Telco-cloud requires load-balancing and communication across various interfaces/ ## Getting Started #### loxilb as ext-cluster pod +- [K8s : loxilb ext-mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k8s-flannel-ext.md) - [K3s : loxilb with default flannel](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s_quick_start_flannel.md) - [K3s : loxilb with calico](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s_quick_start_calico.md) - [K3s : loxilb with cilium](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/quick_start_with_cilium.md) @@ -86,10 +87,12 @@ Telco-cloud requires load-balancing and communication across various interfaces/ - [EKS : loxilb ext-mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/eks-external.md) #### loxilb as in-cluster pod +- [K8s : loxilb in-cluster mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k8s-flannel-incluster.md) - [K3s : loxilb in-cluster mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s_quick_start_incluster.md) - [K0s : loxilb in-cluster mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k0s_quick_start_incluster.md) - [MicroK8s : loxilb in-cluster mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/microk8s_quick_start_incluster.md) - [EKS : loxilb in-cluster mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/eks-incluster.md) +- [RedHat OCP : loxilb in-cluster mode](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/rhocp-quickstart-incluster.md) #### loxilb as service-proxy (kube-proxy replacement) - [K3s : loxilb service-proxy with flannel](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/service-proxy-flannel.md) @@ -108,6 +111,7 @@ Telco-cloud requires load-balancing and communication across various interfaces/ - [How-To : Deploy loxilb with multi-AZ HA support in AWS](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/aws-multi-az.md) - [How-To : Deploy loxilb with multi-cloud HA 
support](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/multi-cloud-ha.md) - [How-To : Deploy loxilb with ingress-nginx](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-nginx-ingress.md) +- [How-To : Run loxilb in-cluster with secondary networks](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-incluster-multus.md) ## Knowledge-Base - [What is eBPF](ebpf.md) From 7cc8c40201ecb42df759513891dafa9cc24c29d4 Mon Sep 17 00:00:00 2001 From: Gyuseok Jung Date: Sun, 29 Sep 2024 15:12:23 +0900 Subject: [PATCH 24/34] Fix invalid Knowledge-Base links --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53de91e02..9273017bd 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ Telco-cloud requires load-balancing and communication across various interfaces/ - [How-To : Run loxilb in-cluster with secondary networks](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-incluster-multus.md) ## Knowledge-Base -- [What is eBPF](ebpf.md) +- [What is eBPF](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/ebpf.md) - [What is k8s service - load-balancer](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/lb.md) - [Architecture in brief](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/arch.md) - [Code organization](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/code.md) From c70577f5616afca9952ed4c4c5573e24841c843c Mon Sep 17 00:00:00 2001 From: Gyuseok Jung Date: Sun, 29 Sep 2024 15:15:17 +0900 Subject: [PATCH 25/34] Fix invalid Knowledge-Base links(README-KOR) --- README-KOR.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-KOR.md b/README-KOR.md index ae54f67ea..1b3f9a68d 100644 --- a/README-KOR.md +++ b/README-KOR.md @@ -108,7 +108,7 @@ loxilb는 기본적으로 L4 로드 밸런서/서비스 프록시로 작동합 - [How-To : ingress-nginx와 함께 loxilb 배포](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-nginx-ingress.md) ## 배경 지식 -- [eBPF란 무엇인가](ebpf.md) +- [eBPF란 무엇인가](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/ebpf.md) - [k8s 서비스 - 로드 밸런서란 무엇인가](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/lb.md) - [간단한 아키텍처](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/arch.md) - [코드 조직](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/code.md) From fc7c7d4979fc4ef1e3253a045a7b7b2a5c307b8e Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Mon, 30 Sep 2024 17:16:57 +0900 Subject: [PATCH 26/34] gh-813 Fixed unnecessary error logs --- api/loxinlp/nlp.go | 28 +++++++++++++--------------- pkg/loxinet/gobgpclient.go | 4 ++-- pkg/loxinet/port.go | 2 +- pkg/loxinet/utils_aws.go | 4 ++-- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/api/loxinlp/nlp.go b/api/loxinlp/nlp.go index bcc15c16e..331fc2e57 100644 --- a/api/loxinlp/nlp.go +++ b/api/loxinlp/nlp.go @@ -907,8 +907,9 @@ func ModLink(link nlp.Link, add bool) int { Link: linkState, State: state, Mtu: mtu, Master: master, Real: real, TunID: tunId, TunDst: tunDst, TunSrc: tunSrc}) if err != nil { - tk.LogIt(tk.LogError, "[NLP] Port %v, %v, %v, %v add failed\n", name, ifMac, state, mtu) - fmt.Println(err) + if !strings.Contains(err.Error(), "port exists") { + tk.LogIt(tk.LogError, "[NLP] Port %v, %v, %v, %v add failed\n", name, ifMac, state, mtu) + } } else { tk.LogIt(tk.LogInfo, "[NLP] Port %v, %v, %v, %v add [OK]\n", name, ifMac, state, mtu) } @@ -917,7 +918,6 @@ func ModLink(link nlp.Link, add bool) int { ret, err = hooks.NetPortDel(&cmn.PortMod{Dev: name, Ptype: pType}) if err != nil { 
tk.LogIt(tk.LogError, "[NLP] Port %v, %v, %v, %v delete failed\n", name, ifMac, state, mtu) - fmt.Println(err) } else { tk.LogIt(tk.LogInfo, "[NLP] Port %v, %v, %v, %v delete [OK]\n", name, ifMac, state, mtu) } @@ -1627,50 +1627,48 @@ func LbSessionGet(done bool) int { if _, err := os.Stat(opt.Opts.ConfigPath + "/EPconfig.txt"); errors.Is(err, os.ErrNotExist) { if err != nil { - tk.LogIt(tk.LogInfo, "[NLP] No EndPoint config file : %s \n", err.Error()) + tk.LogIt(tk.LogInfo, "[NLP] Continuing without EP config file: %s\n", err.Error()) } } else { applyEPConfig() } - tk.LogIt(tk.LogInfo, "[NLP] EndPoint done\n") + tk.LogIt(tk.LogInfo, "[NLP] EndPoint config process done\n") if _, err := os.Stat(opt.Opts.ConfigPath + "/lbconfig.txt"); errors.Is(err, os.ErrNotExist) { if err != nil { - tk.LogIt(tk.LogInfo, "[NLP] No load balancer config file : %s \n", err.Error()) + tk.LogIt(tk.LogInfo, "[NLP] Continuing without LB config file : %s \n", err.Error()) } } else { applyLoadBalancerConfig() } + tk.LogIt(tk.LogInfo, "[NLP] LoadBalancer config done\n") - tk.LogIt(tk.LogInfo, "[NLP] LoadBalancer done\n") if _, err := os.Stat(opt.Opts.ConfigPath + "/sessionconfig.txt"); errors.Is(err, os.ErrNotExist) { if err != nil { - tk.LogIt(tk.LogInfo, "[NLP] No Session config file : %s \n", err.Error()) + tk.LogIt(tk.LogInfo, "[NLP] Continuing without Session config file : %s \n", err.Error()) } } else { applySessionConfig() } + tk.LogIt(tk.LogInfo, "[NLP] Session config done\n") - tk.LogIt(tk.LogInfo, "[NLP] Session done\n") if _, err := os.Stat(opt.Opts.ConfigPath + "/sessionulclconfig.txt"); errors.Is(err, os.ErrNotExist) { if err != nil { - tk.LogIt(tk.LogInfo, "[NLP] No UlCl config file : %s \n", err.Error()) + tk.LogIt(tk.LogInfo, "[NLP] Continuing without UlCl config file : %s \n", err.Error()) } } else { applyUlClConfig() } + tk.LogIt(tk.LogInfo, "[NLP] Session UlCl config done\n") - tk.LogIt(tk.LogInfo, "[NLP] Session UlCl done\n") if _, err := os.Stat(opt.Opts.ConfigPath + "/FWconfig.txt"); errors.Is(err, os.ErrNotExist) { if err != nil { - tk.LogIt(tk.LogInfo, "[NLP] No Firewall config file : %s \n", err.Error()) + tk.LogIt(tk.LogInfo, "[NLP] Continuing without Firewall config file : %s \n", err.Error()) } } else { applyFWConfig() } - tk.LogIt(tk.LogInfo, "[NLP] Firewall done\n") - - tk.LogIt(tk.LogInfo, "[NLP] LbSessionGet done\n") + tk.LogIt(tk.LogInfo, "[NLP] Firewall config done\n") } return 0 diff --git a/pkg/loxinet/gobgpclient.go b/pkg/loxinet/gobgpclient.go index ca75d40ad..dd65f5a17 100644 --- a/pkg/loxinet/gobgpclient.go +++ b/pkg/loxinet/gobgpclient.go @@ -205,12 +205,12 @@ func (gbh *GoBgpH) processRouteSingle(p *goBgpRouteInfo, showIdentifier bgp.BGPA tk.LogIt(tk.LogInfo, format, pathStr...) - if err := gbh.syncRoute(p, showIdentifier); err != nil { + if err := gbh.syncRoute(p); err != nil { tk.LogIt(tk.LogError, " failed to "+format, pathStr...) 
} } -func (gbh *GoBgpH) syncRoute(p *goBgpRouteInfo, showIdentifier bgp.BGPAddPathMode) error { +func (gbh *GoBgpH) syncRoute(p *goBgpRouteInfo) error { if gbh.noNlp { return nil } diff --git a/pkg/loxinet/port.go b/pkg/loxinet/port.go index 1c37d61be..a6e6a3e4e 100644 --- a/pkg/loxinet/port.go +++ b/pkg/loxinet/port.go @@ -293,7 +293,7 @@ func (P *PortsH) PortAdd(name string, osid int, ptype int, zone string, return 0, nil } } - tk.LogIt(tk.LogError, "port add - %s exists\n", name) + tk.LogIt(tk.LogTrace, "port add - %s exists\n", name) return PortExistsErr, errors.New("port exists") } diff --git a/pkg/loxinet/utils_aws.go b/pkg/loxinet/utils_aws.go index c0d489901..572ae46e0 100644 --- a/pkg/loxinet/utils_aws.go +++ b/pkg/loxinet/utils_aws.go @@ -696,7 +696,7 @@ func awsAssociateElasticIp(vIP, eIP net.IP, add bool) error { tk.LogIt(tk.LogInfo, "AWS adding elastic IP : %s\n", eIP.String()) if !add { - return awsDisassociateElasticIpWithInterface(ctx, eipAssociateID, niID) + return awsDisassociateElasticIpWithInterface(ctx, eipAssociateID) } return awsAssociateElasticIpWithInterface(ctx, eipID, niID, vIP) } @@ -723,7 +723,7 @@ func awsAssociateElasticIpWithInterface(ctx context.Context, eipID, niID string, return err } -func awsDisassociateElasticIpWithInterface(ctx context.Context, eipAssociateID, niID string) error { +func awsDisassociateElasticIpWithInterface(ctx context.Context, eipAssociateID string) error { _, err := ec2Client.DisassociateAddress(ctx, &ec2.DisassociateAddressInput{ AssociationId: &eipAssociateID, }) From da23de44030f56509b15af91900c9fe1c82bd6eb Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Wed, 2 Oct 2024 11:29:49 +0900 Subject: [PATCH 27/34] gh-810 fixes for LC selection and added more tests to LC selftests --- cicd/tcplblc/validation.sh | 26 ++++++++++++++++++++++++-- loxilb-ebpf | 2 +- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/cicd/tcplblc/validation.sh b/cicd/tcplblc/validation.sh index c36864375..f388e8357 100755 --- a/cicd/tcplblc/validation.sh +++ b/cicd/tcplblc/validation.sh @@ -50,7 +50,10 @@ do done done -$hexec l3h1 nohup nc -d ${servIP[0]} 2020 & +$hexec l3h1 nohup nc -d ${servIP[0]} 2020 >/dev/null 2>&1 & +$hexec l3h1 nohup nc -d ${servIP[0]} 2020 >/dev/null 2>&1 & +$hexec l3h1 nohup nc -d ${servIP[0]} 2020 >/dev/null 2>&1 & +$hexec l3h1 nohup nc -d ${servIP[0]} 2020 >/dev/null 2>&1 & sleep 5 echo "Testing Service IP: ${servIP[0]}" @@ -69,6 +72,25 @@ do done done +$hexec l3h1 nohup nc -d ${servIP[0]} 2020 >/dev/null 2>&1 & +sleep 5 + +echo "Testing Service IP: ${servIP[0]}" +lcode=0 +for i in {1..4} +do +for j in {0..2} +do + res=$($hexec l3h1 curl --max-time 10 -s ${servIP[0]}:2020) + echo $res + if [[ $res != "server3" ]] + then + lcode=1 + fi + sleep 1 +done +done + if [[ $lcode == 0 ]] then echo SCENARIO-tcplb with least-connection [OK] @@ -77,8 +99,8 @@ else code=1 fi -sudo killall -9 node 2>&1 > /dev/null sudo killall -9 nc 2>&1 > /dev/null +sudo killall -9 node 2>&1 > /dev/null rm -f nohup.out exit $code diff --git a/loxilb-ebpf b/loxilb-ebpf index 9e7d51b12..52d52aac1 160000 --- a/loxilb-ebpf +++ b/loxilb-ebpf @@ -1 +1 @@ -Subproject commit 9e7d51b121b5f49a6cd477bf521ab63c3f9587d7 +Subproject commit 52d52aac19b6d4ff05437a86e29445a49ddcc5e4 From 853073836b9a1da9bb12faabe3840cc574957889 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Wed, 2 Oct 2024 13:07:33 +0900 Subject: [PATCH 28/34] gh-819 fixes for k3s-base-sanity intermittently failing --- .github/workflows/k3s-base-sanity.yml | 9 +++------ 
cicd/k3s-base-sanity/config.sh | 7 ++----- pkg/loxinet/route.go | 3 ++- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/.github/workflows/k3s-base-sanity.yml b/.github/workflows/k3s-base-sanity.yml index cde42293e..08f37688e 100644 --- a/.github/workflows/k3s-base-sanity.yml +++ b/.github/workflows/k3s-base-sanity.yml @@ -38,17 +38,14 @@ jobs: - uses: actions/checkout@v2 with: submodules: recursive - - run: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable traefik --disable servicelb --disable-cloud-controller --kubelet-arg cloud-provider=external" K3S_KUBECONFIG_MODE="644" sh - + - run: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable traefik --disable servicelb" K3S_KUBECONFIG_MODE="644" sh - - run: | - sleep 10 + sleep 20 kubectl "${{ env.KUBECONFIG }}" taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule- sleep 60 kubectl "${{ env.KUBECONFIG }}" get nodes kubectl "${{ env.KUBECONFIG }}" get pods -A - wget https://github.com/loxilb-io/loxi-ccm/raw/master/manifests/loxi-ccm-k3s.yaml - kubectl "${{ env.KUBECONFIG }}" apply -f ./loxi-ccm-k3s.yaml - sleep 60 - kubectl "${{ env.KUBECONFIG }}" get pods -A + sleep 20 - run: | cd cicd/k3s-base-sanity/ ./config.sh "${{ env.KUBECONFIG }}" diff --git a/cicd/k3s-base-sanity/config.sh b/cicd/k3s-base-sanity/config.sh index 430fa8d11..e0b997eaa 100755 --- a/cicd/k3s-base-sanity/config.sh +++ b/cicd/k3s-base-sanity/config.sh @@ -131,9 +131,9 @@ else # Install k3s without external cloud-manager and disabled servicelb #curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.22.9+k3s1 INSTALL_K3S_EXEC="server --disable traefik --disable servicelb --disable-cloud-controller --kubelet-arg cloud-provider=external" K3S_KUBECONFIG_MODE="644" sh - - curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable traefik --disable servicelb --disable-cloud-controller --kubelet-arg cloud-provider=external" K3S_KUBECONFIG_MODE="644" sh - + curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable traefik --disable servicelb" K3S_KUBECONFIG_MODE="644" sh - - sleep 10 + sleep 20 # Check kubectl works kubectl $KUBECONFIG get pods -A @@ -141,9 +141,6 @@ else # Remove taints in k3s if any (usually happens if started without cloud-manager) kubectl $KUBECONFIG taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule- - # Start loxi-ccm as k3s daemonset - kubectl $KUBECONFIG apply -f https://github.com/loxilb-io/loxi-ccm/raw/master/manifests/loxi-ccm-k3s.yaml - echo "End K3s installation" fi diff --git a/pkg/loxinet/route.go b/pkg/loxinet/route.go index 7e7064a96..dbf01b3c3 100644 --- a/pkg/loxinet/route.go +++ b/pkg/loxinet/route.go @@ -211,13 +211,14 @@ func (r *RtH) RtAdd(Dst net.IPNet, Zone string, Ra RtAttr, Na []RtNhAttr) (int, for i := 0; i < nhLen; i++ { // FIXME - Need to sort before comparing if !Na[i].NhAddr.Equal(rt.NhAttr[i].NhAddr) { - rtMod = false + rtMod = true break } } } if rtMod { + tk.LogIt(tk.LogDebug, "rt change - %s:%s detected\n", Dst.String(), Zone) ret, _ := r.RtDelete(Dst, Zone) if ret != 0 { tk.LogIt(tk.LogError, "rt add - %s:%s del failed on mod\n", Dst.String(), Zone) From f96df22895796c915194df4f0acc60587b6b892d Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Wed, 2 Oct 2024 13:18:48 +0900 Subject: [PATCH 29/34] gh-819 fixes for k3s-base-sanity intermittently failing --- .github/workflows/k3s-base-sanity.yml | 4 +--- cicd/k3s-base-sanity/config.sh | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git 
a/.github/workflows/k3s-base-sanity.yml b/.github/workflows/k3s-base-sanity.yml
index 08f37688e..2017bf091 100644
--- a/.github/workflows/k3s-base-sanity.yml
+++ b/.github/workflows/k3s-base-sanity.yml
@@ -40,9 +40,7 @@ jobs:
         submodules: recursive
     - run: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable traefik --disable servicelb" K3S_KUBECONFIG_MODE="644" sh -
     - run: |
-        sleep 20
-        kubectl "${{ env.KUBECONFIG }}" taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule-
-        sleep 60
+        sleep 80
         kubectl "${{ env.KUBECONFIG }}" get nodes
         kubectl "${{ env.KUBECONFIG }}" get pods -A
         sleep 20
diff --git a/cicd/k3s-base-sanity/config.sh b/cicd/k3s-base-sanity/config.sh
index e0b997eaa..01d7d9a1d 100755
--- a/cicd/k3s-base-sanity/config.sh
+++ b/cicd/k3s-base-sanity/config.sh
@@ -139,7 +139,7 @@ else
   kubectl $KUBECONFIG get pods -A

   # Remove taints in k3s if any (usually happens if started without cloud-manager)
-  kubectl $KUBECONFIG taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule-
+  # kubectl $KUBECONFIG taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule-

   echo "End K3s installation"
 fi

From 6ebcf6d81706b419a9b035b1e1aac8c6af99e69e Mon Sep 17 00:00:00 2001
From: Trekkie Coder
Date: Wed, 2 Oct 2024 19:11:29 +0900
Subject: [PATCH 30/34] gh-810 Merge lc-fix branch to main

---
 loxilb-ebpf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/loxilb-ebpf b/loxilb-ebpf
index 52d52aac1..f2d501dad 160000
--- a/loxilb-ebpf
+++ b/loxilb-ebpf
@@ -1 +1 @@
-Subproject commit 52d52aac19b6d4ff05437a86e29445a49ddcc5e4
+Subproject commit f2d501dad67f64f2e44a4d06851d63945e9c59cb

From cfc0602d3f81f1c6fd71688331ff064ff62ced13 Mon Sep 17 00:00:00 2001
From: Trekkie Coder
Date: Thu, 3 Oct 2024 14:16:10 +0900
Subject: [PATCH 31/34] gh-813 Fixed log-level verbosity

---
 pkg/loxinet/dpebpf_linux.go | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/pkg/loxinet/dpebpf_linux.go b/pkg/loxinet/dpebpf_linux.go
index 36b855870..20f76cf0f 100644
--- a/pkg/loxinet/dpebpf_linux.go
+++ b/pkg/loxinet/dpebpf_linux.go
@@ -2011,7 +2011,7 @@ func dpCTMapNotifierWorker(cti *DpCtInfo) {
 		}
 	}

-	tk.LogIt(tk.LogDebug, "[CT] %s - %s\n", opStr, cti.String())
+	tk.LogIt(tk.LogTrace, "[CT] %s - %s\n", opStr, cti.String())
 }

 func dpCTMapBcast() {
@@ -2044,7 +2044,7 @@ func dpCTMapChkUpdates() {
 	fd := C.llb_map2fd(C.LL_DP_CT_MAP)

 	if len(mh.dpEbpf.ctMap) > 0 {
-		tk.LogIt(tk.LogInfo, "[CT] Map size %d\n", len(mh.dpEbpf.ctMap))
+		tk.LogIt(tk.LogTrace, "[CT] Map size %d\n", len(mh.dpEbpf.ctMap))
 	}

 	for _, cti := range mh.dpEbpf.ctMap {
@@ -2075,7 +2075,7 @@ func dpCTMapChkUpdates() {
 				delete(mh.dpEbpf.ctMap, cti.Key())
 				mh.dpEbpf.ctMap[goCtEnt.Key()] = goCtEnt
 				ctStr := goCtEnt.String()
-				tk.LogIt(tk.LogDebug, "[CT] %s - %s\n", "update", ctStr)
+				tk.LogIt(tk.LogTrace, "[CT] %s - %s\n", "update", ctStr)
 				if goCtEnt.CState == "est" {
 					goCtEnt.XSync = true
 					goCtEnt.NTs = tc
@@ -2125,7 +2125,7 @@ func dpCTMapChkUpdates() {
 		}

 		if cti.XSync == true && time.Duration(tc.Sub(cti.NTs).Seconds()) >= time.Duration(10) {
-			tk.LogIt(tk.LogDebug, "[CT] Sync - %s\n", cti.String())
+			tk.LogIt(tk.LogTrace, "[CT] Sync - %s\n", cti.String())

 			ret := 0
 			if cti.Deleted > 0 {
@@ -2148,16 +2148,16 @@
 		}

 		if len(blkCti) >= blkCtiMaxLen {
-			tk.LogIt(tk.LogDebug, "[CT] Block Add Sync - \n")
+			tk.LogIt(tk.LogTrace, "[CT] Block Add Sync - \n")
 			tc1 := time.Now()
 			mh.dp.DpXsyncRPC(DpSyncAdd, blkCti)
 			tc2 :=
time.Now()
-			tk.LogIt(tk.LogInfo, "[CT] Block Add Sync %d took %v- \n", len(blkCti), time.Duration(tc2.Sub(tc1)))
+			tk.LogIt(tk.LogTrace, "[CT] Block Add Sync %d took %v- \n", len(blkCti), time.Duration(tc2.Sub(tc1)))
 			blkCti = nil
 		}

 		if len(blkDelCti) >= blkCtiMaxLen {
-			tk.LogIt(tk.LogDebug, "[CT] Block Del Sync - \n")
+			tk.LogIt(tk.LogTrace, "[CT] Block Del Sync - \n")
 			mh.dp.DpXsyncRPC(DpSyncDelete, blkDelCti)
 			blkDelCti = nil
 		}
@@ -2165,14 +2165,14 @@

 	if len(blkCti) > 0 {
 		tc1 := time.Now()
-		tk.LogIt(tk.LogDebug, "[CT] Block Add Sync - \n")
+		tk.LogIt(tk.LogTrace, "[CT] Block Add Sync - \n")
 		mh.dp.DpXsyncRPC(DpSyncAdd, blkCti)
 		tc2 := time.Now()
-		tk.LogIt(tk.LogInfo, "[CT] Block Add Sync %d took %v- \n", len(blkCti), time.Duration(tc2.Sub(tc1)))
+		tk.LogIt(tk.LogTrace, "[CT] Block Add Sync %d took %v- \n", len(blkCti), time.Duration(tc2.Sub(tc1)))
 	}

 	if len(blkDelCti) > 0 {
-		tk.LogIt(tk.LogDebug, "[CT] Block Del Sync - \n")
+		tk.LogIt(tk.LogTrace, "[CT] Block Del Sync - \n")
 		mh.dp.DpXsyncRPC(DpSyncDelete, blkDelCti)
 	}
 }
@@ -2217,7 +2217,7 @@ func (e *DpEbpfH) DpCtAdd(w *DpCtInfo) int {
 	mh.mtx.Unlock()

 	if r == nil || len(w.PVal) == 0 || len(w.PKey) == 0 || w.CState != "est" {
-		tk.LogIt(tk.LogDebug, "Invalid CT op/No LB - %v\n", serv)
+		tk.LogIt(tk.LogError, "Invalid CT op/No LB - %v\n", serv)
 		return EbpfErrCtAdd
 	}

From 236be621a427f38319ad983ad4632046a82b58f8 Mon Sep 17 00:00:00 2001
From: Trekkie Coder
Date: Mon, 7 Oct 2024 00:34:32 +0900
Subject: [PATCH 32/34] gh-726 Fixes to https handling

---
 loxilb-ebpf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/loxilb-ebpf b/loxilb-ebpf
index f2d501dad..c9fd65423 160000
--- a/loxilb-ebpf
+++ b/loxilb-ebpf
@@ -1 +1 @@
-Subproject commit f2d501dad67f64f2e44a4d06851d63945e9c59cb
+Subproject commit c9fd65423a941ff7a8845034cf598ad3eab23fb2

From 320d8f2278c151ea602b39c2a2f96ba1f664d191 Mon Sep 17 00:00:00 2001
From: Trekkie Coder
Date: Mon, 7 Oct 2024 00:39:29 +0900
Subject: [PATCH 33/34] gh-726 Fixes to https handling

---
 loxilb-ebpf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/loxilb-ebpf b/loxilb-ebpf
index c9fd65423..aea9ef839 160000
--- a/loxilb-ebpf
+++ b/loxilb-ebpf
@@ -1 +1 @@
-Subproject commit c9fd65423a941ff7a8845034cf598ad3eab23fb2
+Subproject commit aea9ef839c6eb6d4c92f7df0282f190486c31fcb

From 9009d75c874668421468543f85709ec7191a83e9 Mon Sep 17 00:00:00 2001
From: Trekkie Coder
Date: Mon, 7 Oct 2024 11:13:24 +0900
Subject: [PATCH 34/34] Better handling of RR when some endpoints are inactive

---
 loxilb-ebpf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/loxilb-ebpf b/loxilb-ebpf
index aea9ef839..69240d58a 160000
--- a/loxilb-ebpf
+++ b/loxilb-ebpf
@@ -1 +1 @@
-Subproject commit aea9ef839c6eb6d4c92f7df0282f190486c31fcb
+Subproject commit 69240d58a1732d4326bee7d0841c083ff5170079
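
A note on the utils_aws.go change earlier in this series: EC2's DisassociateAddress API keys off the association ID alone, which is why the unused network-interface parameter could be dropped from awsDisassociateElasticIpWithInterface. A minimal standalone sketch of the same call with aws-sdk-go-v2 follows; the client wiring and the association ID are illustrative placeholders, not loxilb internals.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

// disassociateEIP detaches an elastic IP given only its association ID.
// No network-interface ID is required, mirroring the simplification in
// the patch above.
func disassociateEIP(ctx context.Context, client *ec2.Client, assocID string) error {
	_, err := client.DisassociateAddress(ctx, &ec2.DisassociateAddressInput{
		AssociationId: aws.String(assocID),
	})
	return err
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// "eipassoc-0abc1234" is a hypothetical association ID.
	if err := disassociateEIP(ctx, ec2.NewFromConfig(cfg), "eipassoc-0abc1234"); err != nil {
		log.Fatal(err)
	}
}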
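
On the tcplblc validation changes: the test pins idle nc connections against the service so that, under least-connection scheduling, fresh curl requests should consistently land on the endpoint with the fewest live connections (server3 in the final scenario). A sketch of the selection rule the test exercises; the struct and function names are illustrative, not loxilb's internal API.

package main

import "fmt"

// endpoint models a backend with its count of live connections; loxilb
// tracks this state in its datapath maps, but a plain struct is enough
// to show the selection rule.
type endpoint struct {
	name        string
	activeConns int
}

// pickLeastConn returns the endpoint with the fewest active connections.
func pickLeastConn(eps []endpoint) *endpoint {
	var best *endpoint
	for i := range eps {
		if best == nil || eps[i].activeConns < best.activeConns {
			best = &eps[i]
		}
	}
	return best
}

func main() {
	// Mirrors the test shape: idle nc connections pinned on two endpoints.
	eps := []endpoint{{"server1", 4}, {"server2", 1}, {"server3", 0}}
	fmt.Println(pickLeastConn(eps).name) // prints "server3"
}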
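
On the route.go fix in the k3s-base-sanity patch: RtAdd is meant to flag a route as modified when the incoming next-hop list differs from the stored one, and the one-line change corrects a flag that was being set to false exactly when a mismatch was found. The surviving FIXME notes that the lists are still compared positionally; below is a sketch of an order-insensitive comparison, assuming the next hops reduce to net.IP addresses (the helper name is hypothetical).

package main

import (
	"bytes"
	"fmt"
	"net"
	"sort"
)

// nhEqualUnordered reports whether two next-hop lists hold the same
// addresses regardless of order: it sorts copies of both slices by raw
// byte value and compares element-wise, which is one way to resolve the
// FIXME about sorting before comparing.
func nhEqualUnordered(a, b []net.IP) bool {
	if len(a) != len(b) {
		return false
	}
	as := append([]net.IP(nil), a...)
	bs := append([]net.IP(nil), b...)
	byBytes := func(s []net.IP) func(i, j int) bool {
		return func(i, j int) bool { return bytes.Compare(s[i], s[j]) < 0 }
	}
	sort.Slice(as, byBytes(as))
	sort.Slice(bs, byBytes(bs))
	for i := range as {
		if !as[i].Equal(bs[i]) {
			return false
		}
	}
	return true
}

func main() {
	a := []net.IP{net.ParseIP("10.10.10.1"), net.ParseIP("10.10.10.2")}
	b := []net.IP{net.ParseIP("10.10.10.2"), net.ParseIP("10.10.10.1")}
	fmt.Println(nhEqualUnordered(a, b)) // prints "true": same hops, different order
}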
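
The log-level patch follows one consistent rule: per-flow and per-sync-cycle messages in the CT path move from Debug/Info down to Trace so steady traffic cannot flood the log, while a genuine failure (the invalid-CT case in DpCtAdd) moves up to Error. The generic sketch below shows why the demotion matters under leveled logging; it mimics the idea in plain Go and is not the tk logger API.

package main

import "log"

type level int

const (
	logTrace level = iota // most verbose
	logDebug
	logInfo
	logError
)

// current is the deployment's threshold; at Debug, Trace lines vanish.
var current = logDebug

// logIt drops any message below the configured threshold, so hot-path
// Trace messages cost almost nothing unless tracing is enabled.
func logIt(l level, format string, args ...any) {
	if l < current {
		return
	}
	log.Printf(format, args...)
}

func main() {
	logIt(logTrace, "[CT] update - %s", "per-flow detail") // suppressed at Debug
	logIt(logError, "Invalid CT op/No LB - %v", "service") // always emitted
}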