diff --git a/.github/workflows/k8s-calico-incluster.yml b/.github/workflows/k8s-calico-incluster.yml new file mode 100644 index 000000000..1b9c4b7dd --- /dev/null +++ b/.github/workflows/k8s-calico-incluster.yml @@ -0,0 +1,36 @@ +name: K8s-Calico-Incluster-Sanity-CI +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k8s-calico-incluster' +jobs: + test-runner: + name: k8s-calico-incluster-sanity + runs-on: [self-hosted, large] + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run the test + run: | + cd cicd/k8s-calico-incluster + ./config.sh + ./validation.sh + cd - + + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/k8s-calico-incluster || true + ./rmconfig.sh + cd - diff --git a/cicd/k8s-calico-incluster/Vagrantfile b/cicd/k8s-calico-incluster/Vagrantfile index e68faecb6..5653a6651 100644 --- a/cicd/k8s-calico-incluster/Vagrantfile +++ b/cicd/k8s-calico-incluster/Vagrantfile @@ -5,32 +5,29 @@ require "yaml" settings = YAML.load_file "yaml/settings.yaml" workers = settings["nodes"]["workers"]["count"] +loxilbs = (ENV['LOXILBS'] || "2").to_i Vagrant.configure("2") do |config| if Vagrant.has_plugin?("vagrant-vbguest") config.vbguest.auto_update = false end - - config.vm.box = settings["software"]["cluster"]["box"]["name"] - config.vm.box_version = settings["software"]["cluster"]["box"]["version"] - - config.vm.define "host" do |host| + config.vm.define "host" do |host| host.vm.hostname = 'host1' - host.vm.network :private_network, ip: settings["network"]["client_ip"], :netmask => "255.255.255.0" + host.vm.box = settings["software"]["cluster"]["box"] host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" host.vm.network :private_network, ip: 
"192.168.90.9", :netmask => "255.255.255.0" host.vm.provision :shell, :path => "node_scripts/host.sh" host.vm.provider :virtualbox do |vbox| vbox.customize ["modifyvm", :id, "--memory", 2048] - vbox.customize ["modifyvm", :id, "--cpus", 1] + vbox.customize ["modifyvm", :id, "--cpus", 2] end end config.vm.define "master" do |master| - master.vm.hostname = 'master1' + master.vm.box = settings["software"]["cluster"]["box"] + master.vm.hostname = 'master' master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0" - master.vm.network :private_network, ip: settings["network"]["control_ip2"], :netmask => "255.255.255.0" master.vm.provision "shell", env: { "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), @@ -50,14 +47,16 @@ Vagrant.configure("2") do |config| master.vm.provider :virtualbox do |vbox| vbox.customize ["modifyvm", :id, "--memory", 4096] - vbox.customize ["modifyvm", :id, "--cpus", 3] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] end end (1..workers).each do |node_number| config.vm.define "worker#{node_number}" do |worker| + worker.vm.box = settings["software"]["cluster"]["box"] worker.vm.hostname = "worker#{node_number}" - ip = node_number + 100 + ip = node_number + 200 worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" worker.vm.provision "shell", env: { @@ -70,8 +69,9 @@ Vagrant.configure("2") do |config| worker.vm.provision "shell", path: "node_scripts/worker.sh" worker.vm.provider :virtualbox do |vbox| - vbox.customize ["modifyvm", :id, "--memory", 2048] + vbox.customize ["modifyvm", :id, "--memory", 4096] vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] end end end diff --git a/cicd/k8s-calico-incluster/config.sh b/cicd/k8s-calico-incluster/config.sh index 5c4154941..6abfc0211 100755 --- a/cicd/k8s-calico-incluster/config.sh +++ 
b/cicd/k8s-calico-incluster/config.sh @@ -30,8 +30,38 @@ do sleep 10 done -# Create fullnat Services +sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1 + +#Create fullnat Service +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_onearm.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_onearm.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_onearm.yml' 2> /dev/null vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null -vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp.yml' 2> /dev/null + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + echo "Cluster is ready" + break; + fi + echo "Will try after 10s" + sleep 10 +done + +if [[ $fin == 0 ]]; then + echo "Cluster is not ready" + exit 1 +fi diff --git a/cicd/k8s-calico-incluster/node_scripts/common.sh b/cicd/k8s-calico-incluster/node_scripts/common.sh index cf1e66f1a..c01ad688f 100644 --- a/cicd/k8s-calico-incluster/node_scripts/common.sh +++ b/cicd/k8s-calico-incluster/node_scripts/common.sh @@ -26,7 +26,7 @@ sudo apt-get update -y # Install CRI-O Runtime VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" - +CRIO_VERSION=1.27 # Create the .conf file to load the modules at bootup cat < /etc/default/kubelet << EOF diff --git a/cicd/k8s-calico-incluster/node_scripts/host.sh b/cicd/k8s-calico-incluster/node_scripts/host.sh index c4c22d558..6fbeb1648 100755 --- a/cicd/k8s-calico-incluster/node_scripts/host.sh +++ b/cicd/k8s-calico-incluster/node_scripts/host.sh @@ -1,10 +1,5 
@@ -sudo su -sudo apt-get install -y lksctp-tools socat -wget https://github.com/loxilb-io/loxilb/raw/main/cicd/common/sctp_client -wget https://github.com/loxilb-io/loxilb/raw/main/cicd/common/udp_client -chmod 777 sctp_client -chmod 777 udp_client -echo "123.123.123.1 k8s-svc" >> /etc/hosts -ifconfig eth2 mtu 1450 -ip route add 123.123.123.0/24 via 192.168.90.10 +# Install Bird to work with k3s +sudo apt-get update +sudo apt-get -y install socat lksctp-tools + echo "Host is up" diff --git a/cicd/k8s-calico-incluster/node_scripts/loxilb.sh b/cicd/k8s-calico-incluster/node_scripts/loxilb.sh index 74e66ae9b..6df67208f 100644 --- a/cicd/k8s-calico-incluster/node_scripts/loxilb.sh +++ b/cicd/k8s-calico-incluster/node_scripts/loxilb.sh @@ -7,7 +7,3 @@ add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu apt-get update apt-get install -y docker-ce docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest -echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc -echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc - -echo $LOXILB_IP > /vagrant/loxilb-ip diff --git a/cicd/k8s-calico-incluster/node_scripts/master.sh b/cicd/k8s-calico-incluster/node_scripts/master.sh index 9d83b513d..6d20f5065 100644 --- a/cicd/k8s-calico-incluster/node_scripts/master.sh +++ b/cicd/k8s-calico-incluster/node_scripts/master.sh @@ -10,14 +10,13 @@ sudo kubeadm config images pull echo "Preflight Check Passed: Downloaded All Required Images" -sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" 
--ignore-preflight-errors Swap +sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml mkdir -p "$HOME"/.kube sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config -curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - - # Save Configs to shared /Vagrant location # For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration. @@ -54,6 +53,5 @@ EOF kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml # Install loxilb -kubectl apply -f /vagrant/yaml/loxilb.yml -kubectl apply -f /vagrant/yaml/loxilb-peer.yml -kubectl apply -f /vagrant/yaml/kube-loxilb.yml +kubectl apply -f /vagrant/yaml/kube-loxilb.yaml +kubectl apply -f /vagrant/yaml/loxilb.yaml diff --git a/cicd/k8s-calico-incluster/udp_client b/cicd/k8s-calico-incluster/udp_client new file mode 100755 index 000000000..b70cd81fc Binary files /dev/null and b/cicd/k8s-calico-incluster/udp_client differ diff --git a/cicd/k8s-calico-incluster/validation.sh b/cicd/k8s-calico-incluster/validation.sh index d1f77ee9d..0a0c74b93 100755 --- a/cicd/k8s-calico-incluster/validation.sh +++ b/cicd/k8s-calico-incluster/validation.sh @@ -8,7 +8,7 @@ fi # Set space as the delimiter IFS=' ' - +alloc=0 for((i=0; i<120; i++)) do extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-fullnat") @@ -19,16 +19,32 @@ do sleep 1 continue fi - if [[ ${strarr[3]} != *"none"* ]]; then + if [[ ${strarr[3]} != *"none"* && ${strarr[3]} != *"pending"* ]]; then extIP="$(cut -d'-' -f2 <<<${strarr[3]})" + alloc=1 break fi echo "No external LB allocated" sleep 1 done +if [[ $alloc != 1 ]]; then + echo "No external LB allocated. 
Check kube-loxilb and loxilb logs" + echo "******************************************************************************" + vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null + echo "******************************************************************************" + echo -e "\nSVC List" + echo "******************************************************************************" + vagrant ssh master -c 'kubectl get svc' 2> /dev/null + echo "******************************************************************************" + echo -e "\nPod List" + echo "******************************************************************************" + vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null + exit 1 +fi + ## Any routing updates ?? -sleep 30 +#sleep 30 echo Service IP : $extIP echo -e "\nEnd Points List" @@ -45,14 +61,15 @@ vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null echo -e "\nTEST RESULTS" echo "******************************************************************************" -mode=( "fullnat" ) -tcp_port=( 57002 ) -udp_port=( 57003 ) -sctp_port=( 57004 ) +mode=( "onearm" "fullnat" ) +tcp_port=( 56002 57002 ) +udp_port=( 56003 57003 ) +sctp_port=( 56004 57004 ) code=0 -for ((i=0;i<1;i++)); do -out=$(vagrant ssh host -c "curl -s --connect-timeout 10 http://$extIP:${tcp_port[i]}") -echo $out + +for ((i=0;i<=1;i++)); do +out=$(vagrant ssh host -c "curl -s --connect-timeout 10 http://$extIP:${tcp_port[i]}" 2> /dev/null) +#echo $out if [[ ${out} == *"nginx"* ]]; then echo -e "K8s-calico-incluster TCP\t(${mode[i]})\t[OK]" else @@ -60,7 +77,8 @@ else code=1 fi -out=$(vagrant ssh host -c "timeout 5 ./udp_client $extIP ${udp_port[i]}") +out=$(vagrant ssh host -c "timeout 5 /vagrant/udp_client $extIP ${udp_port[i]}" 2> /dev/null) +#echo $out if [[ ${out} == *"Client"* ]]; then echo -e "K8s-calico-incluster UDP\t(${mode[i]})\t[OK]" else @@ -68,7 +86,8 @@ else code=1 fi -out=$(vagrant ssh host -c "socat -T10 - SCTP:$extIP:${sctp_port[i]},bind=192.168.90.9") 
+out=$(vagrant ssh host -c "socat -T10 - SCTP:$extIP:${sctp_port[i]}" 2> /dev/null) +#echo $out if [[ ${out} == *"server"* ]]; then echo -e "K8s-calico-incluster SCTP\t(${mode[i]})\t[OK]" else @@ -77,16 +96,4 @@ else fi done -mode=( "default" ) -sctp_port=( 55004 ) -code=0 - -out=$(vagrant ssh host -c "socat -T10 - SCTP:$extIP:${sctp_port[0]},bind=192.168.90.9") -if [[ ${out} == *"server"* ]]; then - echo -e "K8s-calico-incluster SCTP\t(${mode[0]})\t[OK]" -else - echo -e "K8s-calico-incluster SCTP\t(${mode[0]})\t[FAILED]" - code=1 -fi - exit $code diff --git a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml similarity index 64% rename from cicd/k8s-calico-incluster/yaml/kube-loxilb.yml rename to cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml index 3d8478c46..fe0293137 100644 --- a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml @@ -40,6 +40,16 @@ rules: - list - patch - update + - apiGroups: + - gateway.networking.k8s.io + resources: + - gatewayclasses + - gatewayclasses/status + - gateways + - gateways/status + - tcproutes + - udproutes + verbs: ["get", "watch", "list", "patch", "update"] - apiGroups: - discovery.k8s.io resources: @@ -60,6 +70,51 @@ rules: - subjectaccessreviews verbs: - create + - apiGroups: + - bgppeer.loxilb.io + resources: + - bgppeerservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicydefinedsets.loxilb.io + resources: + - bgppolicydefinedsetsservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicydefinition.loxilb.io + resources: + - bgppolicydefinitionservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicyapply.loxilb.io + resources: + - bgppolicyapplyservices + verbs: + - get + - watch + - list + - create + - update + - delete + --- kind: ClusterRoleBinding apiVersion: 
rbac.authorization.k8s.io/v1 @@ -80,27 +135,23 @@ metadata: name: kube-loxilb namespace: kube-system labels: - app: loxilb + app: kube-loxilb-app spec: replicas: 1 selector: matchLabels: - app: loxilb + app: kube-loxilb-app template: metadata: labels: - app: loxilb + app: kube-loxilb-app spec: hostNetwork: true dnsPolicy: ClusterFirstWithHostNet tolerations: - - effect: NoSchedule - operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - - effect: NoExecute - operator: Exists priorityClassName: system-node-critical serviceAccountName: kube-loxilb terminationGracePeriodSeconds: 0 @@ -112,12 +163,13 @@ spec: - /bin/kube-loxilb args: #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=123.123.123.1/24 - - --setBGP=64512 - - --listenBGPPort=1791 + - --externalCIDR=192.168.80.5/32 + #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + #- --setBGP=64512 + #- --listenBGPPort=1791 - --setRoles=0.0.0.0 #- --monitor - #- --setBGP + #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 #- --setLBMode=1 #- --config=/opt/loxilb/agent/kube-loxilb.conf resources: diff --git a/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml b/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml new file mode 100644 index 000000000..245a62553 --- /dev/null +++ b/cicd/k8s-calico-incluster/yaml/kubeadm-config.yaml @@ -0,0 +1,69 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 192.168.80.250 + bindPort: 6443 +nodeRegistration: + imagePullPolicy: IfNotPresent + name: master + taints: null +--- +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +kind: ClusterConfiguration +apiServer: + timeoutForControlPlane: 4m0s + certSANs: + - 192.168.80.250 +controlPlaneEndpoint: 192.168.80.250:6443 +clusterName: kubernetes 
+controllerManager: {} +dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kubernetesVersion: v1.29.2 +networking: + dnsDomain: cluster.local + podSubnet: 172.16.1.0/16 + serviceSubnet: 172.17.1.0/18 +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 5 +clusterCIDR: "" +configSyncPeriod: 15m0s +#featureGates: "SupportIPVSProxyMode=true" +mode: ipvs +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" diff --git a/cicd/k8s-calico-incluster/yaml/loxilb-localvip.yaml b/cicd/k8s-calico-incluster/yaml/loxilb-localvip.yaml new file mode 100644 index 000000000..3bcfce436 --- /dev/null +++ b/cicd/k8s-calico-incluster/yaml/loxilb-localvip.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + spec: + hostNetwork: true + hostPID: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + #- key: "node-role.kubernetes.io/master" + #operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + # - key: "node-role.kubernetes.io/master" + # operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + initContainers: + - name: mkllb-cgroup + command: + - sh + - -ec + - | + ls 
/usr/local/sbin/mkllb_cgroup && chmod 777 /usr/local/sbin/mkllb_cgroup; + cp -f /usr/local/sbin/mkllb_cgroup /hbin/mkllb_cgroup; + nsenter --cgroup=/hproc/1/ns/cgroup --mount=/hproc/1/ns/mnt /bin/mkllb_cgroup; + echo done; + rm /hbin/mkllb_cgroup; + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + volumeMounts: + - name: hproc + mountPath: /hproc + - name: hbin + mountPath: /hbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: IfNotPresent + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--localsockpolicy" ] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + volumeMounts: + - name: llb-cgroup + mountPath: /opt/loxilb/cgroup + securityContext: + privileged: true + runAsUser: 0 + capabilities: + add: + - SYS_ADMIN + volumes: + - name: hproc + hostPath: + path: /proc + type: Directory + - name: hbin + hostPath: + path: /bin + type: Directory + - name: llb-cgroup + hostPath: + path: /opt/loxilb/cgroup + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP diff --git a/cicd/k8s-calico-incluster/yaml/loxilb-peer.yml b/cicd/k8s-calico-incluster/yaml/loxilb-peer.yml deleted file mode 100644 index 5b35cd2e5..000000000 --- a/cicd/k8s-calico-incluster/yaml/loxilb-peer.yml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: loxilb-peer - namespace: kube-system -spec: - selector: - matchLabels: 
- app: loxilb-peer-app - template: - metadata: - name: loxilb-peer - labels: - app: loxilb-peer-app - spec: - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "node-role.kubernetes.io/master" - operator: DoesNotExist - - key: "node-role.kubernetes.io/control-plane" - operator: DoesNotExist - containers: - - name: loxilb-peer-app - image: "ghcr.io/loxilb-io/loxilb:latest" - command: [ "/root/loxilb-io/loxilb/loxilb", "--peer" ] - ports: - - containerPort: 11111 - - containerPort: 1791 - - containerPort: 50051 - securityContext: - privileged: true - capabilities: - add: - - SYS_ADMIN ---- -apiVersion: v1 -kind: Service -metadata: - name: loxilb-peer-service - namespace: kube-system -spec: - clusterIP: None - selector: - app: loxilb-peer-app - ports: - - name: loxilb-peer-app - port: 11111 - targetPort: 11111 - protocol: TCP - - name: loxilb-peer-bgp - port: 1791 - targetPort: 1791 - protocol: TCP - - name: loxilb-peer-gobgp - port: 50051 - targetPort: 50051 - protocol: TCP - - diff --git a/cicd/k8s-calico-incluster/yaml/loxilb.yml b/cicd/k8s-calico-incluster/yaml/loxilb.yaml similarity index 78% rename from cicd/k8s-calico-incluster/yaml/loxilb.yml rename to cicd/k8s-calico-incluster/yaml/loxilb.yaml index 0a118b616..5ced543b1 100644 --- a/cicd/k8s-calico-incluster/yaml/loxilb.yml +++ b/cicd/k8s-calico-incluster/yaml/loxilb.yaml @@ -16,6 +16,8 @@ spec: hostNetwork: true dnsPolicy: ClusterFirstWithHostNet tolerations: + #- key: "node-role.kubernetes.io/master" + #operator: Exists - key: "node-role.kubernetes.io/control-plane" operator: Exists affinity: @@ -23,16 +25,18 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: + #- key: "node-role.kubernetes.io/master" + # operator: Exists - key: "node-role.kubernetes.io/control-plane" operator: Exists containers: - name: loxilb-app image: 
"ghcr.io/loxilb-io/loxilb:latest" imagePullPolicy: Always - command: [ "/root/loxilb-io/loxilb/loxilb", "--bgp", "--egr-hooks", "--blacklist=cali.|tunl.|vxlan[.]calico|veth." ] + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico" ] ports: - containerPort: 11111 - - containerPort: 1791 + - containerPort: 179 - containerPort: 50051 securityContext: privileged: true @@ -55,8 +59,8 @@ spec: targetPort: 11111 protocol: TCP - name: loxilb-app-bgp - port: 1791 - targetPort: 1791 + port: 179 + targetPort: 179 protocol: TCP - name: loxilb-app-gobgp port: 50051 diff --git a/cicd/k8s-calico-incluster/yaml/sctp.yml b/cicd/k8s-calico-incluster/yaml/sctp.yml deleted file mode 100644 index 5991cb1de..000000000 --- a/cicd/k8s-calico-incluster/yaml/sctp.yml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: sctp-lb-default - annotations: - loxilb.io/lbmode: "default" -spec: - loadBalancerClass: loxilb.io/loxilb - externalTrafficPolicy: Local - selector: - what: sctp-default-test - ports: - - port: 55004 - protocol: SCTP - targetPort: 9999 - type: LoadBalancer ---- -apiVersion: v1 -kind: Pod -metadata: - name: sctp-default-test - labels: - what: sctp-default-test -spec: - tolerations: - - key: "node-role.kubernetes.io/control-plane" - operator: Exists - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "node-role.kubernetes.io/control-plane" - operator: Exists - containers: - - name: sctp-default-test - image: ghcr.io/loxilb-io/alpine-socat:latest - command: [ "sh", "-c"] - args: - - while true; do - socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; - sleep 20; - done; - ports: - - containerPort: 9999 - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP diff --git a/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml 
b/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml index b6eae03d5..5ac9a69b7 100644 --- a/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml +++ b/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml @@ -3,6 +3,7 @@ kind: Service metadata: name: sctp-lb-fullnat annotations: + #loxilb.io/num-secondary-networks: "2" loxilb.io/liveness: "yes" loxilb.io/lbmode: "fullnat" spec: @@ -25,7 +26,10 @@ metadata: spec: containers: - name: sctp-fullnat-test + #image: loxilbio/sctp-darn:latest image: ghcr.io/loxilb-io/alpine-socat:latest + imagePullPolicy: Always + #command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] command: [ "sh", "-c"] args: - while true; do diff --git a/cicd/k8s-calico-incluster/yaml/settings.yaml b/cicd/k8s-calico-incluster/yaml/settings.yaml index 7b6e513bb..492519dce 100644 --- a/cicd/k8s-calico-incluster/yaml/settings.yaml +++ b/cicd/k8s-calico-incluster/yaml/settings.yaml @@ -9,12 +9,10 @@ cluster_name: Kubernetes Cluster # NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03 # All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf. network: + iloxilb_ip: 192.168.80.253 + oloxilb_ip: 192.168.90.253 # Worker IPs are simply incremented from the control IP. - client_ip: 192.168.90.9 - control_ip: 192.168.80.10 - control_ip2: 192.168.90.10 - control2_ip: 192.168.80.11 - control2_ip2: 192.168.90.11 + control_ip: 192.168.80.250 dns_servers: - 8.8.8.8 - 1.1.1.1 @@ -22,11 +20,11 @@ network: service_cidr: 172.17.1.0/18 nodes: control: - cpu: 4 + cpu: 2 memory: 4096 workers: count: 2 - cpu: 2 + cpu: 1 memory: 2048 # Mount additional shared folders from the host into each virtual machine. # Note that the project directory is automatically mounted at /vagrant. 
@@ -34,11 +32,14 @@ nodes: # - host_path: ../images # vm_path: /vagrant/images software: - cluster: + loxilb: box: name: sysnet4admin/Ubuntu-k8s version: 0.7.1 + cluster: + box: bento/ubuntu-22.04 + version: 202401.31.0 calico: 3.26.0 # To skip the dashboard installation, set its version to an empty value or comment it out: - kubernetes: 1.27.1-00 + kubernetes: 1.29.2 os: xUbuntu_22.04 diff --git a/cicd/k8s-calico-incluster/yaml/tcp.yml b/cicd/k8s-calico-incluster/yaml/tcp.yml deleted file mode 100644 index 8c8983403..000000000 --- a/cicd/k8s-calico-incluster/yaml/tcp.yml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: tcp-lb-default - annotations: - loxilb.io/liveness: "yes" - loxilb.io/lbmode: "default" -spec: - externalTrafficPolicy: Local - loadBalancerClass: loxilb.io/loxilb - selector: - what: tcp-default-test - ports: - - port: 55002 - targetPort: 80 - type: LoadBalancer ---- -apiVersion: v1 -kind: Pod -metadata: - name: tcp-default-test - labels: - what: tcp-default-test -spec: - containers: - - name: tcp-default-test - image: ghcr.io/loxilb-io/nginx:stable - ports: - - containerPort: 80 diff --git a/cicd/k8s-calico-incluster/yaml/udp.yml b/cicd/k8s-calico-incluster/yaml/udp.yml deleted file mode 100644 index ac6ef997d..000000000 --- a/cicd/k8s-calico-incluster/yaml/udp.yml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: udp-lb-default - annotations: - loxilb.io/liveness: "yes" - loxilb.io/lbmode: "default" -spec: - loadBalancerClass: loxilb.io/loxilb - externalTrafficPolicy: Local - selector: - what: udp-default-test - ports: - - port: 55003 - protocol: UDP - targetPort: 33333 - type: LoadBalancer ---- -apiVersion: v1 -kind: Pod -metadata: - name: udp-default-test - labels: - what: udp-default-test -spec: - containers: - - name: udp-default-test - image: ghcr.io/loxilb-io/udp-echo:latest - ports: - - containerPort: 33333