From 86ba4a5785bbbcb54307ff4d30813b6cc7ceef14 Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Tue, 31 Oct 2023 11:14:29 +0900 Subject: [PATCH 1/2] L2 incluster CICD test case --- cicd/k3s-flannel-incluster-l2/README | 7 + cicd/k3s-flannel-incluster-l2/Vagrantfile | 60 ++++++++ cicd/k3s-flannel-incluster-l2/config.sh | 7 + cicd/k3s-flannel-incluster-l2/host.sh | 3 + .../host_validation.sh | 53 +++++++ cicd/k3s-flannel-incluster-l2/input | 6 + cicd/k3s-flannel-incluster-l2/k3s.yaml | 19 +++ cicd/k3s-flannel-incluster-l2/kube-loxilb.yml | 132 ++++++++++++++++++ cicd/k3s-flannel-incluster-l2/loxilb.yml | 59 ++++++++ cicd/k3s-flannel-incluster-l2/master1.sh | 12 ++ cicd/k3s-flannel-incluster-l2/master2.sh | 10 ++ cicd/k3s-flannel-incluster-l2/node-token | 1 + cicd/k3s-flannel-incluster-l2/rmconfig.sh | 7 + .../sctp-onearm-ds.yml | 39 ++++++ .../tcp-onearm-ds.yml | 37 +++++ .../udp-onearm-ds.yml | 38 +++++ cicd/k3s-flannel-incluster-l2/udp_client | Bin 0 -> 17192 bytes cicd/k3s-flannel-incluster-l2/validation.sh | 34 +++++ cicd/k3s-flannel-incluster-l2/wait_ready.sh | 37 +++++ cicd/k3s-flannel-incluster-l2/worker.sh | 12 ++ 20 files changed, 573 insertions(+) create mode 100644 cicd/k3s-flannel-incluster-l2/README create mode 100644 cicd/k3s-flannel-incluster-l2/Vagrantfile create mode 100755 cicd/k3s-flannel-incluster-l2/config.sh create mode 100755 cicd/k3s-flannel-incluster-l2/host.sh create mode 100755 cicd/k3s-flannel-incluster-l2/host_validation.sh create mode 100644 cicd/k3s-flannel-incluster-l2/input create mode 100644 cicd/k3s-flannel-incluster-l2/k3s.yaml create mode 100644 cicd/k3s-flannel-incluster-l2/kube-loxilb.yml create mode 100644 cicd/k3s-flannel-incluster-l2/loxilb.yml create mode 100755 cicd/k3s-flannel-incluster-l2/master1.sh create mode 100755 cicd/k3s-flannel-incluster-l2/master2.sh create mode 100644 cicd/k3s-flannel-incluster-l2/node-token create mode 100755 cicd/k3s-flannel-incluster-l2/rmconfig.sh create mode 100644 
cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml create mode 100644 cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml create mode 100644 cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml create mode 100755 cicd/k3s-flannel-incluster-l2/udp_client create mode 100755 cicd/k3s-flannel-incluster-l2/validation.sh create mode 100755 cicd/k3s-flannel-incluster-l2/wait_ready.sh create mode 100644 cicd/k3s-flannel-incluster-l2/worker.sh diff --git a/cicd/k3s-flannel-incluster-l2/README b/cicd/k3s-flannel-incluster-l2/README new file mode 100644 index 000000000..ff714ec1e --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/README @@ -0,0 +1,7 @@ +## Test Case Description + +This scenario will have K3s(2 Master Nodes & 2 Worker Nodes) cluster with flannel CNI. LoxiLB will be running in the in-cluster Active-Backup High Availability mode (in both the master nodes) but without State Synchronization. Workloads will be spawned in all the cluster nodes. + +Client will be connected directly to the cluster with L2 network. Service CIDR will also be a Virtual IP from the K3s cluster network. + +In in-cluster scenarios, it is advised to create LB services in either one-arm or fullnat mode for ease of connectivity. 
diff --git a/cicd/k3s-flannel-incluster-l2/Vagrantfile b/cicd/k3s-flannel-incluster-l2/Vagrantfile new file mode 100644 index 000000000..2132c4936 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/Vagrantfile @@ -0,0 +1,60 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +workers = (ENV['WORKERS'] || "2").to_i +box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s") +box_version = "0.7.1" +Vagrant.configure("2") do |config| + config.vm.box = "#{box_name}" + config.vm.box_version = "#{box_version}" + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + + config.vm.define "host" do |host| + host.vm.hostname = 'host1' + host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0" + host.vm.provision :shell, :path => "host.sh" + host.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 2048] + vbox.customize ["modifyvm", :id, "--cpus", 1] + end + end + + config.vm.define "master1" do |master| + master.vm.hostname = 'master1' + master.vm.network :private_network, ip: "192.168.90.10", :netmask => "255.255.255.0" + master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0" + master.vm.provision :shell, :path => "master1.sh" + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 8192] + vbox.customize ["modifyvm", :id, "--cpus", 4] + end + end + + config.vm.define "master2" do |master| + master.vm.hostname = 'master2' + master.vm.network :private_network, ip: "192.168.90.11", :netmask => "255.255.255.0" + master.vm.network :private_network, ip: "192.168.80.11", :netmask => "255.255.255.0" + master.vm.provision :shell, :path => "master2.sh" + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 8192] + vbox.customize ["modifyvm", :id, "--cpus", 4] + end + end + + (1..workers).each do |node_number| + 
config.vm.define "worker#{node_number}" do |worker| + worker.vm.hostname = "worker#{node_number}" + ip = node_number + 100 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision :shell, :path => "worker.sh" + worker.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + end + end + end +end diff --git a/cicd/k3s-flannel-incluster-l2/config.sh b/cicd/k3s-flannel-incluster-l2/config.sh new file mode 100755 index 000000000..b0cfb3651 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/config.sh @@ -0,0 +1,7 @@ +#!/bin/bash +vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f +vagrant up +#sudo ip route add 123.123.123.1 via 192.168.90.10 || true +vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/tcp-onearm-ds.yml' +vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/udp-onearm-ds.yml' +vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/sctp-onearm-ds.yml' diff --git a/cicd/k3s-flannel-incluster-l2/host.sh b/cicd/k3s-flannel-incluster-l2/host.sh new file mode 100755 index 000000000..32cc4275a --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/host.sh @@ -0,0 +1,3 @@ +sudo apt install lksctp-tools +sudo ip route add 123.123.123.0/24 via 192.168.90.10 +echo "Host is up" diff --git a/cicd/k3s-flannel-incluster-l2/host_validation.sh b/cicd/k3s-flannel-incluster-l2/host_validation.sh new file mode 100755 index 000000000..56b4fb193 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/host_validation.sh @@ -0,0 +1,53 @@ +#!/bin/bash +extIP=$(cat /vagrant/extIP) + +mode="onearm" +tcp_port=55001 +udp_port=55002 +sctp_port=55003 + +code=0 +echo Service IP: $extIP + +ip route list match $extIP | grep $extIP -A 2 + +echo -e "\n*********************************************" +echo "Testing Service" +echo "*********************************************" +for((i=0;i<20;i++)) +do + +out=$(curl -s 
--connect-timeout 10 http://$extIP:$tcp_port) +if [[ ${out} == *"Welcome to nginx"* ]]; then + echo -e "K3s-flannel-incluster-l2 TCP\t($mode)\t[OK]" +else + echo -e "K3s-flannel-incluster-l2 TCP\t($mode)\t[FAILED]" + code=1 +fi + +out=$(timeout 5 /vagrant/udp_client $extIP $udp_port) +if [[ ${out} == *"Client"* ]]; then + echo -e "K3s-flannel-incluster-l2 UDP\t($mode)\t[OK]" +else + echo -e "K3s-flannel-incluster-l2 UDP\t($mode)\t[FAILED]" + code=1 +fi + +sctp_darn -H 192.168.80.9 -h $extIP -p $sctp_port -s < /vagrant/input > output +#sleep 2 +exp="New connection, peer addresses +192.168.80.200:55003" + +res=`cat output | grep -A 1 "New connection, peer addresses"` +sudo rm -rf output +if [[ "$res" == "$exp" ]]; then + #echo $res + echo -e "K3s-flannel-incluster-l2 SCTP\t($mode)\t[OK]" +else + echo -e "K3s-flannel-incluster-l2 SCTP\t($mode)\t[FAILED]" + code=1 +fi + + +done +exit $code diff --git a/cicd/k3s-flannel-incluster-l2/input b/cicd/k3s-flannel-incluster-l2/input new file mode 100644 index 000000000..6fb66a5e2 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/input @@ -0,0 +1,6 @@ + + + + + + diff --git a/cicd/k3s-flannel-incluster-l2/k3s.yaml b/cicd/k3s-flannel-incluster-l2/k3s.yaml new file mode 100644 index 000000000..f18efb521 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/k3s.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyT1RnM01UWTBPREV3SGhjTk1qTXhNRE14TURFME1USXhXaGNOTXpNeE1ESTRNREUwTVRJeApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyT1RnM01UWTBPREV3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUcE0zVW11N0J3M1pVMzNwS3YrS0dwTHUwUXkvSllISUQrNVVqcWM4NGcKTnJudHZTcVdISmJEUExtWWhNVng5S3FiU0I3dU9HMmVvVGN2dzEwY1Z0RVRvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWx4a2ZxcXp0dzhHMHcvb2VFb0EzCmZjejh0eDB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnT2hUamlVYnU4TDl2YmNISlpTTWFPR3FsWlgwZ205dm0KWncxZ1hBV0VvTFlDSVFDbWpJQ1FSRzJUWnhpdldJUVhrVERhekJLbHZJTWdPWkk3bFlCTTJvVFd6dz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: https://192.168.80.10:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrRENDQVRlZ0F3SUJBZ0lJYUxFcUF6cEl5aGt3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOams0TnpFMk5EZ3hNQjRYRFRJek1UQXpNVEF4TkRFeU1Wb1hEVEkwTVRBegpNREF4TkRFeU1Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBYnR3WEQvcWt2ajhrU0kKZlIzZXNWUGRDQnpyYU4zV1hrS3NvOTZhWnFBcUFiOHdkRlFPRnZIdTlJSEgyK2dEY0N0MXJOWC9TK1FNcFVlWgpmbEUremRtalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUWNNaW1IbFdCOTdjTE5YTW92OExpc2ZwemVvakFLQmdncWhrak9QUVFEQWdOSEFEQkUKQWlBSy9TVEJ0V0VJblpGNVF0Zkx1dVRQZ0pXZ3BvL2JCbThwNXhvTXRJN3JKd0lnRXZ3MkdOaVY5QmRtR1lLTwpmVk5lMlE2YVZwdW1hTTZ5eEFaZjdTRW1hV2c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUyT1RnM01UWTBPREV3SGhjTk1qTXhNRE14TURFME1USXhXaGNOTXpNeE1ESTRNREUwTVRJeApXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUyT1RnM01UWTBPREV3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRSzR3QzhyTGdXWmFMcWE0Yjh6NllLN0dkQTFxWUJjTHNhZ0R3TmNpcnQKaGN3SHVjUEJ2cTN2elN2STVsRGpua3VlenZUdmlydy9jR0doZGlIRGdwcVNvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVUhESXBoNVZnZmUzQ3pWektML0M0CnJINmMzcUl3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQU9EdW4vU09MT2w0MzhycmkyazdFWTV6bktXd2IzLzcKNVp3Z2pDUndaQkZsQWlFQXVsSjQwelFqT05SWXVVN3dNa29JQkEzNjRaR2FuaWdzaFdtd2JZTmZQVGs9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUFJTUloU1BCSFFaN3BHY2ZxNEVsZVVQY0wxR0g1TEVyRVV0akNnRTNxTDdvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFQnUzQmNQK3FTK1B5UkloOUhkNnhVOTBJSE90bzNkWmVRcXlqM3BwbW9Db0J2ekIwVkE0Vwo4ZTcwZ2NmYjZBTndLM1dzMWY5TDVBeWxSNWwrVVQ3TjJRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml new 
file mode 100644 index 000000000..b4c8dcbae --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + labels: + app: loxilb + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + #- --loxiURL=http://192.168.80.10:11111 + - --externalCIDR=192.168.80.200/32 + #- --setBGP=64512 + - --setRoles=0.0.0.0 + #- --monitor + #- --setBGP + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k3s-flannel-incluster-l2/loxilb.yml b/cicd/k3s-flannel-incluster-l2/loxilb.yml new file mode 100644 index 000000000..6fee9b7bd --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/loxilb.yml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel." 
] + ports: + - containerPort: 11111 + - containerPort: 179 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP diff --git a/cicd/k3s-flannel-incluster-l2/master1.sh b/cicd/k3s-flannel-incluster-l2/master1.sh new file mode 100755 index 000000000..c6f0613d1 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/master1.sh @@ -0,0 +1,12 @@ +sudo su +export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.90' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +curl -fL https://get.k3s.io | sh -s - server --node-ip=192.168.80.10 --disable servicelb --disable traefik --cluster-init external-hostname=192.168.80.10 --node-external-ip=192.168.80.10 --disable-cloud-controller +curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - +sleep 60 +echo $MASTER_IP > /vagrant/master-ip +cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token +sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /etc/rancher/k3s/k3s.yaml +cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml +sudo kubectl apply -f /vagrant/loxilb.yml +sudo kubectl apply -f /vagrant/kube-loxilb.yml +/vagrant/wait_ready.sh diff --git a/cicd/k3s-flannel-incluster-l2/master2.sh b/cicd/k3s-flannel-incluster-l2/master2.sh new file mode 100755 index 000000000..5ec72af6e --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/master2.sh @@ -0,0 +1,10 @@ +sudo su +export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +export MASTER_ADDR=$(cat /vagrant/master-ip) +export NODE_TOKEN=$(cat /vagrant/node-token) + +#curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 
external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 --disable-cloud-controller -t ${NODE_TOKEN} +curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 -t ${NODE_TOKEN} +curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - + +/vagrant/wait_ready.sh diff --git a/cicd/k3s-flannel-incluster-l2/node-token b/cicd/k3s-flannel-incluster-l2/node-token new file mode 100644 index 000000000..41447f218 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/node-token @@ -0,0 +1 @@ +K104ba03b7d623244660768d0475dbaab00b38a44cf3dbd7f8cfb749899d6917dfe::server:53d79e122c4fb6b54f104932e26995dd diff --git a/cicd/k3s-flannel-incluster-l2/rmconfig.sh b/cicd/k3s-flannel-incluster-l2/rmconfig.sh new file mode 100755 index 000000000..bd4b79e81 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/rmconfig.sh @@ -0,0 +1,7 @@ +#!/bin/bash +sudo ip route del 123.123.123.1 via 192.168.90.10 || true +vagrant destroy -f worker1 +vagrant destroy -f worker2 +vagrant destroy -f master1 +vagrant destroy -f master2 +vagrant destroy -f host diff --git a/cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml b/cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml new file mode 100644 index 000000000..793bab3ef --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-onearm-svc + annotations: + loxilb.io/lbmode: "onearm" + loxilb.io/liveness: "yes" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: sctp-onearm-test + ports: + - port: 55003 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: sctp-onearm-ds + labels: + what: sctp-onearm-test +spec: + selector: + matchLabels: + what: sctp-onearm-test + template: + 
metadata: + labels: + what: sctp-onearm-test + spec: + containers: + - name: sctp-onearm-pod + image: loxilbio/sctp-darn:latest + command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - containerPort: 9999 diff --git a/cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml b/cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml new file mode 100644 index 000000000..b1ea660d7 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-onearm-svc + annotations: + loxilb.io/lbmode: "onearm" + loxilb.io/liveness: "yes" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-onearm-test + ports: + - port: 55001 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: tcp-onearm-ds + labels: + what: tcp-onearm-test +spec: + selector: + matchLabels: + what: tcp-onearm-test + template: + metadata: + labels: + what: tcp-onearm-test + spec: + containers: + - name: tcp-onearm-pod + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml b/cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml new file mode 100644 index 000000000..01bcf8e70 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-onearm-svc + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: udp-onearm-test + ports: + - port: 55002 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: udp-onearm-ds + labels: + what: udp-onearm-test +spec: + selector: + matchLabels: + what: udp-onearm-test + template: + metadata: + labels: + what: udp-onearm-test + spec: + containers: + - name: udp-onearm-test + image: 
ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k3s-flannel-incluster-l2/udp_client b/cicd/k3s-flannel-incluster-l2/udp_client new file mode 100755 index 0000000000000000000000000000000000000000..b70cd81fccd77645a3ecd1f5fc7944f3dc1b399a GIT binary patch literal 17192 zcmeHOZ)_aJ6`%9Pi9?dJLt`*WAq!0;r^pvOB&h)ze74V7BRhm(18ox47vDNQasRm6 z0~=9raDzCRqppzJNUek;QMHIlbwAK1RDjda5GoZVAE>CHB1J%b6$NvIDlH_}_h#Q) z@2&4jh1xIGj&=8&_nY_L%)XhO+wsmk9PI6`@^}O%ueeE|G`rel+NogVM#=!Ri&n7; zzSoPZ#8Qx#NX{&8Hvvk`^hQ-F))4OnM7svel)v|!{GBBEWTG({CKiaJvh?W!?L zvJ+n?r;J=~)u*5_Bt?E(IEo6880~f#o53boEC$<3afae@l#uOEM7t!}CCQF)p6X-d zcw+PM(@ODlq5@@@6!|TL)9x;^b7B|iGh!W>Zhy}Rf0_DrkzJ!A-3TXgJ&au6ZD5CS z`A-vf^KPoIQa?;n{23{{VskXIfBV+W(Qs2V63>h_jka!Y+P>AFPWZRTCSbd;4jfZE zb`A(|Gm9LBQTD|elCk{RJsY1M*b{p8msdB{zrW$r6)*2T@HYFvI#CA`+VhY=8S{86 zqKLpYqai*#b>9^DWlMk1aT z$&8T}nwB;~L-%MyBll>-p-5Cj;<}+FK|v%_k+?A|(ncz($3;w!rNK0%58XGMO2kCS zNJPX?G?CUtT91c~1h^U+4Qa!XcqkfqKnD@)MfWK~i-jU_v7@)Aqf^`B-)iNy__qnI zr@v1N>nZ&}ByH%a{=Uv=BChv`_D9jjfmk9=J~UIctRh}!75sVSH|FrqKNl1rn zynVi$w((dur=Qw*jvbcG*m&pwD;>4*D=ii#KN}AnXr)s&-e<8woVM}Y_qJj;>qfwh zfExif0&WD{2>cHt@Ja1e|5PVFs8J`Y|F~HQ_0X(QRXnXuyj*ia)>_=w0d%3bu@j(n zy+D0@rWJ+!pNqxfl$7xSs*pcp$@pMX$RD?4e4r`he__e^AXCU6v1ELJDdfLr$@t(> z$RD(1d|)Z$@3mxnP$}d?mW&T5h5T)nj1O*we87_Nfvu2lVHvLG0;~sW*LMkdJs`&% zvhI-gIphI{+~<&k4!O-CZ*#~^4tb+PzS1Gr+vS$O^<>Z7t!Cd*C*Gak)!&<|ejj>3 z&8R z+ulLgbWzRDtH(dQSv`K8nK~3@Ou#79)B(#o}BTI_G5dw?X#o16!+pw9nrUg_s!tJN4iPGvFcn!d@WN>~p&S z!s_Ibjg8W8@O;axI(Z0Lb+Wxi%^ph1tjSEw)~SR>-_WlL1$Oa#I9$mbMDyH5} zS<492GVua_i?PcfppV0Y^V|fO9K{3r<*0;P#R<#0XG6}1l$O?5wJx7-@gncGSRRSPZ-KjN)H*jLT{v0JsM4jjcHW(tn=Oo z%VmK}@$(-Qi=%+2=8MH?z&8MKRdD#@VsQ}gV4+w%4cK_DSe!>5u$5l2;5$)Fo(Fad z&uE=z-O6P(Q&5IHzUyxWAMl?BA-a~~0!M6s&sO-1LY?SS`0BcS4Y$;;xW8sx+`RUN zuV1&}8l=&FFMJeei!?+M67mLo8i1b#0ZUo#2xDjw8;6}iW!2eeScz+u2 zN26tswdV}pvzUn%Evra8g$ox*SD0Y!ds{~`z5>Ox)&#-(-*8=niOWB|SWKWiO`FPi z4Khj@-XnI3%6ZS*yJ!T7_qsKhqNpciATu#Y`?>Jl7beBA-s3%Imz%{RNd)sV)DUj> 
zEa~yyGp_$C;#m*gAj#{4>Ldj(+@w5C{@{HWmrF}n214v%W8qd0*s?&dwW@#)18rxRFt=_iyz#H??G> z)bh}lW`FZmf6FG5|00jLi_>H6@3(8?eo)?@Udnp~?>{f)mxu?;bdVQzU-P&w)n6)j zJeTsz1dsbtzDDqPDCL(69!I78C4$FmDZj!R*QGq%R)EfN2UDbqTK!9 z6%E$>QK}EKU%Q<#^In+s>dNvwJ1*GZG_*u~TwGwx9L~E{5Vv{z=3L->#4EIa*whx{ zYT|hwXZ{wZGDJ9g%+42m>4JNag(e zT>aSVgJz{Zxy-NDL4_k`x^_I*m9zP!e53|6c{_Te!iYl@AzNm%^yA;PW{r>Uq zRXy;EtfTbng5(#ElNRZxR2QXA;OpR5)j57@QNL>O{c=kMKX+H)AE?0RD)2`t@XuA? zU#-BuRe}Euc)VWceN+SSR~CmyTn_x|sw)Kl9;e>V|n%EzTYH(m9 zmDWO;QTUg9EE&}eJ?w97-riDP2~TxIv`{J)8q@T+ks1@jsZdPU!kJiX3`}g22AW2R z>QFQSHqa)svc3u?igT^bzbDAf!Lt?QdR1ART6r6uzG2vBfpNeimvLG9Wtv>m;- zb_9C0Tf4h&5B6*QfsWoFdX=Y6hSC|?=6`#-1P{QpxAUnKc@{+vhl~&%d2tju4$3T) zZZ*%-ED-3)JUmm%nMY_!IrHpHDJM_Wlv0Zh(>RN<-R3zN2tnzg87-VnXd|I`7!TC+ z+zMLZNL^1}$T+6aOuMZe!Fu0Jg zjhN-@=XGZUIJ{)G=XC_*yHI9Nvu+lE*S06#u>jOz@ZBJhaYb<5jvmTv90#> zBM|KN3fVE9wkt#4;a>d2VbAL;M%HIHEHlnH?BQX^OoX!}GO!)vF(|ONpPy$KtLc7n z+OzzU!=Bf5jQoEi)_2D5w`9-#FBH-w+(;sW)Bg8RfbD1dM#a=*>@r2`+iCwMl-TW) zimA$2gN3%l+$oBmIvO#*ForW@dVtdA=U~IP+?WEmoBGbx h-EKdK3vftzhjXsiEH(daQqI2T22*3L!@wbme*v)VBOL$$ literal 0 HcmV?d00001 diff --git a/cicd/k3s-flannel-incluster-l2/validation.sh b/cicd/k3s-flannel-incluster-l2/validation.sh new file mode 100755 index 000000000..0995fb89c --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/validation.sh @@ -0,0 +1,34 @@ +#!/bin/bash +source ../common.sh +echo k3s-flannel-cluster-l2 + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +sleep 5 +extIP="192.168.80.200" +echo $extIP +echo $extIP > extIP + +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master1 -c 'sudo kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nCluster Info" +echo "******************************************************************************" +echo 
"******************************************************************************" +echo -e "\nPods" +echo "******************************************************************************" +vagrant ssh master1 -c 'sudo kubectl get pods -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nNodes" +echo "******************************************************************************" +vagrant ssh master1 -c 'sudo kubectl get nodes' 2> /dev/null + +vagrant ssh host -c 'sudo /vagrant/host_validation.sh' 2> /dev/null +sudo rm extIP diff --git a/cicd/k3s-flannel-incluster-l2/wait_ready.sh b/cicd/k3s-flannel-incluster-l2/wait_ready.sh new file mode 100755 index 000000000..5ff06e373 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/wait_ready.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +function wait_cluster_ready { + Res=$(sudo kubectl get pods -A | + while IFS= read -r line; do + if [[ "$line" != *"Running"* && "$line" != *"READY"* ]]; then + echo "not ready" + return + fi + done) + if [[ $Res == *"not ready"* ]]; then + return 1 + fi + return 0 +} + +function wait_cluster_ready_full { + i=1 + nr=0 + for ((;;)) do + wait_cluster_ready + nr=$? + if [[ $nr == 0 ]]; then + echo "Cluster is ready" + break + fi + i=$(( $i + 1 )) + if [[ $i -ge 40 ]]; then + echo "Cluster is not ready.Giving up" + exit 1 + fi + echo "Cluster is not ready...." 
+ sleep 10 + done +} + +wait_cluster_ready_full diff --git a/cicd/k3s-flannel-incluster-l2/worker.sh b/cicd/k3s-flannel-incluster-l2/worker.sh new file mode 100644 index 000000000..b03d55fb5 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/worker.sh @@ -0,0 +1,12 @@ +sudo su +export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +export MASTER_ADDR=$(cat /vagrant/master-ip) +export NODE_TOKEN=$(cat /vagrant/node-token) +mkdir -p /etc/rancher/k3s +cp -f /vagrant/k3s.yaml /etc/rancher/k3s/k3s.yaml +curl -sfL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - agent --server https://192.168.80.10:6443 --node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR} -t ${NODE_TOKEN} +#sudo kubectl apply -f /vagrant/loxilb-peer.yml +#sudo kubectl apply -f /vagrant/nginx.yml +#sudo kubectl apply -f /vagrant/udp.yml +#sudo kubectl apply -f /vagrant/sctp.yml +/vagrant/wait_ready.sh From 097d894dd8b3e90a9acdb4c82b2ff14bc7b2083d Mon Sep 17 00:00:00 2001 From: Nikhil Malik Date: Tue, 31 Oct 2023 14:43:17 +0900 Subject: [PATCH 2/2] Added missing scripts for K8s calico L3 CICD case --- cicd/k8s-calico-ipvs3/README | 8 +++ cicd/k8s-calico-ipvs3/host_validation.sh | 60 +++++++++++++++++++++ cicd/k8s-calico-ipvs3/node_scripts/host.sh | 2 +- cicd/k8s-calico-ipvs3/udp_client | Bin 0 -> 17192 bytes 4 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 cicd/k8s-calico-ipvs3/README create mode 100755 cicd/k8s-calico-ipvs3/host_validation.sh create mode 100755 cicd/k8s-calico-ipvs3/udp_client diff --git a/cicd/k8s-calico-ipvs3/README b/cicd/k8s-calico-ipvs3/README new file mode 100644 index 000000000..3b7d30181 --- /dev/null +++ b/cicd/k8s-calico-ipvs3/README @@ -0,0 +1,8 @@ +## Test Case Description + +This scenario will have K8s(1 Master Nodes & 2 Worker Nodes) cluster with Calico CNI in ipvs mode. LoxiLB will be running as external Service LB. Workloads will be spawned in all the cluster nodes. 
+ +Client will be connected to the LoxiLB with L3 network. Client and LoxiLB will run BGP peering. LoxiLB will advertise the Service CIDR or VirtualIP to the client. +Service CIDR will also be a Virtual IP, different from the K8s cluster network. + +In scenarios where LoxiLB runs outside of the cluster, it is advised to create LB services in either one-arm or fullnat mode for ease of connectivity or else user has to resolve connectivity with either static routes or through BGP. diff --git a/cicd/k8s-calico-ipvs3/host_validation.sh b/cicd/k8s-calico-ipvs3/host_validation.sh new file mode 100755 index 000000000..4a3737540 --- /dev/null +++ b/cicd/k8s-calico-ipvs3/host_validation.sh @@ -0,0 +1,60 @@ +#!/bin/bash +extIP=$(cat /vagrant/extIP) + +mode="onearm" +tcp_port=56002 +udp_port=56003 +sctp_port=56004 + +code=0 +echo Service IP: $extIP + +numECMP=$(ip route list match $extIP | grep $extIP -A 2 | tail -n 2 | wc -l) + +ip route list match $extIP | grep $extIP -A 2 + +if [ $numECMP == "2" ]; then + echo "Host ECMP route [OK]" +else + echo "Host ECMP route [NOK]" +fi +echo -e "\n*********************************************" +echo "Testing Service" +echo "*********************************************" +for((i=0;i<20;i++)) +do + +out=$(curl -s --connect-timeout 10 http://$extIP:$tcp_port) +if [[ ${out} == *"Welcome to nginx"* ]]; then + echo -e "K8s-calico-ipvs3 TCP\t($mode)\t[OK]" +else + echo -e "K8s-calico-ipvs3 TCP\t($mode)\t[FAILED]" + code=1 +fi + +out=$(timeout 5 /vagrant/udp_client $extIP $udp_port) +if [[ ${out} == *"Client"* ]]; then + echo -e "K8s-calico-ipvs3 UDP\t($mode)\t[OK]" +else + echo -e "K8s-calico-ipvs3 UDP\t($mode)\t[FAILED]" + code=1 +fi + +sctp_darn -H 192.168.80.9 -h 20.20.20.1 -p 56004 -s < /vagrant/input > output +#sleep 2 +exp="New connection, peer addresses +20.20.20.1:56004" + +res=`cat output | grep -A 1 "New connection, peer addresses"` +sudo rm -rf output +if [[ "$res" == "$exp" ]]; then + #echo $res + echo -e "K8s-calico-ipvs3 
SCTP\t($mode)\t[OK]" +else + echo -e "K8s-calico-ipvs3 SCTP\t($mode)\t[FAILED]" + code=1 +fi + + +done +exit $code diff --git a/cicd/k8s-calico-ipvs3/node_scripts/host.sh b/cicd/k8s-calico-ipvs3/node_scripts/host.sh index a552c006a..7e2726014 100755 --- a/cicd/k8s-calico-ipvs3/node_scripts/host.sh +++ b/cicd/k8s-calico-ipvs3/node_scripts/host.sh @@ -8,6 +8,6 @@ if [ ! -f /var/log/bird.log ]; then sudo touch /var/log/bird.log fi sudo chown bird:bird /var/log/bird.log -sudo service bird start +sudo service bird restart echo "Host is up" diff --git a/cicd/k8s-calico-ipvs3/udp_client b/cicd/k8s-calico-ipvs3/udp_client new file mode 100755 index 0000000000000000000000000000000000000000..b70cd81fccd77645a3ecd1f5fc7944f3dc1b399a GIT binary patch literal 17192 zcmeHOZ)_aJ6`%9Pi9?dJLt`*WAq!0;r^pvOB&h)ze74V7BRhm(18ox47vDNQasRm6 z0~=9raDzCRqppzJNUek;QMHIlbwAK1RDjda5GoZVAE>CHB1J%b6$NvIDlH_}_h#Q) z@2&4jh1xIGj&=8&_nY_L%)XhO+wsmk9PI6`@^}O%ueeE|G`rel+NogVM#=!Ri&n7; zzSoPZ#8Qx#NX{&8Hvvk`^hQ-F))4OnM7svel)v|!{GBBEWTG({CKiaJvh?W!?L zvJ+n?r;J=~)u*5_Bt?E(IEo6880~f#o53boEC$<3afae@l#uOEM7t!}CCQF)p6X-d zcw+PM(@ODlq5@@@6!|TL)9x;^b7B|iGh!W>Zhy}Rf0_DrkzJ!A-3TXgJ&au6ZD5CS z`A-vf^KPoIQa?;n{23{{VskXIfBV+W(Qs2V63>h_jka!Y+P>AFPWZRTCSbd;4jfZE zb`A(|Gm9LBQTD|elCk{RJsY1M*b{p8msdB{zrW$r6)*2T@HYFvI#CA`+VhY=8S{86 zqKLpYqai*#b>9^DWlMk1aT z$&8T}nwB;~L-%MyBll>-p-5Cj;<}+FK|v%_k+?A|(ncz($3;w!rNK0%58XGMO2kCS zNJPX?G?CUtT91c~1h^U+4Qa!XcqkfqKnD@)MfWK~i-jU_v7@)Aqf^`B-)iNy__qnI zr@v1N>nZ&}ByH%a{=Uv=BChv`_D9jjfmk9=J~UIctRh}!75sVSH|FrqKNl1rn zynVi$w((dur=Qw*jvbcG*m&pwD;>4*D=ii#KN}AnXr)s&-e<8woVM}Y_qJj;>qfwh zfExif0&WD{2>cHt@Ja1e|5PVFs8J`Y|F~HQ_0X(QRXnXuyj*ia)>_=w0d%3bu@j(n zy+D0@rWJ+!pNqxfl$7xSs*pcp$@pMX$RD?4e4r`he__e^AXCU6v1ELJDdfLr$@t(> z$RD(1d|)Z$@3mxnP$}d?mW&T5h5T)nj1O*we87_Nfvu2lVHvLG0;~sW*LMkdJs`&% zvhI-gIphI{+~<&k4!O-CZ*#~^4tb+PzS1Gr+vS$O^<>Z7t!Cd*C*Gak)!&<|ejj>3 z&8R z+ulLgbWzRDtH(dQSv`K8nK~3@Ou#79)B(#o}BTI_G5dw?X#o16!+pw9nrUg_s!tJN4iPGvFcn!d@WN>~p&S 
z!s_Ibjg8W8@O;axI(Z0Lb+Wxi%^ph1tjSEw)~SR>-_WlL1$Oa#I9$mbMDyH5} zS<492GVua_i?PcfppV0Y^V|fO9K{3r<*0;P#R<#0XG6}1l$O?5wJx7-@gncGSRRSPZ-KjN)H*jLT{v0JsM4jjcHW(tn=Oo z%VmK}@$(-Qi=%+2=8MH?z&8MKRdD#@VsQ}gV4+w%4cK_DSe!>5u$5l2;5$)Fo(Fad z&uE=z-O6P(Q&5IHzUyxWAMl?BA-a~~0!M6s&sO-1LY?SS`0BcS4Y$;;xW8sx+`RUN zuV1&}8l=&FFMJeei!?+M67mLo8i1b#0ZUo#2xDjw8;6}iW!2eeScz+u2 zN26tswdV}pvzUn%Evra8g$ox*SD0Y!ds{~`z5>Ox)&#-(-*8=niOWB|SWKWiO`FPi z4Khj@-XnI3%6ZS*yJ!T7_qsKhqNpciATu#Y`?>Jl7beBA-s3%Imz%{RNd)sV)DUj> zEa~yyGp_$C;#m*gAj#{4>Ldj(+@w5C{@{HWmrF}n214v%W8qd0*s?&dwW@#)18rxRFt=_iyz#H??G> z)bh}lW`FZmf6FG5|00jLi_>H6@3(8?eo)?@Udnp~?>{f)mxu?;bdVQzU-P&w)n6)j zJeTsz1dsbtzDDqPDCL(69!I78C4$FmDZj!R*QGq%R)EfN2UDbqTK!9 z6%E$>QK}EKU%Q<#^In+s>dNvwJ1*GZG_*u~TwGwx9L~E{5Vv{z=3L->#4EIa*whx{ zYT|hwXZ{wZGDJ9g%+42m>4JNag(e zT>aSVgJz{Zxy-NDL4_k`x^_I*m9zP!e53|6c{_Te!iYl@AzNm%^yA;PW{r>Uq zRXy;EtfTbng5(#ElNRZxR2QXA;OpR5)j57@QNL>O{c=kMKX+H)AE?0RD)2`t@XuA? zU#-BuRe}Euc)VWceN+SSR~CmyTn_x|sw)Kl9;e>V|n%EzTYH(m9 zmDWO;QTUg9EE&}eJ?w97-riDP2~TxIv`{J)8q@T+ks1@jsZdPU!kJiX3`}g22AW2R z>QFQSHqa)svc3u?igT^bzbDAf!Lt?QdR1ART6r6uzG2vBfpNeimvLG9Wtv>m;- zb_9C0Tf4h&5B6*QfsWoFdX=Y6hSC|?=6`#-1P{QpxAUnKc@{+vhl~&%d2tju4$3T) zZZ*%-ED-3)JUmm%nMY_!IrHpHDJM_Wlv0Zh(>RN<-R3zN2tnzg87-VnXd|I`7!TC+ z+zMLZNL^1}$T+6aOuMZe!Fu0Jg zjhN-@=XGZUIJ{)G=XC_*yHI9Nvu+lE*S06#u>jOz@ZBJhaYb<5jvmTv90#> zBM|KN3fVE9wkt#4;a>d2VbAL;M%HIHEHlnH?BQX^OoX!}GO!)vF(|ONpPy$KtLc7n z+OzzU!=Bf5jQoEi)_2D5w`9-#FBH-w+(;sW)Bg8RfbD1dM#a=*>@r2`+iCwMl-TW) zimA$2gN3%l+$oBmIvO#*ForW@dVtdA=U~IP+?WEmoBGbx h-EKdK3vftzhjXsiEH(daQqI2T22*3L!@wbme*v)VBOL$$ literal 0 HcmV?d00001