Skip to content

Commit

Permalink
cicd for incluster-lb with calico
Browse files Browse the repository at this point in the history
  • Loading branch information
TrekkieCoder committed Jul 30, 2023
1 parent 17d368a commit 886375a
Show file tree
Hide file tree
Showing 20 changed files with 599 additions and 0 deletions.
62 changes: 62 additions & 0 deletions cicd/k3s-calico-incluster/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Topology for the k3s + Calico + in-cluster loxilb CICD scenario:
# one traffic-generator "host" VM, two k3s server nodes (master1, master2)
# and a configurable number of worker nodes on the 192.168.80.0/24 network.

# Worker-node count; override via the WORKERS environment variable.
workers = (ENV['WORKERS'] || "2").to_i
#box_name = (ENV['VAGRANT_BOX'] || "ubuntu/focal64")
box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
# Pinned box version; now overridable via VAGRANT_BOX_VERSION (default unchanged).
box_version = (ENV['VAGRANT_BOX_VERSION'] || "0.7.1")

Vagrant.configure("2") do |config|
  config.vm.box = box_name
  config.vm.box_version = box_version

  # Guest-additions auto-update only slows provisioning; disable when present.
  if Vagrant.has_plugin?("vagrant-vbguest")
    config.vbguest.auto_update = false
  end

  # Client VM used to exercise the load-balanced services (see host.sh).
  config.vm.define "host" do |host|
    host.vm.hostname = 'host1'
    #host.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp"
    host.vm.network :private_network, ip: "192.168.80.9", netmask: "255.255.255.0"
    host.vm.network :private_network, ip: "192.168.90.9", netmask: "255.255.255.0"
    host.vm.provision :shell, path: "host.sh"
    host.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 1]
    end
  end

  # First k3s server node; bootstraps the cluster (see master1.sh).
  config.vm.define "master1" do |master|
    master.vm.hostname = 'master1'
    master.vm.network :private_network, ip: "192.168.80.10", netmask: "255.255.255.0"
    master.vm.network :private_network, ip: "192.168.90.10", netmask: "255.255.255.0"
    master.vm.provision :shell, path: "master1.sh"
    master.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 2]
    end
  end

  # Second k3s server node; joins the cluster started by master1 (master2.sh).
  config.vm.define "master2" do |master|
    master.vm.hostname = 'master2'
    master.vm.network :private_network, ip: "192.168.80.11", netmask: "255.255.255.0"
    master.vm.network :private_network, ip: "192.168.90.11", netmask: "255.255.255.0"
    master.vm.provision :shell, path: "master2.sh"
    master.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 2]
    end
  end

  # Worker nodes worker1..workerN at 192.168.80.(100 + N).
  # NOTE(review): workers get no 192.168.90.x leg, unlike the other VMs —
  # confirm this is intentional.
  (1..workers).each do |node_number|
    config.vm.define "worker#{node_number}" do |worker|
      worker.vm.hostname = "worker#{node_number}"
      ip = node_number + 100
      worker.vm.network :private_network, ip: "192.168.80.#{ip}", netmask: "255.255.255.0"
      worker.vm.provision :shell, path: "worker.sh"
      worker.vm.provider :virtualbox do |vbox|
        vbox.customize ["modifyvm", :id, "--memory", 2048]
        vbox.customize ["modifyvm", :id, "--cpus", 1]
      end
    end
  end
end
4 changes: 4 additions & 0 deletions cicd/k3s-calico-incluster/config.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash
# Tear down every VirtualBox VM Vagrant knows about, bring up this
# scenario's cluster, then route the loxilb service VIP via master1's
# 192.168.90.x address (idempotent: ignore "route exists" failures).
vagrant global-status | awk 'tolower($0) ~ /virtualbox/ { print $1 }' | xargs -L 1 vagrant destroy -f
vagrant up
sudo ip route add 123.123.123.1 via 192.168.90.10 || true
5 changes: 5 additions & 0 deletions cicd/k3s-calico-incluster/host.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/bin/bash
# Provisioner for the client "host" VM.
# NOTE: the original began with `sudo su`, which does NOT elevate the rest of
# the script (it spawns a subshell that exits immediately); it only worked
# because Vagrant's shell provisioner runs as root. Commands are explicitly
# sudo'ed here so the script also behaves when run by a non-root user.

# Give the test service VIP a friendly name.
sudo sh -c 'echo "123.123.123.1 k8s-svc" >> /etc/hosts'
# Lower the MTU on the 192.168.90.x leg — presumably to leave headroom for
# tunnel/overlay encapsulation; confirm the exact requirement.
# (`ip link` replaces the deprecated `ifconfig`; `ip` is already used below.)
sudo ip link set dev eth2 mtu 1450
# Reach the loxilb external service range via master1 (idempotent).
sudo ip route add 123.123.123.0/24 via 192.168.90.10 || true
echo "Host is up"
19 changes: 19 additions & 0 deletions cicd/k3s-calico-incluster/k3s.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Kubeconfig published by master1.sh (copied from /etc/rancher/k3s/k3s.yaml
# with 127.0.0.1 rewritten to the master's 192.168.80.10 address).
# NOTE(review): this file embeds a client certificate and private key for an
# ephemeral throwaway CI cluster — these credentials must never be reused
# for anything real.
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyT1RBMk9EWTJNakl3SGhjTk1qTXdOek13TURNeE1ESXlXaGNOTXpNd056STNNRE14TURJeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyT1RBMk9EWTJNakl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTYTM4cFIrbEt5NFZEZ1AxckkzbW5KaXVPeVcwN3ZTczl2cjJYWEsyK3gKRGE0MGZxTWJmUC9rbnhYdGVxZS9RbVpSN2Z4L3psNmlYVkxPZ1BHdUxzbDFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVUIyRXAwL2xaeUxjUlVTQ2lKYkpQCmVEeWc3Tk13Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQVBKRlJxcFRKY2ZFZU9zbFgwdW80ckN0S2F0TEZZb0QKa1U4Q1YyY2hGYXpQQWlCSEY0MFNQQU5mRU5DVkRtNlAyUUxqV2p1VmFoa3duUTNjMUk2OGdoZlRoUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://192.168.80.10:6443
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJSkUrNHR1b2grVzB3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOamt3TmpnMk5qSXlNQjRYRFRJek1EY3pNREF6TVRBeU1sb1hEVEkwTURjeQpPVEF6TVRBeU1sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJMdGI1NG9BNHJXbjVXc2oKVEFpN2hFamJIZmZBS29FRTNaMHErN0lIcjVYaCt4c0tBOEVLeHNLUHBKaHhRd1VQaXpmUEtYdWFZZlZ4Y0lpdQprdjVjNVRhalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVGd6Vm8xUkRHV2VROHJyNU5KMnlSeVduQzJQekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQXhqa2d1dC9QcVEwSEpVR2FmNVRGWjRBWWlwRjdGN2x0ZGVla1lMNHdPaHdDSUQ0eDFtd2VBWnJNNDlTUQpYTlhuTHVSVVROczdERmJab1ExZkNwOHdxZ1R6Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUyT1RBMk9EWTJNakl3SGhjTk1qTXdOek13TURNeE1ESXlXaGNOTXpNd056STNNRE14TURJeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUyT1RBMk9EWTJNakl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTZWVEc09QcjhJQXBoWlBIRG1ZZUpoRjZyM0h6K0NDTWFoaTdkeWk1MkoKRW5GaHkrNzdyVnQ2SUJSQnpNYzZEa1NlZnZpWTFDaS9CZk1sM2R5RDRrUHBvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTRNMWFOVVF4bG5rUEs2K1RTZHNrCmNscHd0ajh3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnU3d0MllPYkNVNW1UekhEQ3JCUk5jT01Db1NIWEZTR3kKWlZaVXlhUVZPZ1lDSURFK244bjJFSjJiZGt3SDlKQ3ZtNGZMQkYya3dlNjZSZUxTcG5IYVloeVcKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUp5bi9neUhvRXlmc3dQamRLcTBCeUpxT0M3TlczYWtHMGRyTFJBUzdGY3BvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFdTF2bmlnRGl0YWZsYXlOTUNMdUVTTnNkOThBcWdRVGRuU3I3c2dldmxlSDdHd29Ed1FyRwp3bytrbUhGREJRK0xOODhwZTVwaDlYRndpSzZTL2x6bE5nPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
132 changes: 132 additions & 0 deletions cicd/k3s-calico-incluster/kube-loxilb.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
---
# ServiceAccount under which the kube-loxilb controller Deployment runs.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-loxilb
  namespace: kube-system
---
# Cluster-wide permissions for kube-loxilb: it watches nodes, pods,
# endpoints/endpointslices and services, and patches/updates service status
# (presumably to publish allocated external LB IPs — confirm against
# kube-loxilb documentation).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-loxilb
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - watch
      - list
      - patch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
      - watch
      - list
      - patch
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - services/status
    verbs:
      - get
      - watch
      - list
      - patch
      - update
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - get
      - watch
      - list
  # Token/subject review access, e.g. for authenticating API callers.
  - apiGroups:
      - authentication.k8s.io
    resources:
      - tokenreviews
    verbs:
      - create
  - apiGroups:
      - authorization.k8s.io
    resources:
      - subjectaccessreviews
    verbs:
      - create
---
# Bind the ClusterRole above to the kube-loxilb ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-loxilb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-loxilb
subjects:
  - kind: ServiceAccount
    name: kube-loxilb
    namespace: kube-system
---
# kube-loxilb controller deployment (single replica, runs on the host
# network so it can reach the in-cluster loxilb instances directly).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-loxilb
  namespace: kube-system
  labels:
    app: loxilb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: loxilb
  template:
    metadata:
      labels:
        app: loxilb
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      # Tolerate every taint so the controller can run on any node.
      tolerations:
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      priorityClassName: system-node-critical
      serviceAccountName: kube-loxilb
      terminationGracePeriodSeconds: 0
      containers:
        - name: kube-loxilb
          image: ghcr.io/loxilb-io/kube-loxilb:latest
          imagePullPolicy: Always
          command:
            - /bin/kube-loxilb
          args:
            # loxiURL is intentionally unset: kube-loxilb discovers the
            # in-cluster loxilb pods instead of a fixed endpoint.
            #- --loxiURL=http://192.168.80.10:11111
            # Pool from which external service IPs are allocated
            # (host.sh / config.sh route 123.123.123.0/24 via master1).
            - --externalCIDR=123.123.123.1/24
            # BGP enabled with local AS 64512 (private ASN).
            - --setBGP=64512
            # NOTE(review): presumably assigns active/backup roles among
            # loxilb instances — confirm against kube-loxilb docs.
            - --setRoles
            #- --monitor
            #- --setBGP
            #- --setLBMode=1
            #- --config=/opt/loxilb/agent/kube-loxilb.conf
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: true
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
64 changes: 64 additions & 0 deletions cicd/k3s-calico-incluster/loxilb-peer.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# loxilb-peer DaemonSet: runs loxilb in peer mode (--peer) on every
# non-control-plane node — presumably BGP-peering-only, without the
# datapath LB role; confirm against loxilb documentation.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: loxilb-peer
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: loxilb-peer-app
  template:
    metadata:
      name: loxilb-peer
      labels:
        app: loxilb-peer-app
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      # Schedule only on worker nodes (exclude master/control-plane).
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: "node-role.kubernetes.io/master"
                    operator: DoesNotExist
                  - key: "node-role.kubernetes.io/control-plane"
                    operator: DoesNotExist
      containers:
        - name: loxilb-peer-app
          image: "ghcr.io/loxilb-io/loxilb:latest"
          command: [ "/root/loxilb-io/loxilb/loxilb", "--peer" ]
          ports:
            - containerPort: 11111   # loxilb API (see kube-loxilb's loxiURL)
            - containerPort: 179     # BGP
            - containerPort: 50051   # gobgp gRPC
          securityContext:
            privileged: true
            capabilities:
              add:
                - SYS_ADMIN
---
# Headless service exposing the peer pods' API/BGP/gRPC ports for discovery.
apiVersion: v1
kind: Service
metadata:
  name: loxilb-peer-service
  namespace: kube-system
spec:
  clusterIP: None
  selector:
    app: loxilb-peer-app
  ports:
    - name: loxilb-peer-app
      port: 11111
      targetPort: 11111
      protocol: TCP
    - name: loxilb-peer-bgp
      port: 179
      targetPort: 179
      protocol: TCP
    - name: loxilb-peer-gobgp
      port: 50051
      targetPort: 50051
      protocol: TCP


67 changes: 67 additions & 0 deletions cicd/k3s-calico-incluster/loxilb.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
# loxilb-lb DaemonSet: full loxilb load-balancer instances, pinned to
# master/control-plane nodes only (required affinity + matching tolerations).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: loxilb-lb
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: loxilb-app
  template:
    metadata:
      name: loxilb-lb
      labels:
        app: loxilb-app
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      # Tolerate the control-plane taints so the pods can land there...
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          operator: Exists
      # ...and require a control-plane label so they land ONLY there.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: "node-role.kubernetes.io/master"
                    operator: Exists
                  - key: "node-role.kubernetes.io/control-plane"
                    operator: Exists
      containers:
        - name: loxilb-app
          image: "ghcr.io/loxilb-io/loxilb:latest"
          # --blacklist excludes Calico-managed interfaces (cali*, tunl*,
          # vxlan.calico, veth*) from loxilb hooks; --egr-hooks and --bgp
          # per loxilb docs — confirm exact semantics there.
          command: [ "/root/loxilb-io/loxilb/loxilb", "--bgp", "--egr-hooks", "--blacklist=cali.|tunl.|vxlan[.]calico|veth." ]
          ports:
            - containerPort: 11111   # loxilb API
            - containerPort: 179     # BGP
            - containerPort: 50051   # gobgp gRPC
          securityContext:
            privileged: true
            capabilities:
              add:
                - SYS_ADMIN
---
# Headless service exposing the LB pods' API/BGP/gRPC ports for discovery.
apiVersion: v1
kind: Service
metadata:
  name: loxilb-lb-service
  namespace: kube-system
spec:
  clusterIP: None
  selector:
    app: loxilb-app
  ports:
    - name: loxilb-app
      port: 11111
      targetPort: 11111
      protocol: TCP
    - name: loxilb-app-bgp
      port: 179
      targetPort: 179
      protocol: TCP
    - name: loxilb-app-gobgp
      port: 50051
      targetPort: 50051
      protocol: TCP
1 change: 1 addition & 0 deletions cicd/k3s-calico-incluster/master-ip
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
192.168.80.10
13 changes: 13 additions & 0 deletions cicd/k3s-calico-incluster/master1.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Provisions the first k3s server node and bootstraps the cluster.
# NOTE(review): `sudo su` does NOT elevate the remaining lines (the spawned
# shell exits immediately); this script works because Vagrant's shell
# provisioner already runs as root — confirm before reusing standalone.
sudo su
# Pick this node's 192.168.80.x address (skip NAT 10.0.2.15 and the 90-net).
export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.90' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
# Install k3s as the cluster-init server. Flannel and the built-in network
# policy are disabled because Calico is installed below; servicelb is
# disabled because loxilb provides LB services.
# NOTE(review): "external-hostname=192.168.80.10" is missing its leading
# "--" and is likely ignored by k3s (possibly meant --tls-san) — confirm.
curl -fL https://get.k3s.io | sh -s - server --node-ip=192.168.80.10 --disable servicelb --disable traefik --cluster-init external-hostname=192.168.80.10 --node-external-ip=192.168.80.10 --disable-cloud-controller --kubelet-arg cloud-provider=external --flannel-backend=none --disable-network-policy --cluster-cidr=10.42.0.0/16
# Give the server time to come up before exporting join material.
sleep 60
# Publish join info and a kubeconfig to the shared /vagrant folder,
# rewriting the loopback address to the master's reachable IP.
echo $MASTER_IP > /vagrant/master-ip
cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token
sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /etc/rancher/k3s/k3s.yaml
cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml
#sudo kubectl apply -f /vagrant/loxilb.yml
#sudo kubectl apply -f /vagrant/kube-loxilb.yml
# Install Calico v3.26.0 via the Tigera operator.
sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml
sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml
# Presumably blocks until the cluster reports ready — see wait_ready.sh.
/vagrant/wait_ready.sh
9 changes: 9 additions & 0 deletions cicd/k3s-calico-incluster/master2.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Provisions the second k3s server node, joining the cluster via master1.
# NOTE(review): `sudo su` does NOT elevate the remaining lines; this relies
# on Vagrant's shell provisioner already running as root — confirm before
# reusing standalone.
sudo su
# NOTE(review): WORKER_ADDR is computed but never used below.
export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
export MASTER_ADDR=$(cat /vagrant/master-ip)
export NODE_TOKEN=$(cat /vagrant/node-token)

#curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 --disable-cloud-controller -t ${NODE_TOKEN}
# Join as an additional server using the token shared by master1.
# NOTE(review): "external-hostname=192.168.80.11" is missing its leading
# "--" and is likely ignored by k3s — confirm (possibly meant --tls-san).
curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 -t ${NODE_TOKEN}

# Presumably blocks until the cluster reports ready — see wait_ready.sh.
/vagrant/wait_ready.sh
Loading

0 comments on commit 886375a

Please sign in to comment.