Merge pull request #737 from Hanna623/main
Added CI/CD for RabbitMQ on the K3s in-cluster setup
UltraInstinct14 authored Jul 24, 2024
2 parents 85e4119 + 7d8e161 commit 1e18ca8
Showing 36 changed files with 3,283 additions and 0 deletions.
51 changes: 51 additions & 0 deletions cicd/k3s-rabbitmq-incluster/Vagrantfile
@@ -0,0 +1,51 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

workers = (ENV['WORKERS'] || "3").to_i
box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
box_version = "0.7.1"
Vagrant.configure("2") do |config|
config.vm.box = "#{box_name}"
config.vm.box_version = "#{box_version}"

if Vagrant.has_plugin?("vagrant-vbguest")
config.vbguest.auto_update = false
end

config.vm.define "host" do |host|
host.vm.hostname = 'host'
host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
host.vm.provision :shell, :path => "host.sh"
host.vm.provider :virtualbox do |vbox|
vbox.memory = "4096"
vbox.cpus = "8"
vbox.default_nic_type = "virtio"
end
end

config.vm.define "master" do |master|
master.vm.hostname = 'master'
master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0"
master.vm.provision :shell, :path => "master.sh"
master.vm.provider :virtualbox do |vbox|
vbox.memory = "4096"
vbox.cpus = "4"
vbox.default_nic_type = "virtio"
end
end

(1..workers).each do |node_number|
config.vm.define "worker#{node_number}" do |worker|
worker.vm.hostname = "worker#{node_number}"
ip = node_number + 100
worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
worker.vm.provision :shell, :path => "worker.sh"
worker.vm.provider :virtualbox do |vbox|
vbox.memory = "4096"
vbox.cpus = "4"
vbox.default_nic_type = "virtio"
end
end
end
end
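A minimal bring-up sketch for the topology above, assuming Vagrant and VirtualBox are installed on the build machine (the worker count defaults to 3 and can be overridden through the WORKERS variable read by the Vagrantfile):

# bring up host, master and two workers (hypothetical count; default is 3)
WORKERS=2 vagrant up
vagrant status
# sanity-check the k3s control plane from the master VM
vagrant ssh master -c "sudo k3s kubectl get nodes"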
3 changes: 3 additions & 0 deletions cicd/k3s-rabbitmq-incluster/config.sh
@@ -0,0 +1,3 @@
#!/bin/bash
vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f
vagrant up
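Note that config.sh is destructive: it force-destroys every VirtualBox VM known to Vagrant before re-provisioning. A typical invocation from the scenario directory might look like this (path assumed relative to the repo root):

cd cicd/k3s-rabbitmq-incluster
./config.sh    # tears down existing VirtualBox VMs, then runs vagrant up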
84 changes: 84 additions & 0 deletions cicd/k3s-rabbitmq-incluster/grafana.yaml
@@ -0,0 +1,84 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: grafana
name: grafana
spec:
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
securityContext:
fsGroup: 472
supplementalGroups:
- 0
containers:
- name: grafana
image: grafana/grafana:9.1.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
name: http-grafana
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /robots.txt
port: 3000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
resources:
requests:
cpu: 250m
memory: 750Mi
volumeMounts:
- mountPath: /var/lib/grafana
name: grafana-pv
volumes:
- name: grafana-pv
persistentVolumeClaim:
claimName: grafana-pvc
---
apiVersion: v1
kind: Service
metadata:
name: grafana
spec:
ports:
- port: 3000
protocol: TCP
targetPort: http-grafana
selector:
app: grafana
sessionAffinity: None
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerClass: loxilb.io/loxilb
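A usage sketch for the Grafana manifest above; the external address is whatever kube-loxilb allocates for the LoadBalancer service, and admin/admin is only the stock Grafana default:

kubectl apply -f grafana.yaml
kubectl get svc grafana    # note the EXTERNAL-IP column
# then browse http://<EXTERNAL-IP>:3000 and log in (default Grafana credentials unless changed)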
10 changes: 10 additions & 0 deletions cicd/k3s-rabbitmq-incluster/host.sh
@@ -0,0 +1,10 @@
apt-get update
apt-get install -y software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install -y docker-ce

#curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
#add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get install -y iperf iperf3
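A quick post-provision check on the host VM (illustrative only; exact versions will differ):

vagrant ssh host -c "docker --version && iperf3 --version"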
13 changes: 13 additions & 0 deletions cicd/k3s-rabbitmq-incluster/install_cilium.sh
@@ -0,0 +1,13 @@
#!/bin/bash

#Install Cilium
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
mkdir -p ~/.kube/
sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/config
cilium install
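To verify the CNI after this script runs, something like the following can be used on the master (standard cilium-cli and kubectl calls, shown as a sketch):

cilium status --wait
kubectl -n kube-system get pods -l k8s-app=cilium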
32 changes: 32 additions & 0 deletions cicd/k3s-rabbitmq-incluster/iperf-service.yml
@@ -0,0 +1,32 @@
apiVersion: v1
kind: Service
metadata:
name: iperf-service
annotations:
loxilb.io/lbmode: "onearm"
spec:
externalTrafficPolicy: Local
loadBalancerClass: loxilb.io/loxilb
selector:
what: perf-test
ports:
- port: 55001
targetPort: 5001
type: LoadBalancer
---
apiVersion: v1
kind: Pod
metadata:
name: iperf1
labels:
what: perf-test
spec:
containers:
- name: iperf
image: eyes852/ubuntu-iperf-test:0.5
command:
- iperf
- "-s"
ports:
- containerPort: 5001
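A sketch of exercising the service once it has an external address (55001 is the LoadBalancer port defined above; the address reported by kubectl comes from kube-loxilb's externalCIDR):

kubectl apply -f iperf-service.yml
kubectl get svc iperf-service    # read the EXTERNAL-IP
# from the host VM, run an iperf client against it
iperf -c <EXTERNAL-IP> -p 55001 -t 5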

19 changes: 19 additions & 0 deletions cicd/k3s-rabbitmq-incluster/k3s.yaml
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTVRBNE1qWXhOVEl3SGhjTk1qUXdNekU1TURVeU9URXlXaGNOTXpRd016RTNNRFV5T1RFeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTVRBNE1qWXhOVEl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRcjhwZm83akZnSUN1WkZlVkdCZVRrc01PdElZWjZidWVVYVFBc1BLbU0KTGVYbm9Uc1JzRFJ5Wi92Vmw1NzNZZHNHeTYxSHh1WFN3bTNPUFhvS25DKzFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVltWTVuMnhPdUpmNEgvUmt5aDJwCnpPaDE4K3d3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnSnUvbVRlUk9qeC8rdGdNckxQdC9NMmF0a1RqRUw3NkkKU2xQV0N3eEticlVDSUhRZFZEQVJGVWtPd1ZNNEppdVhTaG5JYkt1OXJBNzdocXZBdlZ0ZFEzWEMKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://192.168.80.10:6443
name: default
contexts:
- context:
cluster: default
user: default
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrRENDQVRlZ0F3SUJBZ0lJRUk5Tm02SXBsdzB3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekV3T0RJMk1UVXlNQjRYRFRJME1ETXhPVEExTWpreE1sb1hEVEkxTURNeApPVEExTWpreE1sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJONlJVVGVwcnZBRkNuN2EKcDZndmRUMmxPN1MrWXd3bTZ3em45T2xXcWw4ZnJqNTgwcktEWVNxVmFCdkxUL2IrZytBL0pQRUV6TXFscWdYTwpGYWo0TElTalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVGRNaERoRUZQOEdlMVBSblh4d2hWcGltUHZYakFLQmdncWhrak9QUVFEQWdOSEFEQkUKQWlBaGNVd1d3WE1iRGZaVkE2NHVSemhweDR5dmg0UUNEM0ZZa2YwQkwwQ2FlZ0lnTmZ4enArenUxWk5PZWpoTAo2d3ZXVXhuekZpQ2xZYUpzNDVrcCt6ZFJuME09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTVRBNE1qWXhOVEl3SGhjTk1qUXdNekU1TURVeU9URXlXaGNOTXpRd016RTNNRFV5T1RFeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTVRBNE1qWXhOVEl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUTUoxd1ZOcEIwS0N6MWx5bWhRRGc3UDhRSGxGcHBUOHc5blFCWGYyeGQKMWtTb2RyS3RvSzlQYTJtelNiWFNtei9acTBpQk94SkY3aTdyT3BhQzZXUHdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTNUSVE0UkJUL0JudFQwWjE4Y0lWCmFZcGo3MTR3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUlRN3ZNVldZNkMxaVdLakIzNEYzdVZFQS9GSVpKVVAKRWM1bEFLS0JSWW8vQWlBbUFVVnQzRkRrSEYreFhJWUlzenBscWVDNWZ0Y0g1azJDaFFrbXFZaThXQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU9KamZxUm90eWRLd0poQkY5SHJlTG1RNExSYVp2NFFiRXp0K0I0WnBTeWNvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFM3BGUk42bXU4QVVLZnRxbnFDOTFQYVU3dEw1akRDYnJET2YwNlZhcVh4K3VQbnpTc29OaApLcFZvRzh0UDl2NkQ0RDhrOFFUTXlxV3FCYzRWcVBnc2hBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
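This kubeconfig targets the master's API server at 192.168.80.10:6443; a minimal sketch of using it from outside the VMs (path assumed relative to the repo root):

KUBECONFIG=cicd/k3s-rabbitmq-incluster/k3s.yaml kubectl get nodes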
130 changes: 130 additions & 0 deletions cicd/k3s-rabbitmq-incluster/kube-loxilb.yml
@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-loxilb
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-loxilb
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- watch
- list
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- watch
- list
- patch
- apiGroups:
- ""
resources:
- endpoints
- services
- services/status
verbs:
- get
- watch
- list
- patch
- update
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- watch
- list
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-loxilb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-loxilb
subjects:
- kind: ServiceAccount
name: kube-loxilb
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-loxilb
namespace: kube-system
labels:
app: loxilb
spec:
replicas: 1
selector:
matchLabels:
app: loxilb
template:
metadata:
labels:
app: loxilb
spec:
hostNetwork: true
tolerations:
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
priorityClassName: system-node-critical
serviceAccountName: kube-loxilb
terminationGracePeriodSeconds: 0
containers:
- name: kube-loxilb
image: ghcr.io/loxilb-io/kube-loxilb:latest
imagePullPolicy: Always
command:
- /bin/kube-loxilb
args:
#- --loxiURL=http://192.168.80.9:11111
- --externalCIDR=192.168.80.20/32
- --setRoles=0.0.0.0
#- --monitor
#- --setBGP
- --setLBMode=1
#- --config=/opt/loxilb/agent/kube-loxilb.conf
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
13 changes: 13 additions & 0 deletions cicd/k3s-rabbitmq-incluster/loxilb.sh
@@ -0,0 +1,13 @@
export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')

apt-get update
apt-get install -y software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install -y docker-ce
docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest
echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc
echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc

echo $LOXILB_IP > /vagrant/loxilb-ip
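Once the container is up, the load-balancer state can be inspected via the loxicmd alias or a direct docker exec; the listing command below is an assumption about the installed loxicmd version:

docker exec -it loxilb loxicmd get lb
cat /vagrant/loxilb-ip    # the IP recorded by the last line of the script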
