Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PR - gh-483 : Support for node-port service together with LB services #489

Merged
merged 11 commits into from
Jan 8, 2024
19 changes: 8 additions & 11 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -7,22 +7,19 @@ LABEL description="This is loxilb official Docker Image"
# Disable Prompt During Packages Installation
ARG DEBIAN_FRONTEND=noninteractive

# Prepare environment
RUN mkdir -p /opt/loxilb && \
mkdir -p /opt/loxilb/cert/ && \
mkdir -p /root/loxilb-io/loxilb/ && \
mkdir -p /etc/bash_completion.d/

# Update Ubuntu Software repository
RUN apt-get update && apt-get install -y wget

# Env for golang
ENV PATH="${PATH}:/usr/local/go/bin"

# Install loxilb related packages
RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && echo $arch && if [ "$arch" = "arm64" ] ; then apt-get install -y gcc-multilib-arm-linux-gnueabihf; else apt-get update && apt-get install -y gcc-multilib;fi && \
RUN mkdir -p /opt/loxilb && \
mkdir -p /opt/loxilb/cert/ && \
mkdir -p /root/loxilb-io/loxilb/ && \
mkdir -p /etc/bash_completion.d/ && \
# Update Ubuntu Software repository
apt-get update && apt-get install -y wget && \
arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && echo $arch && if [ "$arch" = "arm64" ] ; then apt-get install -y gcc-multilib-arm-linux-gnueabihf; else apt-get update && apt-get install -y gcc-multilib;fi && \
# Arch specific packages - GoLang
wget https://go.dev/dl/go1.18.linux-${arch}.tar.gz && tar -xzf go1.18.linux-${arch}.tar.gz --directory /usr/local/ && rm go1.18.linux-${arch}.tar.gz && \
wget https://go.dev/dl/go1.21.5.linux-${arch}.tar.gz && tar -xzf go1.21.5.linux-${arch}.tar.gz --directory /usr/local/ && rm go1.21.5.linux-${arch}.tar.gz && \
# Dev and util packages
apt-get install -y clang llvm libelf-dev libpcap-dev vim net-tools \
elfutils dwarves git libbsd-dev bridge-utils wget unzip build-essential \
Expand Down
149 changes: 149 additions & 0 deletions api/k8s/k8s.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
/*
* Copyright (c) 2023 NetLOX Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package k8s

import (
"errors"
"net"
"os"

cmn "github.com/loxilb-io/loxilb/common"
tk "github.com/loxilb-io/loxilib"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)

var (
	// config is the Kubernetes client configuration built by K8sApiInit,
	// either from a kubeconfig file or from the in-cluster service account.
	config *rest.Config
	// ApiHooks is the network-hook interface through which the pod informer
	// adds and removes local pod addresses; set once in K8sApiInit.
	ApiHooks cmn.NetHookInterface
	// stopCh signals the informer goroutine started by K8sApiInit to stop;
	// it is closed by K8sApiClose.
	stopCh chan struct{}
)

// K8sApiInit connects to the Kubernetes API and starts a pod informer that
// mirrors the IP addresses of pods scheduled on this node as local /32
// addresses through the supplied hook interface.
//
// k8sConfigFile is either the path to a kubeconfig file, or the literal
// string "cluster" to use the in-cluster service-account configuration.
// The local node is identified by the MY_NODE_IP environment variable
// (typically injected via the Kubernetes downward API).
//
// It returns a non-nil error when MY_NODE_IP is missing or invalid, or when
// the client configuration cannot be built.
func K8sApiInit(k8sConfigFile string, hooks cmn.NetHookInterface) error {

	var err error
	nodeIP := os.Getenv("MY_NODE_IP")
	if net.ParseIP(nodeIP) == nil {
		tk.LogIt(tk.LogError, "NodeIP(%s) not found\n", nodeIP)
		// Report via the error return instead of os.Exit(1): every other
		// failure path in this function returns an error, and library-style
		// init code should let the caller decide how to react.
		return errors.New("k8s node-ip not found")
	}

	tk.LogIt(tk.LogDebug, "K8s NodeIP(%s)\n", nodeIP)

	if k8sConfigFile != "cluster" {
		config, err = clientcmd.BuildConfigFromFlags("", k8sConfigFile)
		if err != nil {
			tk.LogIt(tk.LogError, "Config(%s) build failed:%s\n", k8sConfigFile, err)
			return err
		}
	} else {
		config, err = rest.InClusterConfig()
		if err != nil {
			tk.LogIt(tk.LogError, "InClusterConfig build failed:%s\n", err)
			return err
		}
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		tk.LogIt(tk.LogError, "NewForConfig failed:%s\n", err)
		return err
	}

	// Install the hooks before the informer exists so the event handlers can
	// never observe a nil ApiHooks.
	ApiHooks = hooks

	// localPodIP reports whether pod's address should be mirrored on this
	// node: it must be scheduled here, have an assigned IP, and not be a
	// hostNetwork pod (whose PodIP equals the node IP).
	localPodIP := func(pod *v1.Pod) bool {
		return pod.Status.HostIP == nodeIP && pod.Status.PodIP != nodeIP && pod.Status.PodIP != ""
	}

	watchlist := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(),
		string(v1.ResourcePods),
		v1.NamespaceAll,
		fields.Everything(),
	)
	_, controller := cache.NewInformer(
		watchlist,
		&v1.Pod{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				pod, ok := obj.(*v1.Pod)
				if !ok {
					return
				}
				tk.LogIt(tk.LogInfo, "Pod(%s) add: %s - %s:\n", pod.Name, pod.Status.PodIP, pod.Status.HostIP)
				if localPodIP(pod) {
					_, err := ApiHooks.NetAddrAdd(&cmn.IPAddrMod{Dev: pod.Name, IP: pod.Status.PodIP + "/32"})
					if err != nil {
						tk.LogIt(tk.LogDebug, "Pod(%s) add: %s - %s: failed - %s\n", pod.Name, pod.Status.PodIP, pod.Status.HostIP, err)
					} else {
						tk.LogIt(tk.LogDebug, "Pod(%s) added: %s - %s \n", pod.Name, pod.Status.PodIP, pod.Status.HostIP)
					}
				}
			},
			DeleteFunc: func(obj interface{}) {
				pod, ok := obj.(*v1.Pod)
				if !ok {
					return
				}
				tk.LogIt(tk.LogInfo, "Pod(%s) delete: %s - %s: \n", pod.Name, pod.Status.PodIP, pod.Status.HostIP)
				if localPodIP(pod) {
					_, err := ApiHooks.NetAddrDel(&cmn.IPAddrMod{Dev: pod.Name, IP: pod.Status.PodIP + "/32"})
					if err != nil {
						tk.LogIt(tk.LogDebug, "Pod(%s) delete: %s - %s: failed - %s\n", pod.Name, pod.Status.PodIP, pod.Status.HostIP, err)
					} else {
						tk.LogIt(tk.LogDebug, "Pod(%s) deleted: %s - %s \n", pod.Name, pod.Status.PodIP, pod.Status.HostIP)
					}
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				oldPod, okOld := oldObj.(*v1.Pod)
				newPod, okNew := newObj.(*v1.Pod)
				// Skip periodic resyncs and status-only updates that do not
				// change the pod's network identity: the previous behavior of
				// deleting and re-adding the address on every update event
				// caused needless address churn.
				if okOld && okNew &&
					oldPod.Status.HostIP == newPod.Status.HostIP &&
					oldPod.Status.PodIP == newPod.Status.PodIP {
					return
				}
				if okOld && localPodIP(oldPod) {
					_, err := ApiHooks.NetAddrDel(&cmn.IPAddrMod{Dev: oldPod.Name, IP: oldPod.Status.PodIP + "/32"})
					if err != nil {
						tk.LogIt(tk.LogDebug, "Pod(%s) delete: %s - %s: failed - %s\n", oldPod.Name, oldPod.Status.PodIP, oldPod.Status.HostIP, err)
					} else {
						tk.LogIt(tk.LogDebug, "Pod(%s) deleted: %s - %s \n", oldPod.Name, oldPod.Status.PodIP, oldPod.Status.HostIP)
					}
				}
				if okNew && localPodIP(newPod) {
					tk.LogIt(tk.LogInfo, "Pod(%s) modify: %s - %s:\n", newPod.Name, newPod.Status.PodIP, newPod.Status.HostIP)
					_, err := ApiHooks.NetAddrAdd(&cmn.IPAddrMod{Dev: newPod.Name, IP: newPod.Status.PodIP + "/32"})
					if err != nil {
						tk.LogIt(tk.LogDebug, "Pod(%s) modify: %s - %s: failed - %s\n", newPod.Name, newPod.Status.PodIP, newPod.Status.HostIP, err)
					} else {
						tk.LogIt(tk.LogDebug, "Pod(%s) modified: %s - %s \n", newPod.Name, newPod.Status.PodIP, newPod.Status.HostIP)
					}
				}
			},
		},
	)

	if controller != nil {
		stopCh = make(chan struct{})
		go controller.Run(stopCh)
		tk.LogIt(tk.LogInfo, "K8s API Init done\n")
		return nil
	}
	return errors.New("k8s api init failed")
}

// K8sApiClose stops the pod informer started by K8sApiInit by closing its
// stop channel. It is safe to call when K8sApiInit was never run or failed
// (closing a nil channel would panic), and safe to call more than once
// (closing an already-closed channel would also panic).
func K8sApiClose() {
	if stopCh != nil {
		close(stopCh)
		stopCh = nil
	}
}
17 changes: 3 additions & 14 deletions cicd/k3s-flannel-cluster-ipvs-compat/Vagrantfile
Original file line number Diff line number Diff line change
Expand Up @@ -14,22 +14,11 @@ Vagrant.configure("2") do |config|

config.vm.define "host" do |host|
host.vm.hostname = 'host'
host.vm.network :private_network, ip: "192.168.90.8", :netmask => "255.255.255.0"
host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
host.vm.provision :shell, :path => "host.sh"
host.vm.provider :virtualbox do |vbox|
vbox.memory = "4096"
vbox.cpus = "2"
vbox.default_nic_type = "virtio"
end
end

config.vm.define "loxilb" do |loxilb|
loxilb.vm.hostname = 'llb1'
loxilb.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
loxilb.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
loxilb.vm.provision :shell, :path => "loxilb.sh"
loxilb.vm.provider :virtualbox do |vbox|
vbox.memory = "6000"
vbox.cpus = "4"
vbox.default_nic_type = "virtio"
end
Expand All @@ -54,7 +43,7 @@ Vagrant.configure("2") do |config|
worker.vm.provision :shell, :path => "worker.sh"
worker.vm.provider :virtualbox do |vbox|
vbox.memory = "4096"
vbox.cpus = "2"
vbox.cpus = "4"
vbox.default_nic_type = "virtio"
end
end
Expand Down
2 changes: 0 additions & 2 deletions cicd/k3s-flannel-cluster-ipvs-compat/config.sh
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
#!/bin/bash
vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f
vagrant up
vagrant ssh host -c 'sudo ip route add 123.123.123.0/24 via 192.168.90.9'

5 changes: 3 additions & 2 deletions cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,9 @@ spec:
command:
- /bin/kube-loxilb
args:
- --loxiURL=http://192.168.80.9:11111
- --externalCIDR=123.123.123.1/24
#- --loxiURL=http://192.168.80.9:11111
- --externalCIDR=192.168.80.20/32
- --setRoles=0.0.0.0
#- --monitor
#- --setBGP
#- --setLBMode=1
Expand Down
93 changes: 89 additions & 4 deletions cicd/k3s-flannel-cluster-ipvs-compat/loxilb.yml
Original file line number Diff line number Diff line change
@@ -1,15 +1,92 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: loxilb-lb
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loxilb-lb
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- watch
- list
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- watch
- list
- patch
- apiGroups:
- ""
resources:
- endpoints
- services
- services/status
verbs:
- get
- watch
- list
- patch
- update
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- watch
- list
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loxilb-lb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: loxilb-lb
subjects:
- kind: ServiceAccount
name: loxilb-lb
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: loxilb-lb
namespace: kube-system
labels:
app: loxilb-app
spec:
selector:
matchLabels:
app: loxilb-app
template:
metadata:
name: loxilb-lb
labels:
app: loxilb-app
spec:
Expand All @@ -20,11 +97,14 @@ spec:
operator: Exists
- key: "node-role.kubernetes.io/control-plane"
operator: Exists
priorityClassName: system-node-critical
serviceAccountName: loxilb-lb
containers:
- name: loxilb-app
image: "ghcr.io/loxilb-io/loxilb:latest"
- name: loxilb-lb
image: "ghcr.io/loxilb-io/loxilb:debug"
imagePullPolicy: Always
command: [ "/root/loxilb-io/loxilb/loxilb", "--bgp", "--egr-hooks", "--blacklist=veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--ipvs-compat" ]
#command: [ "sleep", "72000" ]
command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni.|veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--ipvs-compat", "--k8s-api=cluster" ]
ports:
- containerPort: 11111
- containerPort: 179
Expand All @@ -34,6 +114,11 @@ spec:
capabilities:
add:
- SYS_ADMIN
env:
- name: MY_NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
---
apiVersion: v1
kind: Service
Expand Down
1 change: 0 additions & 1 deletion cicd/k3s-flannel-cluster-ipvs-compat/rmconfig.sh
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
#!/bin/bash
vagrant destroy -f worker1
vagrant destroy -f master
vagrant destroy -f loxilb
vagrant destroy -f host
Loading
Loading