
Commit

WIP
d0g0x01 committed Aug 4, 2023
1 parent 32cb9b6 commit 1f81db5
Showing 14 changed files with 210 additions and 61 deletions.
5 changes: 0 additions & 5 deletions pkg/kubehound/graph/edge/endpoint_expose_public.go
@@ -97,11 +97,6 @@ func (e *EndpointExposePublic) Stream(ctx context.Context, store storedb.Provide
bson.M{"$eq": bson.A{
"$$p.protocol", "$$proto",
}},
// Host ports are not compatible with an endpoint slice
// TODO reference/ REVIEW
// bson.M{"$eq": bson.A{
// "$$p.hostport", 0,
// }},
}},
}}},
0,
9 changes: 5 additions & 4 deletions pkg/kubehound/ingestor/pipeline/pod_ingest.go
@@ -97,12 +97,13 @@ func (i *PodIngest) processEndpoints(ctx context.Context, port *corev1.Container
case cache.ErrNoEntry:
// No associated endpoint slice, create the endpoint from container parameters
case nil:
// Entry already has an associated store entry with the endpoint slice ingest pipeline
// Nothing further to do...
if port.HostPort != 0 {
// This should not be possible in our data model
// Validate our assumptions - the below should not be possible in our data model and would result in missing edges
if port.HostPort != 0 && port.ContainerPort != port.HostPort {
log.Trace(ctx).Warnf("assumption failure: host port set on container with associated endpoint slice (%s)", ck.Key())
}

// Entry already has an associated store entry with the endpoint slice ingest pipeline
// Nothing further to do...
return nil
default:
return err
49 changes: 47 additions & 2 deletions pkg/kubehound/ingestor/pipeline/pod_ingest_test.go
@@ -8,6 +8,7 @@ import (
mockcollect "github.com/DataDog/KubeHound/pkg/collector/mockcollector"
"github.com/DataDog/KubeHound/pkg/config"
"github.com/DataDog/KubeHound/pkg/globals/types"
"github.com/DataDog/KubeHound/pkg/kubehound/models/shared"
"github.com/DataDog/KubeHound/pkg/kubehound/models/store"
"github.com/DataDog/KubeHound/pkg/kubehound/storage/cache"
mockcache "github.com/DataDog/KubeHound/pkg/kubehound/storage/cache/mocks"
@@ -44,7 +45,7 @@ func TestPodIngest_Pipeline(t *testing.T) {

cw.EXPECT().Flush(ctx).Return(nil)
cw.EXPECT().Close(ctx).Return(nil)
c.EXPECT().BulkWriter(ctx).Return(cw, nil)
c.EXPECT().BulkWriter(ctx, mock.AnythingOfType("cache.WriterOption")).Return(cw, nil)
c.EXPECT().Get(ctx, mock.AnythingOfType("*cachekey.nodeCacheKey")).Return(&cache.CacheResult{
Value: store.ObjectID().Hex(),
Err: nil,
@@ -53,6 +54,10 @@ func TestPodIngest_Pipeline(t *testing.T) {
Value: store.ObjectID().Hex(),
Err: nil,
})
c.EXPECT().Get(ctx, mock.AnythingOfType("*cachekey.endpointCacheKey")).Return(&cache.CacheResult{
Value: nil,
Err: cache.ErrNoEntry,
})

// Store setup - pods
sdb := storedb.NewProvider(t)
@@ -92,9 +97,23 @@ func TestPodIngest_Pipeline(t *testing.T) {
vsw.EXPECT().Flush(ctx).Return(nil)
vsw.EXPECT().Close(ctx).Return(nil)

// Store setup - endpoint
esw := storedb.NewAsyncWriter(t)
endpoints := collections.Endpoint{}
eid := store.ObjectID()
esw.EXPECT().Queue(ctx, mock.AnythingOfType("*store.Endpoint")).
RunAndReturn(func(ctx context.Context, i any) error {
i.(*store.Endpoint).Id = eid
return nil
}).Once()

esw.EXPECT().Flush(ctx).Return(nil)
esw.EXPECT().Close(ctx).Return(nil)

sdb.EXPECT().BulkWriter(ctx, pods, mock.Anything).Return(psw, nil)
sdb.EXPECT().BulkWriter(ctx, containers, mock.Anything).Return(csw, nil)
sdb.EXPECT().BulkWriter(ctx, volumes, mock.Anything).Return(vsw, nil)
sdb.EXPECT().BulkWriter(ctx, endpoints, mock.Anything).Return(esw, nil)

// Graph setup - pods
pv := map[string]any{
@@ -133,7 +152,7 @@ func TestPodIngest_Pipeline(t *testing.T) {
"name": "elasticsearch",
"node": "test-node.ec2.internal",
"pod": "app-monitors-client-78cb6d7899-j2rjp",
"ports": []any{"9200", "9300"},
"ports": []any{"9200"},
"privesc": false,
"privileged": false,
"runAsUser": float64(0),
@@ -168,9 +187,35 @@ func TestPodIngest_Pipeline(t *testing.T) {
vgw.EXPECT().Flush(ctx).Return(nil)
vgw.EXPECT().Close(ctx).Return(nil)

// Graph setup - endpoints

ev := map[string]interface{}{
"addressType": "IPv4",
"addresses": []interface{}{"10.1.1.2"},
"app": "test-app",
"compromised": float64(0),
"exposure": float64(shared.EndpointExposureNodeIP),
"isNamespaced": true,
"name": "test-app::app-monitors-client-78cb6d7899-j2rjp::TCP::9200",
"namespace": "test-app",
"port": float64(9200),
"portName": "http",
"protocol": "TCP",
"service": "test-service",
"serviceDns": "",
"serviceEndpoint": "http",
"storeID": eid.Hex(),
"team": "test-team",
}
egw := graphdb.NewAsyncVertexWriter(t)
egw.EXPECT().Queue(ctx, ev).Return(nil).Once()
egw.EXPECT().Flush(ctx).Return(nil)
egw.EXPECT().Close(ctx).Return(nil)

gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Pod"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(pgw, nil)
gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Container"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(cgw, nil)
gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Volume"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(vgw, nil)
gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Endpoint"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(egw, nil)

deps := &Dependencies{
Collector: client,
6 changes: 0 additions & 6 deletions pkg/kubehound/ingestor/pipeline/testdata/pod.json
@@ -44,12 +44,6 @@
"hostPort": 9200,
"name": "http",
"protocol": "TCP"
},
{
"containerPort": 9300,
"hostPort": 9300,
"name": "transport",
"protocol": "TCP"
}
],
"readinessProbe": {
30 changes: 30 additions & 0 deletions pkg/kubehound/libkube/address.go
@@ -0,0 +1,30 @@
package libkube

import (
"fmt"
"net"
"regexp"

discoveryv1 "k8s.io/api/discovery/v1"
)

const (
AddressFQDNMatcher = `^(([a-z0-9][a-z0-9\-]*[a-z0-9])|[a-z0-9]+\.)*([a-z]+|xn\-\-[a-z0-9]+)\.?$`
)

// AddressType returns the discoveryv1.AddressType identifier corresponding to the provided address.
func AddressType(address string) (discoveryv1.AddressType, error) {
ip := net.ParseIP(address)
switch {
case ip.To4() != nil:
return discoveryv1.AddressTypeIPv4, nil
case ip.To16() != nil:
return discoveryv1.AddressTypeIPv6, nil
}

if match, _ := regexp.MatchString(AddressFQDNMatcher, address); match {
return discoveryv1.AddressTypeFQDN, nil
}

return "", fmt.Errorf("invalid addrress input: %s", address)
}
57 changes: 57 additions & 0 deletions pkg/kubehound/libkube/address_test.go
@@ -0,0 +1,57 @@
package libkube

import (
"reflect"
"testing"

discoveryv1 "k8s.io/api/discovery/v1"
)

func TestAddressType(t *testing.T) {
type args struct {
address string
}
tests := []struct {
name string
args args
want discoveryv1.AddressType
wantErr bool
}{
{
name: "success case IPv4",
args: args{address: "10.1.1.1"},
want: discoveryv1.AddressTypeIPv4,
wantErr: false,
},
{
name: "success case IPv6",
args: args{address: "fe80::857:d999:5316:78d4"},
want: discoveryv1.AddressTypeIPv6,
wantErr: false,
},
{
name: "success case FQDN",
args: args{address: "a.domain.local"},
want: discoveryv1.AddressTypeFQDN,
wantErr: false,
},
{
name: "error case",
args: args{address: "a.domain.local OR NOT!"},
want: "",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := AddressType(tt.args.address)
if (err != nil) != tt.wantErr {
t.Errorf("AddressType() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("AddressType() = %v, want %v", got, tt.want)
}
})
}
}
19 changes: 6 additions & 13 deletions pkg/kubehound/libkube/service.go
@@ -1,27 +1,20 @@
package libkube

import (
"strings"
"fmt"

"github.com/DataDog/KubeHound/pkg/globals/types"
)

// ServiceName returns the name of the service associated with the provided EndpointSlice.
func ServiceName(ep types.EndpointType) string {
return ep.Labels["kubernetes.io/service-name"]
}

// TODO service DNS name
// ServiceDns provides the DNS name of the service associated with the provided EndpointSlice.
func ServiceDns(ep types.EndpointType) string {
var sb strings.Builder
clusterName := "stripe.us1.staging.dog" // TODO dynamic
namespace := ep.Namespace

sb.WriteString(ep.Labels["kubernetes.io/service-name"])
sb.WriteString(".")
sb.WriteString("stripe.us1.staging.dog")

return sb.String()
return fmt.Sprintf("%s.%s.%s", ep.Labels["kubernetes.io/service-name"], namespace, clusterName)
}

// // Exposed port from containerPort
// func ExposedPort() int {

// }
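For reference, a minimal sketch of the DNS name the reworked ServiceDns now yields, assuming types.EndpointType wraps a *discoveryv1.EndpointSlice as it does elsewhere in the ingest pipeline (the hard-coded cluster domain is still flagged as a TODO above). The namespace and service-name values are taken from this commit's test fixtures:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// EndpointSlice shaped like the test fixtures in this commit
	ep := &discoveryv1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "test-app",
			Labels:    map[string]string{"kubernetes.io/service-name": "test-service"},
		},
	}

	// Same <service>.<namespace>.<cluster domain> shape as ServiceDns above
	fmt.Printf("%s.%s.%s\n", ep.Labels["kubernetes.io/service-name"], ep.Namespace, "stripe.us1.staging.dog")
	// -> test-service.test-app.stripe.us1.staging.dog
}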
50 changes: 30 additions & 20 deletions pkg/kubehound/models/converter/store.go
@@ -372,7 +372,7 @@ func (c *StoreConverter) Endpoint(_ context.Context, addr discoveryv1.Endpoint,
Id: store.ObjectID(),
PodName: addr.TargetRef.Name,
PodNamespace: addr.TargetRef.Namespace,
Name: fmt.Sprintf("%s::%s::%s", parent.Name, addr.TargetRef.Name, *port.Name),
Name: fmt.Sprintf("%s::%s::%s", parent.Name, *port.Protocol, *port.Name),
HasSlice: true,
ServiceName: libkube.ServiceName(parent),
ServiceDns: libkube.ServiceDns(parent),
@@ -404,21 +404,23 @@
func (c *StoreConverter) EndpointPrivate(_ context.Context, port *corev1.ContainerPort,
pod *store.Pod, container *store.Container) (*store.Endpoint, error) {

//portName := port.Name // TODO check this logic
// https://cylab.be/blog/154/exposing-a-kubernetes-application-service-hostport-nodeport-loadbalancer-or-ingresscontroller
// Derive the address type from the pod IP
podIP := pod.K8.Status.PodIP
addrType, err := libkube.AddressType(podIP)
if err != nil {
return nil, err
}

output := &store.Endpoint{
Id: store.ObjectID(),
ContainerId: container.Id,
PodName: pod.K8.Name,
PodNamespace: pod.K8.Namespace,
// ServiceName: fmt.Sprintf("%s::%s::%s", pod.K8.Namespace, pod.K8.Name, port.Name),
Name: fmt.Sprintf("%s::%s::%d", container.K8.Name, pod.K8.Status.HostIP, port.ContainerPort),
NodeName: pod.K8.Spec.NodeName,
AddressType: discoveryv1.AddressTypeIPv4, // TODO figure out address tyoe from inpout
Name: fmt.Sprintf("%s::%s::%s::%d", pod.K8.Namespace, pod.K8.Name, port.Protocol, port.ContainerPort),
NodeName: pod.K8.Spec.NodeName,
AddressType: addrType,
Backend: discoveryv1.Endpoint{
// TODO other fields
Addresses: []string{pod.K8.Status.PodIP},
Addresses: []string{podIP},
TargetRef: &corev1.ObjectReference{
Kind: pod.K8.Kind,
APIVersion: pod.K8.APIVersion,
@@ -435,23 +437,31 @@ func (c *StoreConverter) EndpointPrivate(_ context.Context, port *corev1.Contain
Port: &port.ContainerPort,
},
Ownership: container.Ownership,

// If created via the pod ingestion pipeline the endpoint does not correspond to a k8s
// endpoint slice and thus is only accessible from within the cluster.
Exposure: shared.EndpointExposureInternal,
}

// TODO if host port is set we wan to make expore external and set ServiceName / ServiceDns
if port.HostPort != 0 {
output.ServiceName = "TODO"
output.ServiceDns = fmt.Sprintf("%s.%s", output.NodeName, "cluster")
output.Exposure = shared.EndpointExposureExternal
}

if len(pod.K8.Namespace) != 0 {
output.IsNamespaced = true
output.Namespace = pod.K8.Namespace
}

switch {
case len(port.Name) != 0:
output.ServiceName = port.Name
case port.HostPort != 0:
output.ServiceName = fmt.Sprintf("%s::%d", port.Protocol, port.HostPort)
default:
output.ServiceName = fmt.Sprintf("%s::%d", port.Protocol, port.ContainerPort)
}

if port.HostPort != 0 {
// With a host port field, endpoint is only accessible from the node IP
output.Exposure = shared.EndpointExposureNodeIP

// TODO future improvement - consider providing the node address as a backend here
} else {
// Without a host port field, the endpoint is only accessible from within the cluster
output.Exposure = shared.EndpointExposureClusterIP
}

return output, nil
}
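To make the service-name fallback above concrete, here is a small standalone sketch (not KubeHound API, just the same precedence applied to a bare corev1.ContainerPort). The example values come from the updated pod.json fixture and match the serviceEndpoint asserted in the pod ingest test:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// serviceNameFor mirrors the precedence used in EndpointPrivate: prefer the named port,
// then the host port, then fall back to the container port.
func serviceNameFor(port corev1.ContainerPort) string {
	switch {
	case len(port.Name) != 0:
		return port.Name
	case port.HostPort != 0:
		return fmt.Sprintf("%s::%d", port.Protocol, port.HostPort)
	default:
		return fmt.Sprintf("%s::%d", port.Protocol, port.ContainerPort)
	}
}

func main() {
	// The elasticsearch port from the updated pod.json fixture: named "http" with hostPort 9200
	p := corev1.ContainerPort{Name: "http", ContainerPort: 9200, HostPort: 9200, Protocol: corev1.ProtocolTCP}
	fmt.Println(serviceNameFor(p)) // -> "http"
}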
9 changes: 5 additions & 4 deletions pkg/kubehound/models/shared/constants.go
@@ -22,8 +22,9 @@ const (
type EndpointExposureType int

const (
EndpointExposureNone EndpointExposureType = iota
EndpointExposureInternal // Container port exposed to cluster
EndpointExposureExternal // Kubernetes endpoint exposed outside the cluster
EndpointExposurePublic // External DNS API endpoint
EndpointExposureNone EndpointExposureType = iota
EndpointExposureClusterIP // Container port exposed to cluster
EndpointExposureNodeIP // Container port exposed outside the cluster via a host port on the node IP
EndpointExposureExternal // Kubernetes endpoint exposed outside the cluster
EndpointExposurePublic // External DNS API endpoint
)
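A usage note: the levels are declared in order of increasing reachability, so a hypothetical consumer (not code from this commit) could gate on a minimum exposure, e.g.:

package example

import "github.com/DataDog/KubeHound/pkg/kubehound/models/shared"

// Hypothetical helper - assumes the iota ordering above encodes increasing reachability,
// so anything at or above NodeIP is reachable from outside the pod network.
func ExposedBeyondCluster(e shared.EndpointExposureType) bool {
	return e >= shared.EndpointExposureNodeIP
}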
2 changes: 2 additions & 0 deletions test/setup/test-cluster/attacks/ENDPOINT_EXPOSE.yaml
@@ -11,6 +11,8 @@ spec:
containers:
- name: endpoints-pod
image: ubuntu
securityContext:
privileged: true
command: [ "/bin/sh", "-c", "--" ]
args: [ "while true; do sleep 30; done;" ]
ports:
2 changes: 1 addition & 1 deletion test/setup/test-cluster/attacks/IDENTITY_ASSUME.yaml
@@ -14,7 +14,7 @@ metadata:
name: node-api-access
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
kind: ClusterRole
name: node-api-access
subjects:
- kind: Group
1 change: 0 additions & 1 deletion test/setup/test-cluster/attacks/TOKEN_VAR_LOG_SYMLINK.yaml
@@ -4,7 +4,6 @@ kind: ServiceAccount
metadata:
name: varlog-sa
namespace: default
automountServiceAccountToken: true
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role