diff --git a/pkg/kubehound/graph/edge/endpoint_expose_public.go b/pkg/kubehound/graph/edge/endpoint_expose_public.go index 46eaca24c..1805046ef 100644 --- a/pkg/kubehound/graph/edge/endpoint_expose_public.go +++ b/pkg/kubehound/graph/edge/endpoint_expose_public.go @@ -97,11 +97,6 @@ func (e *EndpointExposePublic) Stream(ctx context.Context, store storedb.Provide bson.M{"$eq": bson.A{ "$$p.protocol", "$$proto", }}, - // Host ports are not compatible with an endpoint slice - // TODO reference/ REVIEW - // bson.M{"$eq": bson.A{ - // "$$p.hostport", 0, - // }}, }}, }}}, 0, diff --git a/pkg/kubehound/ingestor/pipeline/pod_ingest.go b/pkg/kubehound/ingestor/pipeline/pod_ingest.go index 30aefd91a..c0898495b 100644 --- a/pkg/kubehound/ingestor/pipeline/pod_ingest.go +++ b/pkg/kubehound/ingestor/pipeline/pod_ingest.go @@ -97,12 +97,13 @@ func (i *PodIngest) processEndpoints(ctx context.Context, port *corev1.Container case cache.ErrNoEntry: // No associated endpoint slice, create the endpoint from container parameters case nil: + // Validate our assumptions - the below should not be possible in our data model and will result in missing edges + if port.HostPort != 0 && port.ContainerPort != port.HostPort { log.Trace(ctx).Warnf("assumption failure: host port set on container with associated endpoint slice (%s)", ck.Key()) } + + // Entry already has an associated store entry with the endpoint slice ingest pipeline + // Nothing further to do... 
return nil default: return err diff --git a/pkg/kubehound/ingestor/pipeline/pod_ingest_test.go b/pkg/kubehound/ingestor/pipeline/pod_ingest_test.go index 78537b727..3f9187b58 100644 --- a/pkg/kubehound/ingestor/pipeline/pod_ingest_test.go +++ b/pkg/kubehound/ingestor/pipeline/pod_ingest_test.go @@ -8,6 +8,7 @@ import ( mockcollect "github.com/DataDog/KubeHound/pkg/collector/mockcollector" "github.com/DataDog/KubeHound/pkg/config" "github.com/DataDog/KubeHound/pkg/globals/types" + "github.com/DataDog/KubeHound/pkg/kubehound/models/shared" "github.com/DataDog/KubeHound/pkg/kubehound/models/store" "github.com/DataDog/KubeHound/pkg/kubehound/storage/cache" mockcache "github.com/DataDog/KubeHound/pkg/kubehound/storage/cache/mocks" @@ -44,7 +45,7 @@ func TestPodIngest_Pipeline(t *testing.T) { cw.EXPECT().Flush(ctx).Return(nil) cw.EXPECT().Close(ctx).Return(nil) - c.EXPECT().BulkWriter(ctx).Return(cw, nil) + c.EXPECT().BulkWriter(ctx, mock.AnythingOfType("cache.WriterOption")).Return(cw, nil) c.EXPECT().Get(ctx, mock.AnythingOfType("*cachekey.nodeCacheKey")).Return(&cache.CacheResult{ Value: store.ObjectID().Hex(), Err: nil, @@ -53,6 +54,10 @@ func TestPodIngest_Pipeline(t *testing.T) { Value: store.ObjectID().Hex(), Err: nil, }) + c.EXPECT().Get(ctx, mock.AnythingOfType("*cachekey.endpointCacheKey")).Return(&cache.CacheResult{ + Value: nil, + Err: cache.ErrNoEntry, + }) // Store setup - pods sdb := storedb.NewProvider(t) @@ -92,9 +97,23 @@ func TestPodIngest_Pipeline(t *testing.T) { vsw.EXPECT().Flush(ctx).Return(nil) vsw.EXPECT().Close(ctx).Return(nil) + // Store setup - endpoint + esw := storedb.NewAsyncWriter(t) + endpoints := collections.Endpoint{} + eid := store.ObjectID() + esw.EXPECT().Queue(ctx, mock.AnythingOfType("*store.Endpoint")). 
+ RunAndReturn(func(ctx context.Context, i any) error { + i.(*store.Endpoint).Id = eid + return nil + }).Once() + + esw.EXPECT().Flush(ctx).Return(nil) + esw.EXPECT().Close(ctx).Return(nil) + sdb.EXPECT().BulkWriter(ctx, pods, mock.Anything).Return(psw, nil) sdb.EXPECT().BulkWriter(ctx, containers, mock.Anything).Return(csw, nil) sdb.EXPECT().BulkWriter(ctx, volumes, mock.Anything).Return(vsw, nil) + sdb.EXPECT().BulkWriter(ctx, endpoints, mock.Anything).Return(esw, nil) // Graph setup - pods pv := map[string]any{ @@ -133,7 +152,7 @@ func TestPodIngest_Pipeline(t *testing.T) { "name": "elasticsearch", "node": "test-node.ec2.internal", "pod": "app-monitors-client-78cb6d7899-j2rjp", - "ports": []any{"9200", "9300"}, + "ports": []any{"9200"}, "privesc": false, "privileged": false, "runAsUser": float64(0), @@ -168,9 +187,35 @@ func TestPodIngest_Pipeline(t *testing.T) { vgw.EXPECT().Flush(ctx).Return(nil) vgw.EXPECT().Close(ctx).Return(nil) + // Graph setup - endpoints + + ev := map[string]interface{}{ + "addressType": "IPv4", + "addresses": []interface{}{"10.1.1.2"}, + "app": "test-app", + "compromised": float64(0), + "exposure": float64(shared.EndpointExposureNodeIP), + "isNamespaced": true, + "name": "test-app::app-monitors-client-78cb6d7899-j2rjp::TCP::9200", + "namespace": "test-app", + "port": float64(9200), + "portName": "http", + "protocol": "TCP", + "service": "test-service", + "serviceDns": "", + "serviceEndpoint": "http", + "storeID": eid.Hex(), + "team": "test-team", + } + egw := graphdb.NewAsyncVertexWriter(t) + egw.EXPECT().Queue(ctx, ev).Return(nil).Once() + egw.EXPECT().Flush(ctx).Return(nil) + egw.EXPECT().Close(ctx).Return(nil) + gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Pod"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(pgw, nil) gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Container"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(cgw, nil) gdb.EXPECT().VertexWriter(ctx, 
mock.AnythingOfType("*vertex.Volume"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(vgw, nil) + gdb.EXPECT().VertexWriter(ctx, mock.AnythingOfType("*vertex.Endpoint"), c, mock.AnythingOfType("graphdb.WriterOption")).Return(egw, nil) deps := &Dependencies{ Collector: client, diff --git a/pkg/kubehound/ingestor/pipeline/testdata/pod.json b/pkg/kubehound/ingestor/pipeline/testdata/pod.json index 39f82fa6f..36f9a0e20 100644 --- a/pkg/kubehound/ingestor/pipeline/testdata/pod.json +++ b/pkg/kubehound/ingestor/pipeline/testdata/pod.json @@ -44,12 +44,6 @@ "hostPort": 9200, "name": "http", "protocol": "TCP" - }, - { - "containerPort": 9300, - "hostPort": 9300, - "name": "transport", - "protocol": "TCP" } ], "readinessProbe": { diff --git a/pkg/kubehound/libkube/address.go b/pkg/kubehound/libkube/address.go new file mode 100644 index 000000000..7f60bc811 --- /dev/null +++ b/pkg/kubehound/libkube/address.go @@ -0,0 +1,30 @@ +package libkube + +import ( + "fmt" + "net" + "regexp" + + discoveryv1 "k8s.io/api/discovery/v1" +) + +const ( + AddressFQDNMatcher = `^(([a-z0-9][a-z0-9\-]*[a-z0-9])|[a-z0-9]+\.)*([a-z]+|xn\-\-[a-z0-9]+)\.?$` +) + +// AddressType returns the discoveryv1.AddressType identifier corresponding to the provided address. 
+func AddressType(address string) (discoveryv1.AddressType, error) { + ip := net.ParseIP(address) + switch { + case ip.To4() != nil: + return discoveryv1.AddressTypeIPv4, nil + case ip.To16() != nil: + return discoveryv1.AddressTypeIPv6, nil + } + + if match, _ := regexp.MatchString(AddressFQDNMatcher, address); match { + return discoveryv1.AddressTypeFQDN, nil + } + + return "", fmt.Errorf("invalid address input: %s", address) +} diff --git a/pkg/kubehound/libkube/address_test.go b/pkg/kubehound/libkube/address_test.go new file mode 100644 index 000000000..be0573ef2 --- /dev/null +++ b/pkg/kubehound/libkube/address_test.go @@ -0,0 +1,57 @@ +package libkube + +import ( + "reflect" + "testing" + + discoveryv1 "k8s.io/api/discovery/v1" +) + +func TestAddressType(t *testing.T) { + type args struct { + address string + } + tests := []struct { + name string + args args + want discoveryv1.AddressType + wantErr bool + }{ + { + name: "success case IPv4", + args: args{address: "10.1.1.1"}, + want: discoveryv1.AddressTypeIPv4, + wantErr: false, + }, + { + name: "success case IPv6", + args: args{address: "fe80::857:d999:5316:78d4"}, + want: discoveryv1.AddressTypeIPv6, + wantErr: false, + }, + { + name: "success case FQDN", + args: args{address: "a.domain.local"}, + want: discoveryv1.AddressTypeFQDN, + wantErr: false, + }, + { + name: "error case", + args: args{address: "a.domain.local OR NOT!"}, + want: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := AddressType(tt.args.address) + if (err != nil) != tt.wantErr { + t.Errorf("AddressType() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("AddressType() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/kubehound/libkube/service.go b/pkg/kubehound/libkube/service.go index 401099a7c..b2b554405 100644 --- a/pkg/kubehound/libkube/service.go +++ b/pkg/kubehound/libkube/service.go @@ -1,27 +1,20 @@ 
package libkube import ( - "strings" + "fmt" "github.com/DataDog/KubeHound/pkg/globals/types" ) +// ServiceName returns the name of the service associated with the provided EndpointSlice. func ServiceName(ep types.EndpointType) string { return ep.Labels["kubernetes.io/service-name"] } -// TODO service DNS name +// ServiceDns provides the DNS name of the service associated with the provided EndpointSlice. func ServiceDns(ep types.EndpointType) string { - var sb strings.Builder + clusterName := "stripe.us1.staging.dog" // TODO dynamic + namespace := ep.Namespace - sb.WriteString(ep.Labels["kubernetes.io/service-name"]) - sb.WriteString(".") - sb.WriteString("stripe.us1.staging.dog") - - return sb.String() + return fmt.Sprintf("%s.%s.%s", ep.Labels["kubernetes.io/service-name"], namespace, clusterName) } - -// // Exposed port from containerPort -// func ExposedPort() int { - -// } diff --git a/pkg/kubehound/models/converter/store.go b/pkg/kubehound/models/converter/store.go index 1b8637d92..1687507d5 100644 --- a/pkg/kubehound/models/converter/store.go +++ b/pkg/kubehound/models/converter/store.go @@ -372,7 +372,7 @@ func (c *StoreConverter) Endpoint(_ context.Context, addr discoveryv1.Endpoint, Id: store.ObjectID(), PodName: addr.TargetRef.Name, PodNamespace: addr.TargetRef.Namespace, - Name: fmt.Sprintf("%s::%s::%s", parent.Name, addr.TargetRef.Name, *port.Name), + Name: fmt.Sprintf("%s::%s::%s", parent.Name, *port.Protocol, *port.Name), HasSlice: true, ServiceName: libkube.ServiceName(parent), ServiceDns: libkube.ServiceDns(parent), @@ -404,21 +404,23 @@ func (c *StoreConverter) Endpoint(_ context.Context, addr discoveryv1.Endpoint, func (c *StoreConverter) EndpointPrivate(_ context.Context, port *corev1.ContainerPort, pod *store.Pod, container *store.Container) (*store.Endpoint, error) { - //portName := port.Name // TODO check this logic - // https: //cylab.be/blog/154/exposing-a-kubernetes-application-service-hostport-nodeport-loadbalancer-or-ingresscontroller + 
// Derive the address type from the pod IP + podIP := pod.K8.Status.PodIP + addrType, err := libkube.AddressType(podIP) + if err != nil { + return nil, err + } output := &store.Endpoint{ Id: store.ObjectID(), ContainerId: container.Id, PodName: pod.K8.Name, PodNamespace: pod.K8.Namespace, - // ServiceName: fmt.Sprintf("%s::%s::%s", pod.K8.Namespace, pod.K8.Name, port.Name), - Name: fmt.Sprintf("%s::%s::%d", container.K8.Name, pod.K8.Status.HostIP, port.ContainerPort), - NodeName: pod.K8.Spec.NodeName, - AddressType: discoveryv1.AddressTypeIPv4, // TODO figure out address tyoe from inpout + Name: fmt.Sprintf("%s::%s::%s::%d", pod.K8.Namespace, pod.K8.Name, port.Protocol, port.ContainerPort), + NodeName: pod.K8.Spec.NodeName, + AddressType: addrType, Backend: discoveryv1.Endpoint{ - // TODO other fields - Addresses: []string{pod.K8.Status.PodIP}, + Addresses: []string{podIP}, TargetRef: &corev1.ObjectReference{ Kind: pod.K8.Kind, APIVersion: pod.K8.APIVersion, @@ -435,17 +437,6 @@ func (c *StoreConverter) EndpointPrivate(_ context.Context, port *corev1.Contain Port: &port.ContainerPort, }, Ownership: container.Ownership, - - // If created via the pod ingestion pipeline the endpoint does not correspond to a k8s - // endpoint slice and thus is only accessible from within the cluster. 
- Exposure: shared.EndpointExposureInternal, - } - - // TODO if host port is set we wan to make expore external and set ServiceName / ServiceDns - if port.HostPort != 0 { - output.ServiceName = "TODO" - output.ServiceDns = fmt.Sprintf("%s.%s", output.NodeName, "cluster") - output.Exposure = shared.EndpointExposureExternal } if len(pod.K8.Namespace) != 0 { @@ -453,5 +444,24 @@ func (c *StoreConverter) EndpointPrivate(_ context.Context, port *corev1.Contain output.Namespace = pod.K8.Namespace } + switch { + case len(port.Name) != 0: + output.ServiceName = port.Name + case port.HostPort != 0: + output.ServiceName = fmt.Sprintf("%s::%d", port.Protocol, port.HostPort) + default: + output.ServiceName = fmt.Sprintf("%s::%d", port.Protocol, port.ContainerPort) + } + + if port.HostPort != 0 { + // With a host port field, endpoint is only accessible from the node IP + output.Exposure = shared.EndpointExposureNodeIP + + // TODO future improvement - consider providing the node address as a backend here + } else { + // Without a host port field, endpoint is only accessible from within the cluster on the node IP + output.Exposure = shared.EndpointExposureClusterIP + } + return output, nil } diff --git a/pkg/kubehound/models/shared/constants.go b/pkg/kubehound/models/shared/constants.go index e9a193439..a128659c8 100644 --- a/pkg/kubehound/models/shared/constants.go +++ b/pkg/kubehound/models/shared/constants.go @@ -22,8 +22,9 @@ const ( type EndpointExposureType int const ( - EndpointExposureNone EndpointExposureType = iota - EndpointExposureInternal // Container port exposed to cluster - EndpointExposureExternal // Kubernetes endpoint exposed outside the cluster - EndpointExposurePublic // External DNS API endpoint + EndpointExposureNone EndpointExposureType = iota + EndpointExposureClusterIP // Container port exposed to cluster + EndpointExposureNodeIP // Kubernetes endpoint exposed outside the cluster + EndpointExposureExternal // Kubernetes endpoint exposed outside the 
cluster + EndpointExposurePublic // External DNS API endpoint ) diff --git a/test/setup/test-cluster/attacks/ENDPOINT_EXPOSE.yaml b/test/setup/test-cluster/attacks/ENDPOINT_EXPOSE.yaml index 4c0ccfa0f..7821a5c48 100644 --- a/test/setup/test-cluster/attacks/ENDPOINT_EXPOSE.yaml +++ b/test/setup/test-cluster/attacks/ENDPOINT_EXPOSE.yaml @@ -11,6 +11,8 @@ spec: containers: - name: endpoints-pod image: ubuntu + securityContext: + privileged: true command: [ "/bin/sh", "-c", "--" ] args: [ "while true; do sleep 30; done;" ] ports: diff --git a/test/setup/test-cluster/attacks/IDENTITY_ASSUME.yaml b/test/setup/test-cluster/attacks/IDENTITY_ASSUME.yaml index bd402837c..4e2af1cda 100644 --- a/test/setup/test-cluster/attacks/IDENTITY_ASSUME.yaml +++ b/test/setup/test-cluster/attacks/IDENTITY_ASSUME.yaml @@ -14,7 +14,7 @@ metadata: name: node-api-access roleRef: apiGroup: rbac.authorization.k8s.io - kind: Role + kind: ClusterRole name: node-api-access subjects: - kind: Group diff --git a/test/setup/test-cluster/attacks/TOKEN_VAR_LOG_SYMLINK.yaml b/test/setup/test-cluster/attacks/TOKEN_VAR_LOG_SYMLINK.yaml index d31d643ee..da0f221ac 100644 --- a/test/setup/test-cluster/attacks/TOKEN_VAR_LOG_SYMLINK.yaml +++ b/test/setup/test-cluster/attacks/TOKEN_VAR_LOG_SYMLINK.yaml @@ -4,7 +4,6 @@ kind: ServiceAccount metadata: name: varlog-sa namespace: default -automountServiceAccountToken: true --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/test/system/graph_edge_test.go b/test/system/graph_edge_test.go index 7b8782532..a4532ed8a 100644 --- a/test/system/graph_edge_test.go +++ b/test/system/graph_edge_test.go @@ -563,11 +563,11 @@ func (suite *EdgeTestSuite) TestEdge_EXPLOIT_HOST_TRAVERSE() { } } -func (suite *EdgeTestSuite) TestEdge_ENDPOINT_EXPOSE_Internal() { +func (suite *EdgeTestSuite) TestEdge_ENDPOINT_EXPOSE_ContainerPort() { results, err := suite.g.V(). HasLabel("Endpoint"). Where( - __.Has("access", P.Eq(int(shared.EndpointExposureInternal))). 
+ __.Has("exposure", P.Eq(int(shared.EndpointExposureClusterIP))). OutE("ENDPOINT_EXPOSE"). InV(). HasLabel("Container")). @@ -579,7 +579,29 @@ func (suite *EdgeTestSuite) TestEdge_ENDPOINT_EXPOSE_Internal() { paths := suite.resultsToStringArray(results) expected := []string{ - "default::endpoints-pod::jmx", + "jmx", + } + + suite.Subset(paths, expected) +} + +func (suite *EdgeTestSuite) TestEdge_ENDPOINT_EXPOSE_NodePort() { + results, err := suite.g.V(). + HasLabel("Endpoint"). + Where( + __.Has("exposure", P.Eq(int(shared.EndpointExposureNodeIP))). + OutE("ENDPOINT_EXPOSE"). + InV(). + HasLabel("Container")). + Values("serviceEndpoint"). + ToList() + + suite.NoError(err) + suite.GreaterOrEqual(len(results), 1) + + paths := suite.resultsToStringArray(results) + expected := []string{ + "host-port-svc", } suite.Subset(paths, expected) @@ -589,7 +611,7 @@ func (suite *EdgeTestSuite) TestEdge_ENDPOINT_EXPOSE_External() { results, err := suite.g.V(). HasLabel("Endpoint"). Where( - __.Has("access", P.Eq(int(shared.EndpointExposureExternal))). + __.Has("exposure", P.Eq(int(shared.EndpointExposureExternal))). OutE("ENDPOINT_EXPOSE"). InV(). HasLabel("Container")). diff --git a/test/system/vertex.gen.go b/test/system/vertex.gen.go index 7b62c6693..5fb0e1439 100644 --- a/test/system/vertex.gen.go +++ b/test/system/vertex.gen.go @@ -1,5 +1,5 @@ // PLEASE DO NOT EDIT -// THIS HAS BEEN GENERATED AUTOMATICALLY on 2023-08-02 14:17 +// THIS HAS BEEN GENERATED AUTOMATICALLY on 2023-08-03 15:20 // // Generate it with "go generate ./..." //