Skip to content

Commit

Permalink
add nessus e2e test (#259)
Browse files Browse the repository at this point in the history
* fork py-nessus-pro

* wip: add nessus e2e test

* lint

* fix unit test

* fixup

* better cleanup

* add secret copying step

* fixup

* debug nessus tests

* move delay

* more debug nessus

* decrease nessus cpu

* decrease nessus cpu more

* try simplifying tekton files

* fixup tekton

* fix linting

* rm e2e-tests/__init__.py

* try and fix tekton

* try and fix tekton

* define integration tasks inline

* update comment
  • Loading branch information
sfowl authored Nov 19, 2024
1 parent 9b41b2e commit c4f9314
Show file tree
Hide file tree
Showing 12 changed files with 472 additions and 185 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,6 @@ repos:
args:
- --max-line-length=120
- --min-public-methods=0
- --good-names=w,q,f,fp,i,e
- --good-names=o,w,q,f,fp,i,e
- --disable=E0401,W1201,W1203,C0114,C0115,C0116,C0411,W0107,W0511,W0702,R0801,R1705,R1710
language_version: python3
130 changes: 110 additions & 20 deletions .tekton/integration-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ spec:
- name: SNAPSHOT
value: $(params.SNAPSHOT)

- name: provision-eaas-space
- name: provision-eaas-space-nessus
runAfter:
- parse-metadata
taskRef:
Expand All @@ -63,23 +63,57 @@ spec:
- name: PIPELINERUN_UID
value: $(context.pipelineRun.uid)

# XXX not supported to use workspaces in integration tests:
# * https://issues.redhat.com/browse/STONEINTG-895
#
# - name: clone-repository
# runAfter:
# - parse-metadata
# params:
# - name: url
# value: "$(tasks.parse-metadata.results.source-git-url)"
# - name: revision
# value: "$(tasks.parse-metadata.results.source-git-revision)"
# taskRef:
# name: git-clone
# workspaces:
# - name: output
# workspace: source
- name: copy-nessus-secret
runAfter:
- provision-eaas-space-nessus
taskSpec:
steps:
- name: copy-nessus-secret
image: registry.redhat.io/openshift4/ose-cli:latest
env:
- name: KUBECONFIG
value: /tmp/kubeconfig
- name: EAAS_KUBECONFIG_VALUE
valueFrom:
secretKeyRef:
name: $(tasks.provision-eaas-space-nessus.results.secretRef)
key: kubeconfig
workingDir: /workspace
script: |
#!/bin/bash -ex
# initial request will default to in-cluster k8s config
oc whoami
oc get secret sfowler-nessus-pull-secret -o yaml > /tmp/nessus-pull-secret.yaml
sed '/namespace:/d' /tmp/nessus-pull-secret.yaml > /tmp/new-secret.yaml
# second request should use newly provisioned eaas creds + namespace
echo "$EAAS_KUBECONFIG_VALUE" > "$KUBECONFIG"
oc whoami
oc apply -f /tmp/new-secret.yaml
- name: provision-eaas-space
runAfter:
- parse-metadata
taskRef:
resolver: git
params:
- name: url
value: https://github.com/konflux-ci/build-definitions.git
- name: revision
value: main
- name: pathInRepo
value: task/provision-env-with-ephemeral-namespace/0.1/provision-env-with-ephemeral-namespace.yaml
params:
- name: KONFLUXNAMESPACE
value: $(context.pipelineRun.namespace)
- name: PIPELINERUN_NAME
value: $(context.pipelineRun.name)
- name: PIPELINERUN_UID
value: $(context.pipelineRun.uid)

# XXX integrations tests can't reference Tasks in the same PR AFAICT
# so need to repeat them inline, rather than define in a separate file
- name: run-e2e-tests
runAfter:
- provision-eaas-space
Expand All @@ -92,6 +126,59 @@ spec:
description: e2e test results
steps:

# XXX not supported to use workspaces in integration tests
- name: clone-repository
image: quay.io/konflux-ci/git-clone:latest
script: |
git config --global --add safe.directory /workspace
git clone "$(tasks.parse-metadata.results.source-git-url)" /workspace
pushd /workspace
git checkout "$(tasks.parse-metadata.results.source-git-revision)"
- name: test
image: registry.redhat.io/openshift4/ose-cli:latest
env:
- name: KUBECONFIG
value: /tmp/kubeconfig
- name: KUBECONFIG_VALUE
valueFrom:
secretKeyRef:
name: $(tasks.provision-eaas-space.results.secretRef)
key: kubeconfig
- name: RAPIDAST_CLEANUP
value: "false" # namespace will be cleaned up automatically
- name: RAPIDAST_IMAGE
value: $(tasks.parse-metadata.results.component-container-image)
- name: RAPIDAST_SERVICEACCOUNT
value: namespace-manager # created by provision-env-with-ephemeral-namespace
workingDir: /workspace
volumeMounts:
- name: credentials
mountPath: /credentials
script: |
#!/bin/bash -ex
echo "$KUBECONFIG_VALUE" > "$KUBECONFIG"
oc whoami
yum install -y python3.12 git
python3.12 -m ensurepip
pip3 install -r requirements.txt -r requirements-dev.txt
pytest -s e2e-tests/test_integration.py --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path)
cat $(results.TEST_RESULTS.path)
- name: run-e2e-tests-nessus
runAfter:
- copy-nessus-secret
taskSpec:
volumes:
- name: credentials
emptyDir: {}
results:
- name: TEST_RESULTS
description: e2e test results
steps:

# XXX not supported to use workspaces in integration tests:
# * https://issues.redhat.com/browse/STONEINTG-895
- name: clone-repository
Expand All @@ -110,7 +197,7 @@ spec:
- name: KUBECONFIG_VALUE
valueFrom:
secretKeyRef:
name: $(tasks.provision-eaas-space.results.secretRef)
name: $(tasks.provision-eaas-space-nessus.results.secretRef)
key: kubeconfig
- name: RAPIDAST_CLEANUP
value: "false" # namespace will be cleaned up automatically
Expand All @@ -128,8 +215,11 @@ spec:
echo "$KUBECONFIG_VALUE" > "$KUBECONFIG"
oc whoami
yum install -y python3.12
# XXX temp!
oc get secret sfowler-nessus-pull-secret
yum install -y python3.12 git
python3.12 -m ensurepip
pip3 install -r requirements.txt -r requirements-dev.txt
pytest -s e2e-tests --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path)
pytest -sv e2e-tests/test_nessus.py --json-report --json-report-summary --json-report-file $(results.TEST_RESULTS.path)
cat $(results.TEST_RESULTS.path)
166 changes: 166 additions & 0 deletions e2e-tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
import logging
import os
import shutil
import tempfile
import time
from functools import partial

import certifi
from kubernetes import client
from kubernetes import config
from kubernetes import utils
from kubernetes import watch
from kubernetes.client.rest import ApiException

# Target namespace for all test resources, e.g. "rapidast--pipeline".
# An empty string means "use the current kube context's namespace" (resolved in setup_namespace()).
NAMESPACE = os.getenv("RAPIDAST_NAMESPACE", "")
SERVICEACCOUNT = os.getenv("RAPIDAST_SERVICEACCOUNT", "pipeline")  # name of ServiceAccount used in rapidast pod
# container image under test; defaults to the public development tag
RAPIDAST_IMAGE = os.getenv("RAPIDAST_IMAGE", "quay.io/redhatproductsecurity/rapidast:development")
# delete resources created by tests (truthy values: true/1/t/y/yes, case-insensitive)
RAPIDAST_CLEANUP = os.getenv("RAPIDAST_CLEANUP", "True").lower() in ("true", "1", "t", "y", "yes")

# directory holding the manifest templates consumed by render_manifests()
MANIFESTS = "e2e-tests/manifests"


# monkeypatch certifi so that internal CAs are trusted
def where():
    """Drop-in replacement for certifi.where().

    Returns the CA bundle path from $REQUESTS_CA_BUNDLE when set,
    otherwise the system-wide RHEL/Fedora CA bundle path.
    """
    override = os.environ.get("REQUESTS_CA_BUNDLE")
    return override if override is not None else "/etc/pki/tls/certs/ca-bundle.crt"


# install the override so any library consulting certifi.where() picks up our CA bundle
certifi.where = where


def wait_until_ready(**kwargs):
    """Poll every 2s until the single pod matching kwargs reports Ready=True.

    kwargs are forwarded to CoreV1Api.list_namespaced_pod (e.g.
    label_selector="app=nessus"); an optional "timeout" kwarg (seconds,
    default 120) bounds the wait and is popped off before the list call.

    Returns True once the pod's Ready condition is "True", False on
    timeout or on an API error. Raises RuntimeError when the selector
    matches a number of pods other than exactly one.
    """
    corev1 = client.CoreV1Api()
    timeout = kwargs.pop("timeout", 120)

    start_time = time.time()

    while time.time() - start_time < timeout:
        time.sleep(2)
        try:
            pods = corev1.list_namespaced_pod(namespace=NAMESPACE, **kwargs)
        except client.ApiException as e:
            # best-effort: an API failure ends the wait rather than raising
            logging.error(f"Error checking pod status: {e}")
            return False

        if len(pods.items) != 1:
            raise RuntimeError(f"Unexpected number of pods {len(pods.items)} matching: {kwargs}")
        pod = pods.items[0]

        # Check if pod is ready by looking at conditions
        if pod.status.conditions:
            for condition in pod.status.conditions:
                if condition.type == "Ready":
                    logging.info(f"{pod.metadata.name} Ready={condition.status}")
                    if condition.status == "True":
                        return True
    # timed out without ever observing Ready=True
    return False


# simulates: $ oc logs -f <pod> | tee <file>
def tee_log(pod_name: str, filename: str):
    """Stream the pod's log, printing each line and appending it to filename."""
    api = client.CoreV1Api()
    watcher = watch.Watch()
    with open(filename, "w", encoding="utf-8") as out:
        for event in watcher.stream(api.read_namespaced_pod_log, name=pod_name, namespace=NAMESPACE):
            # Watch.stream() can yield non-string types; keep only log lines
            if isinstance(event, str):
                out.write(event + "\n")
                print(event)


def render_manifests(input_dir, output_dir):
    """Copy manifest templates from input_dir into output_dir and expand placeholders.

    Every regular file directly inside output_dir (non-recursive) has the
    literal tokens ${IMAGE} and ${SERVICEACCOUNT} replaced with the
    module-level RAPIDAST_IMAGE and SERVICEACCOUNT values.
    """
    shutil.copytree(input_dir, output_dir, dirs_exist_ok=True)
    logging.info(f"rendering manifests in {output_dir}")
    logging.info(f"using serviceaccount {SERVICEACCOUNT}")
    # XXX should probably replace this with something like kustomize
    # "with" closes the scandir iterator deterministically
    with os.scandir(output_dir) as entries:
        for entry in entries:
            # skip subdirectories/symlinks-to-dirs: open() on them would raise
            if not entry.is_file():
                continue
            with open(entry.path, "r", encoding="utf-8") as f:
                contents = f.read()
            contents = contents.replace("${IMAGE}", RAPIDAST_IMAGE)
            contents = contents.replace("${SERVICEACCOUNT}", SERVICEACCOUNT)
            with open(entry.path, "w", encoding="utf-8") as f:
                f.write(contents)


def setup_namespace():
    """Resolve the module-level NAMESPACE, creating it when explicitly requested.

    An unset RAPIDAST_NAMESPACE (empty string) falls back to the current
    kube context's namespace; a non-empty value is created if missing.
    """
    global NAMESPACE  # pylint: disable=W0603
    # only try to create a namespace if env is set
    if NAMESPACE:
        create_namespace(NAMESPACE)
    else:
        NAMESPACE = get_current_namespace()
    logging.info(f"using namespace '{NAMESPACE}'")


def get_current_namespace() -> str:
    """Best-effort detection of the namespace for the current environment.

    Tries the active kubeconfig context first; when no kubeconfig is
    available (ConfigException — presumably running inside a pod), reads
    the mounted serviceaccount namespace file. Returns "default" when
    neither source yields a namespace.
    """
    try:
        # Load the kubeconfig
        config.load_config()

        # Inspect the active context for an explicit namespace
        _, active_context = config.list_kube_config_contexts()
        if active_context:
            context = active_context["context"]
            if "namespace" in context:
                return context["namespace"]
        return "default"

    except config.config_exception.ConfigException:
        # If running inside a pod
        try:
            with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r", encoding="utf-8") as f:
                return f.read().strip()
        except FileNotFoundError:
            return "default"


def create_namespace(namespace_name: str):
    """Create namespace_name if it does not already exist (idempotent)."""
    config.load_config()
    corev1 = client.CoreV1Api()
    try:
        corev1.read_namespace(namespace_name)
        logging.info(f"namespace {namespace_name} already exists")
    except ApiException as e:
        if e.status == 404:
            # not found -> create it
            logging.info(f"creating namespace {namespace_name}")
            namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace_name))
            corev1.create_namespace(namespace)
        else:
            raise e
    except Exception as e:  # pylint: disable=W0718
        # only guards non-ApiException failures of read_namespace above;
        # exceptions raised inside the create branch still propagate
        logging.error(f"error reading namespace {namespace_name}: {e}")


def new_kclient():
    """Load kube config (kubeconfig file or in-cluster) and return a fresh ApiClient."""
    config.load_config()
    kclient = client.ApiClient()
    return kclient


class TestBase:
    """Shared base for e2e test classes: renders manifests into a scratch
    dir, records created resources, and deletes them after the class runs."""

    # NOTE(review): class-level mutable list — shared by subclasses that do
    # not shadow it; confirm cross-class accumulation of teardowns is intended
    _teardowns = []

    @classmethod
    def setup_class(cls):
        # fresh scratch directory per test class for the rendered manifests
        cls.tempdir = tempfile.mkdtemp()
        cls.kclient = new_kclient()
        render_manifests(MANIFESTS, cls.tempdir)
        logging.info(f"testing with image: {RAPIDAST_IMAGE}")
        setup_namespace()

    @classmethod
    def teardown_class(cls):
        # TODO teardown should really occur after each test, so that the
        # resource count does not grow until the quota is reached
        if RAPIDAST_CLEANUP:
            for func in cls._teardowns:
                logging.debug(f"calling {func}")
                func()
            # XXX oobtukbe does not clean up after itself
            os.system(f"kubectl delete Task/vulnerable -n {NAMESPACE}")

    def create_from_yaml(self, path: str):
        # delete resources in teardown method later
        self._teardowns.append(partial(os.system, f"kubectl delete -f {path} -n {NAMESPACE}"))
        o = utils.create_from_yaml(self.kclient, path, namespace=NAMESPACE, verbose=True)
        logging.debug(o)
47 changes: 47 additions & 0 deletions e2e-tests/manifests/nessus-deployment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nessus
  labels:
    app: nessus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nessus
  template:
    metadata:
      labels:
        app: nessus
    spec:
      # pull credentials for the private quay.io/sfowler/nessus image
      imagePullSecrets:
        - name: sfowler-nessus-pull-secret
      containers:
        - name: nessus
          command:
            - /opt/nessus/sbin/nessus-service
            - --no-root
          env:
            # disable plugin-feed auto-updates inside the test cluster
            - name: AUTO_UPDATE
              value: "no"
          # image pinned by digest for reproducible test runs
          image: quay.io/sfowler/nessus@sha256:5881d6928e52d6c536634aeba0bbb7d5aac2b53e77c17f725e4e5aff0054f772
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8834
          readinessProbe:
            # pod is Ready only once the Nessus status API allows logins
            exec:
              command:
                - /bin/bash
                - -c
                - |
                  #!/bin/bash
                  # curl -ks https://0.0.0.0:8834/server/status | python3 -c 'import sys, json; json.load(sys.stdin)["code"] == 200 or sys.exit(1)'
                  curl -ks https://0.0.0.0:8834/server/status | python3 -c 'import sys, json; json.load(sys.stdin)["detailed_status"]["login_status"] == "allow" or sys.exit(1)'
            # Nessus initialization is slow: up to 20s + 32 * 10s before giving up
            initialDelaySeconds: 20
            periodSeconds: 10
            failureThreshold: 32
          resources:
            limits:
              cpu: 1500m
              memory: 4Gi
Loading

0 comments on commit c4f9314

Please sign in to comment.