From dcd174c8437c2eb2eae81a1bd35d57bd8908b4f6 Mon Sep 17 00:00:00 2001 From: Brij Jhala Date: Sun, 8 Nov 2020 09:46:30 -0500 Subject: [PATCH] This commit includes docker image for Openshift k8 platform including probe, SSL changes. Hope this helps community. It is a prerequisite to clone the cortex binary into the /cortex folder and cortex-analyzers into the /cortex/Cortex-Analyzers folder. Date: November 9th, 2020 --- docker/cortex/ocp/Dockerfile | 26 ++++++ docker/cortex/ocp/README.md | 10 +++ docker/cortex/ocp/application.conf | 77 +++++++++++++++++ docker/cortex/ocp/cortex/probe.sh | 27 ++++++ docker/cortex/ocp/cortex/store.sh | 11 +++ docker/cortex/ocp/entrypoint | 130 +++++++++++++++++++++++++++++ 6 files changed, 281 insertions(+) create mode 100644 docker/cortex/ocp/Dockerfile create mode 100644 docker/cortex/ocp/README.md create mode 100644 docker/cortex/ocp/application.conf create mode 100644 docker/cortex/ocp/cortex/probe.sh create mode 100644 docker/cortex/ocp/cortex/store.sh create mode 100644 docker/cortex/ocp/entrypoint diff --git a/docker/cortex/ocp/Dockerfile b/docker/cortex/ocp/Dockerfile new file mode 100644 index 000000000..8680c00b9 --- /dev/null +++ b/docker/cortex/ocp/Dockerfile @@ -0,0 +1,26 @@ +FROM registry.connect.redhat.com/ibm/ibmjava8-sdk-ubi8:latest +USER 0 + +RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +RUN yum -y install autoconf automake binutils gcc gcc-c++ glibc-devel libtool make pkgconf pkgconf-m4 pkgconf-pkg-config python2-pip python3-pip ssdeep perl-Image-ExifTool file-libs openssl-devel +RUN yum -y install git +RUN yum -y install python3-devel +RUN yum -y install procps + +COPY cortex /cortex +COPY cortex-analyzers /Cortex-Analyzers +COPY application.conf /cortex/application.conf +ADD entrypoint /cortex/ + +RUN chgrp -R 0 /cortex && \ chmod -R g=u /cortex + +RUN chmod 755 /cortex/probe.sh + +RUN for requirement in $(ls -1 /Cortex-Analyzers/analyzers/*/requirements.txt); do pip2
install -r ${requirement}; done +RUN for requirement in $(ls -1 /Cortex-Analyzers/analyzers/*/requirements.txt); do pip3 install -r ${requirement}; done + +USER 1001 + +EXPOSE 3000 +ENTRYPOINT ["/cortex/entrypoint"] diff --git a/docker/cortex/ocp/README.md b/docker/cortex/ocp/README.md new file mode 100644 index 000000000..bb44ea3a9 --- /dev/null +++ b/docker/cortex/ocp/README.md @@ -0,0 +1,10 @@ +# OCP/ RHEL Cortex Image +Cortex docker image for Openshift platform 4.4 compatible with RHEL OS. It also includes HTTPS, probe changes. + + - prerequisite to clone cortex binary to /cortex folder. cortex-analyzers to be cloned in /cortex/Cortex-Analyzers directory. + - application.conf parameters as well as entrypoint parameters are defined as env variable in k8 deployment. Secrets are also defined in k8 secret and loaded in application.conf + - Before cortex is up and running, an init container runs store.sh, which adds the certificates to the truststore and keystore. + - cortex : migrate endpoint bootstrap cortex : `/api/maintenance/migrate` and create super user and super user admin api key + - once cortex is up and running, probe.sh keeps checking the list of organizations; this is the liveness probe + + + diff --git a/docker/cortex/ocp/application.conf b/docker/cortex/ocp/application.conf new file mode 100644 index 000000000..f6786ee71 --- /dev/null +++ b/docker/cortex/ocp/application.conf @@ -0,0 +1,77 @@ +#secret key +# ~~~~~ +# The secret key is used to secure cryptographic functions. +# If you deploy your application to several instances be sure to use the same key!
+ +play.http.secret.key="xxx" +#play.server.provider = play.core.server.AkkaHttpServerProvider +http2.enabled=no +#auth.method.basic = false +#auth.method.key=true +#auth.method.init=true +#http.port=disabled +cache.job = 20 minutes + +https.port: 3000 + +# Passed into SSLContext.getInstance() +play.ws.ssl.protocol = ${TLSVERSION} + +play.server.https.keyStore { + path: ${KEYSTORE} + type: ${KEYSTORE_TYPE} + password: ${STORE_PASSWORD} +} + +analyzer { + # Directory that holds analyzers + path = [ + "/Cortex-Analyzers/analyzers" + ] + fork-join-executor { + # Min number of threads available for analyze + parallelism-min = 8 + # Parallelism (threads) ... ceil(available processors * factor) + # 4.0 + parallelism-factor = 4.0 + # Max number of threads available for analyze + parallelism-max = 64 + } +} +responder { + # Directory that holds responders + path = [ + "/Cortex-Analyzers/responders", + "/path/to/my/own/responder" + ] + fork-join-executor { + # Min number of threads available for analyze + parallelism-min = 2 + # Parallelism (threads) ... ceil(available processors * factor) + parallelism-factor = 2.0 + # Max number of threads available for analyze + parallelism-max = 4 + } +} + +## ElasticSearch + +search { + # Name of the index + index = cortex + # ElasticSearch instance address.
+ uri = ${ES_URL} + cluster = "hive" + ## Authentication configuration + user = ${ELASTIC_USERNAME} + password = ${ELASTIC_PASSWORD} + + ## SSL configuration + keyStore.path = ${KEYSTORE} + keyStore.type = ${KEYSTORE_TYPE} + keyStore.password = ${STORE_PASSWORD} + trustStore.path = ${TRUSTSTORE} + trustStore.type = ${TRUSTSTORE_TYPE} # or PKCS12 + trustStore.password = ${STORE_PASSWORD} + +} diff --git a/docker/cortex/ocp/cortex/probe.sh b/docker/cortex/ocp/cortex/probe.sh new file mode 100644 index 000000000..5e88a85fd --- /dev/null +++ b/docker/cortex/ocp/cortex/probe.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# fail should be called as a last resort to help the user to understand why the probe failed +function fail { + timestamp=$(date --iso-8601=seconds) + echo "{\"timestamp\": \"${timestamp}\", \"message\": \"Liveness probe failed\", "$1"}" | tee /proc/1/fd/2 2> /dev/null + exit 1 +} + +READINESS_PROBE_TIMEOUT=${READINESS_PROBE_TIMEOUT:=60} +ENDPOINT="https://cortex:3000/api/organization" +status=$(curl -o /dev/null -w "%{http_code}" --max-time ${READINESS_PROBE_TIMEOUT} -XGET -s -k -H "Authorization: Bearer ${CORTEX_API_KEY}" $ENDPOINT) +curl_rc=$? 
+ +echo $status + +if [[ ${curl_rc} -ne 0 ]]; then + fail "\"curl_rc\": \"${curl_rc}\"" +fi + +# ready if status code 200 +if [[ ${status} == "200" ]]; then + exit 0 +else + fail " \"status\": \"${status}\" " +fi +# end of cortex readiness and liveness check diff --git a/docker/cortex/ocp/cortex/store.sh b/docker/cortex/ocp/cortex/store.sh new file mode 100644 index 000000000..d846ea49d --- /dev/null +++ b/docker/cortex/ocp/cortex/store.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# number of certs in the PEM file +openssl pkcs12 -export -inkey $PRIVATE_PEM -in $PUBLIC_PEM -out $KEYSTORE -password pass:$STORE_PASSWORD -certfile $CA_CERT; +CERTS=$(grep 'END CERTIFICATE' $CA_CERT| wc -l) + +for N in $(seq 0 $(($CERTS - 1))); do + ALIAS="${CA_CERT%.*}-$N" + cat $CA_CERT | + awk "n==$N { print }; /END CERTIFICATE/ { n++ }" | + keytool -import -noprompt -keystore $TRUSTSTORE -storepass $STORE_PASSWORD -alias $ALIAS +done diff --git a/docker/cortex/ocp/entrypoint b/docker/cortex/ocp/entrypoint new file mode 100644 index 000000000..d114c1d54 --- /dev/null +++ b/docker/cortex/ocp/entrypoint @@ -0,0 +1,130 @@ +ES_HOSTNAME=elasticsearch +CONFIG_SECRET=1 +CONFIG_ES=1 +CONFIG=1 +CONFIG_FILE=/cortex/application.conf +ANALYZER_PATH=/Cortex-Analyzers/analyzers +ANALYZER_URLS=() +RESPONDER_PATH=/Cortex-Analyzers/responders +RESPONDER_URLS=() +START_DOCKER=0 +SHOW_SECRET=0 +ES_CORTEX_URL=$ES_URL + +if [ -z "$ES_CORTEX_URL" ]; then + echo "ES_URL environment is not defined or set" + +else + echo "ES_URL configured using elasticsearch uri: $ES_CORTEX_URL" +fi + +function usage { + cat <<- _EOF_ + Available options: + --no-config | do not try to configure Cortex (add secret and elasticsearch) + --no-config-secret | do not add random secret to configuration + --no-config-es | do not add elasticsearch hosts to configuration + --es-uri | use this string to configure elasticsearch hosts (format: http(s)://host:port,host:port(/prefix)?querystring) + --es-hostname | resolve this hostname to find
elasticsearch instances + --secret | secret to secure sessions + --show-secret | show the generated secret + --analyzer-url | where analyzers are located (url or path) + --responder-url | where responders are located (url or path) + --start-docker | start an internal docker (inside container) to run analyzers/responders + _EOF_ + exit 1 +} + +STOP=0 +while test $# -gt 0 -o $STOP = 1 +do + case "$1" in + "--no-config") CONFIG=0;; + "--no-config-secret") CONFIG_SECRET=0;; + "--no-config-es") CONFIG_ES=0;; + "--es-hosts") echo "--es-hosts is deprecated, please use --es-uri" + usage;; + "--es-uri") shift; ES_URI=$1;; + "--es-hostname") shift; ES_HOSTNAME=$1;; + "--secret") shift; SECRET=$1;; + "--show-secret") SHOW_SECRET=1;; + "--analyzer-path") shift; ANALYZER_PATH=$1;; + "--responder-path") shift; RESPONDER_PATH=$1;; + "--analyzer-url") shift; ANALYZER_URLS+=$1;; + "--responder-url") shift; RESPONDER_URLS+=$1;; + "--start-docker") START_DOCKER=1;; + "--") STOP=1;; + *) echo "unrecognized option: $1"; usage;; + esac + shift +done + + +if test $CONFIG = 1 +then + CONFIG_FILE=$(mktemp).conf + if test $CONFIG_SECRET = 1 + then + if test -z "$SECRET" + then + SECRET=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1) + test $SHOW_SECRET = 1 && echo Using secret: $SECRET + fi + echo play.http.secret.key=\"$SECRET\" >> $CONFIG_FILE + fi + + if test $CONFIG_ES = 1 + then + if test -z "$ES_URI" + then + function join_es_hosts { + echo -n $1:9200 + shift + printf "%s," "${@/#/:9200}" + } + + ES=$(getent ahostsv4 $ES_HOSTNAME | awk '{ print $1 }' | sort -u) + if test -z "$ES" + then + echo "Warning automatic elasticsearch host config fails" + else + ES_URI=http://$(join_es_hosts $ES) + fi + fi + if test -n "$ES_URI" + then + echo Using elasticsearch uri: $ES_URI + echo search.uri=\"$ES_URI\" >> $CONFIG_FILE + else + echo "elasticsearch host not configured as an image argument (Ignore if ES_URL configured)" + fi + fi + + function join_urls { + echo -n \"$1\" + shift
+ for U do echo -n ,\"$U\"; done +# printf ",\"%s\"" $@ + } + test ${#ANALYZER_URLS} = 0 && ANALYZER_URLS+=$ANALYZER_PATH + test ${#RESPONDER_URLS} = 0 && RESPONDER_URLS+=$RESPONDER_PATH + + echo analyzer.urls=\[$(join_urls ${ANALYZER_URLS[@]})\] >> $CONFIG_FILE + echo responder.urls=\[$(join_urls ${RESPONDER_URLS[@]})\] >> $CONFIG_FILE + + echo 'include file("/cortex/application.conf")' >> $CONFIG_FILE +fi + + +echo config file is: +cat $CONFIG_FILE + + +echo "XMX and XMS defined : $XMX and $XMS" +/bin/sh -c "/cortex/bin/cortex \ + -Dconfig.file=$CONFIG_FILE \ + -J-$XMX -J-$XMS \ + -Dlogger.file=/cortex/conf/logback.xml \ + -Dpidfile.path=/dev/null \ + $@" daemon