diff --git a/.env b/.env index 7722d701b1..54413a752c 100644 --- a/.env +++ b/.env @@ -2,14 +2,23 @@ # Images IMAGE_VERSION=1.6.0 -IMAGE_NAME=ghcr.io/open-telemetry/demo +# IMAGE_NAME=ghcr.io/open-telemetry/demo +# IMAGE_NAME=default-route-openshift-image-registry.apps.ocp05.tec.cz.ibm.com/otel-demo/demo +IMAGE_NAME=styblope/otel-demo TRACETEST_IMAGE_VERSION=v0.14.5 +# Instana +# INSTANA_AGENT_HOST=instana-agent.instana-agent # k8s agent service +INSTANA_AGENT_HOST=172.17.0.1 # docker0 interface +INSTANA_AGENT_PORT=42699 + +# Collector # Demo Platform ENV_PLATFORM=local # OpenTelemetry Collector -OTEL_COLLECTOR_HOST=otelcol +# OTEL_COLLECTOR_HOST=otelcol +OTEL_COLLECTOR_HOST=${INSTANA_AGENT_HOST:-otelcol} OTEL_COLLECTOR_PORT_GRPC=4317 OTEL_COLLECTOR_PORT_HTTP=4318 OTEL_EXPORTER_OTLP_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:${OTEL_COLLECTOR_PORT_GRPC} diff --git a/.github/README.md b/.github/README.md new file mode 120000 index 0000000000..acc32c2972 --- /dev/null +++ b/.github/README.md @@ -0,0 +1 @@ +../instana/README.md \ No newline at end of file diff --git a/.github/workflows/custom-manual-release.yml b/.github/workflows/custom-manual-release.yml new file mode 100644 index 0000000000..f75dfddc20 --- /dev/null +++ b/.github/workflows/custom-manual-release.yml @@ -0,0 +1,125 @@ +name: "Custom manual release" + +on: workflow_dispatch + +jobs: + build_and_push_images: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + env: + # RELEASE_VERSION: "${{ github.event.release.tag_name }}" + RELEASE_VERSION: "1.6.0" + DOCKERHUB_REPO: "styblope/otel-demo" + # GHCR_REPO: "styblope/otel-emo" + + strategy: + matrix: + file_tag: + - file: ./src/adservice/Dockerfile + tag_suffix: adservice + context: ./ + - file: ./src/cartservice/src/Dockerfile + tag_suffix: cartservice + context: ./ + - file: ./src/checkoutservice/Dockerfile + tag_suffix: checkoutservice + context: ./ + - file: ./src/currencyservice/Dockerfile + tag_suffix: currencyservice + context: ./src/currencyservice + - file: ./src/emailservice/Dockerfile + tag_suffix: emailservice + context: ./src/emailservice + - file: ./src/featureflagservice/Dockerfile + tag_suffix: featureflagservice + context: ./ + - file: ./src/frontend/Dockerfile + tag_suffix: frontend + context: ./ + - file: ./src/frontendproxy/Dockerfile + tag_suffix: frontendproxy + context: ./ + - file: ./src/loadgenerator/Dockerfile + tag_suffix: loadgenerator + context: ./ + - file: ./src/paymentservice/Dockerfile + tag_suffix: paymentservice + context: ./ + - file: ./src/productcatalogservice/Dockerfile + tag_suffix: productcatalogservice + context: ./ + - file: ./src/quoteservice/Dockerfile + tag_suffix: quoteservice + context: ./ + - file: ./src/shippingservice/Dockerfile + tag_suffix: shippingservice + context: ./ + - file: ./src/recommendationservice/Dockerfile + tag_suffix: recommendationservice + context: ./ + - file: ./src/kafka/Dockerfile + tag_suffix: kafka + context: ./ + - file: ./src/accountingservice/Dockerfile + tag_suffix: accountingservice + context: ./ + - file: ./src/frauddetectionservice/Dockerfile + tag_suffix: frauddetectionservice + context: ./ + - file: ./src/frontend/Dockerfile.cypress + tag_suffix: frontend-tests + context: ./ + - file: ./test/Dockerfile + tag_suffix: integrationTests + context: ./ + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + # - name: Log in to the Container registry + # uses: docker/login-action@v2 + # with: + # registry: ghcr.io + # username: ${{ github.repository_owner }} + # password: 
${{ secrets.GITHUB_TOKEN }}
      #   if: github.event_name != 'pull_request'

      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
        if: github.event_name != 'pull_request'

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          config-inline: |
            [worker.oci]
              max-parallelism = 2

      - name: Matrix Build and push demo images
        uses: docker/build-push-action@v3.3.0
        with:
          context: ${{ matrix.file_tag.context }}
          file: ${{ matrix.file_tag.file }}
          platforms: linux/amd64
          push: ${{ github.event_name != 'pull_request' }}
          # tags: |
          #   ${{ env.DOCKERHUB_REPO }}:${{ env.RELEASE_VERSION || 'pr' }}-${{ matrix.file_tag.tag_suffix }}
          #   ${{ env.GHCR_REPO }}:${{ env.RELEASE_VERSION || 'pr' }}-${{ matrix.file_tag.tag_suffix }}
          tags: |
            ${{ env.DOCKERHUB_REPO }}:${{ env.RELEASE_VERSION || 'pr' }}-${{ matrix.file_tag.tag_suffix }}
          build-args: |
            INSTANA_AGENT_KEY=${{ secrets.INSTANA_AGENT_KEY }}
            INSTANA_DOWNLOAD_KEY=${{ secrets.INSTANA_DOWNLOAD_KEY }}
          cache-from: type=gha
          cache-to: type=gha
diff --git a/Makefile b/Makefile
index e9340278b5..fda92db382 100644
--- a/Makefile
+++ b/Makefile
@@ -86,6 +86,10 @@ build-and-push-ghcr:
 	docker compose --env-file .ghcr.env -f docker-compose.yml build
 	docker compose --env-file .ghcr.env -f docker-compose.yml push
 
+.PHONY: push
+push:
+	docker compose --env-file .env -f docker-compose.yml push
+
 .PHONY: build-env-file
 build-env-file:
 	cp .env .dockerhub.env
diff --git a/docker-compose.yml b/docker-compose.yml
index d224dad9a4..1fb37729fa 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -37,6 +37,9 @@ services:
       - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
       - OTEL_RESOURCE_ATTRIBUTES
       - OTEL_SERVICE_NAME=accountingservice
+      - INSTANA_SERVICE_NAME=accountingservice
+      - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST}
+      - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT}
     depends_on:
       otelcol:
         condition: service_started
@@ -68,6 +71,8 @@ services:
       - OTEL_RESOURCE_ATTRIBUTES
       - OTEL_LOGS_EXPORTER=otlp
       - OTEL_SERVICE_NAME=adservice
+      - INSTANA_SERVICE_NAME=adservice
+      - OTEL_LOGS_EXPORTER
     depends_on:
       otelcol:
         condition: service_started
@@ -96,6 +101,13 @@ services:
       - OTEL_RESOURCE_ATTRIBUTES
       - OTEL_SERVICE_NAME=cartservice
       - ASPNETCORE_URLS=http://*:${CART_SERVICE_PORT}
+      - INSTANA_SERVICE_NAME=cartservice
+      - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST}
+      - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT}
+      - DOTNET_STARTUP_HOOKS=/usr/src/app/Instana.Tracing.Core.dll
+      # - CORECLR_ENABLE_PROFILING=1
+      # - CORECLR_PROFILER={cf0d821e-299b-5307-a3d8-b283c03916dd}
+      # - CORECLR_PROFILER_PATH=/usr/src/app/instana_tracing/CoreProfiler.so
     depends_on:
       redis-cart:
         condition: service_started
@@ -132,6 +144,9 @@ services:
       - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
       - OTEL_RESOURCE_ATTRIBUTES
       - OTEL_SERVICE_NAME=checkoutservice
+      - INSTANA_SERVICE_NAME=checkoutservice
+      - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST}
+      - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT}
     depends_on:
       cartservice:
         condition: service_started
@@ -173,6 +188,7 @@ services:
       - CURRENCY_SERVICE_PORT
       - OTEL_EXPORTER_OTLP_ENDPOINT
       - OTEL_RESOURCE_ATTRIBUTES=${OTEL_RESOURCE_ATTRIBUTES},service.name=currencyservice # The C++ SDK does not support OTEL_SERVICE_NAME
+      - INSTANA_SERVICE_NAME=currencyservice
     depends_on:
       otelcol:
         condition: service_started
@@ -199,6 +215,9 @@ services:
      - 
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:4318/v1/traces - OTEL_RESOURCE_ATTRIBUTES - OTEL_SERVICE_NAME=emailservice + - INSTANA_SERVICE_NAME=emailservice + - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST} + - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT} depends_on: otelcol: condition: service_started @@ -213,10 +232,10 @@ services: dockerfile: ./src/featureflagservice/Dockerfile cache_from: - ${IMAGE_NAME}:${IMAGE_VERSION}-featureflagservice - deploy: - resources: - limits: - memory: 175M + # deploy: + # resources: + # limits: + # memory: 200M restart: unless-stopped ports: - "${FEATURE_FLAG_SERVICE_PORT}" # Feature Flag Service UI @@ -228,6 +247,7 @@ services: - OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc - OTEL_SERVICE_NAME=featureflagservice - DATABASE_URL=ecto://ffs:ffs@ffs_postgres:5432/ffs + - INSTANA_SERVICE_NAME=featureflagservice healthcheck: test: ["CMD", "curl", "-H", "baggage: synthetic_request=true", "-f", "http://localhost:${FEATURE_FLAG_SERVICE_PORT}"] depends_on: @@ -255,6 +275,7 @@ services: - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE - OTEL_RESOURCE_ATTRIBUTES - OTEL_SERVICE_NAME=frauddetectionservice + - INSTANA_SERVICE_NAME=frauddetectionservice depends_on: otelcol: condition: service_started @@ -264,6 +285,9 @@ services: # Frontend frontend: + env_file: + - .env + - instana/agent/.env image: ${IMAGE_NAME}:${IMAGE_VERSION}-frontend container_name: frontend build: @@ -295,6 +319,12 @@ services: - PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE - WEB_OTEL_SERVICE_NAME=frontend-web + - INSTANA_SERVICE_NAME=frontend + - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST} + - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT} + - INSTANA_DISABLE_TRACING=true + - INSTANA_EUM_URL + - INSTANA_EUM_KEY depends_on: adservice: condition: service_started @@ -323,6 +353,9 @@ services: build: context: ./ dockerfile: src/frontendproxy/Dockerfile + args: + - INSTANA_AGENT_KEY=${INSTANA_AGENT_KEY} + - INSTANA_DOWNLOAD_KEY=${INSTANA_DOWNLOAD_KEY} deploy: resources: limits: @@ -345,6 +378,7 @@ services: - OTEL_COLLECTOR_PORT_GRPC - OTEL_COLLECTOR_PORT_HTTP - ENVOY_PORT + - INSTANA_SERVICE_NAME=frontend-proxy depends_on: frontend: condition: service_started @@ -385,6 +419,10 @@ services: - OTEL_RESOURCE_ATTRIBUTES - OTEL_SERVICE_NAME=loadgenerator - PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + - INSTANA_SERVICE_NAME=loadgenerator + - INSTANA_DISABLE_AUTO_INSTR=true + - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST} + - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT} depends_on: frontend: condition: service_started @@ -412,6 +450,10 @@ services: - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE - OTEL_RESOURCE_ATTRIBUTES - OTEL_SERVICE_NAME=paymentservice + - INSTANA_SERVICE_NAME=paymentservice + - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST} + - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT} + - INSTANA_DISABLE_TRACING=true depends_on: otelcol: condition: service_started @@ -440,6 +482,9 @@ services: - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE - OTEL_RESOURCE_ATTRIBUTES - OTEL_SERVICE_NAME=productcatalogservice + - INSTANA_SERVICE_NAME=productcatalogservice + - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST} + - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT} depends_on: otelcol: condition: service_started @@ -467,6 +512,7 @@ services: - QUOTE_SERVICE_PORT - OTEL_RESOURCE_ATTRIBUTES - OTEL_SERVICE_NAME=quoteservice + - INSTANA_SERVICE_NAME=quoteservice - OTEL_PHP_INTERNAL_METRICS_ENABLED=true depends_on: otelcol: @@ -499,6 +545,11 @@ services: - OTEL_RESOURCE_ATTRIBUTES - 
OTEL_SERVICE_NAME=recommendationservice
       - PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
+      - INSTANA_SERVICE_NAME=recommendationservice
+      - AUTOWRAPT_BOOTSTRAP=instana
+      - INSTANA_DISABLE_AUTO_INSTR=true
+      - INSTANA_AGENT_HOST=${INSTANA_AGENT_HOST}
+      - INSTANA_AGENT_PORT=${INSTANA_AGENT_PORT}
     depends_on:
       featureflagservice:
         condition: service_started
@@ -530,6 +581,7 @@ services:
       - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:4317/v1/traces
       - OTEL_RESOURCE_ATTRIBUTES
       - OTEL_SERVICE_NAME=shippingservice
+      - INSTANA_SERVICE_NAME=shippingservice
     depends_on:
       otelcol:
         condition: service_started
@@ -552,6 +604,9 @@ services:
       - POSTGRES_USER=ffs
       - POSTGRES_DB=ffs
       - POSTGRES_PASSWORD=ffs
+      - INSTANA_SERVICE_NAME=ffs_postgres
+      # command: ["-c", "track_activities=on", "-c", "track_counts=on", "-c", "track_io_timing=on"]
+      # The above way of passing parameters isn't picked up by Instana. Must use a config file :(
     healthcheck:
       test: ["CMD-SHELL", "pg_isready -d ffs -U ffs"]
       interval: 10s
@@ -579,13 +634,14 @@ services:
       - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
       - OTEL_RESOURCE_ATTRIBUTES
       - OTEL_SERVICE_NAME=kafka
-      - KAFKA_HEAP_OPTS=-Xmx200m -Xms200m
+      - INSTANA_SERVICE_NAME=kafka
+      - KAFKA_HEAP_OPTS=-Xmx400m -Xms400m
     healthcheck:
       test: nc -z kafka 9092
-      start_period: 10s
+      start_period: 20s
       interval: 5s
       timeout: 10s
-      retries: 10
+      retries: 20
     logging: *logging

   # Redis used by Cart service
@@ -600,6 +656,8 @@ services:
     restart: unless-stopped
     ports:
       - "${REDIS_PORT}"
+    environment:
+      - INSTANA_SERVICE_NAME=redis-cart
     logging: *logging

@@ -660,12 +718,16 @@ services:
       - ./src/otelcollector/otelcol-config-extras.yml:/etc/otelcol-config-extras.yml
     ports:
       - "4317"          # OTLP over gRPC receiver
-      - "4318"          # OTLP over HTTP receiver
+      - "4318"          # OTLP over HTTP receiver
+    environment:
+      - HTTP_PROXY=${http_proxy:-}
+      - HTTPS_PROXY=${https_proxy:-}
+      - NO_PROXY=jaeger,${no_proxy:-}
+      - INSTANA_SERVICE_NAME=otelcol
+      - ENVOY_PORT
     depends_on:
       - jaeger
     logging: *logging
-    environment:
-      - ENVOY_PORT

   # Prometheus
   prometheus:
diff --git a/instana/README.md b/instana/README.md
new file mode 100644
index 0000000000..c4aeaf741c
--- /dev/null
+++ b/instana/README.md
@@ -0,0 +1,29 @@
# OTel logo + @instana OpenTelemetry Demo with Instana

This repository contains a custom fork of the [OpenTelemetry Astronomy Shop](https://github.com/open-telemetry/opentelemetry-demo) integrated with an Instana backend. The demo showcases native Instana OpenTelemetry data collection, correlation of OpenTelemetry tracing with the underlying monitored infrastructure, and an example of trace continuity between Instana-native tracing and OpenTelemetry. Details about the implementation are summarized in the [OpenTelemetry Demo with Instana](https://community.ibm.com/community/user/instana/blogs/petr-styblo/2022/12/21/opentelementry-demo?CommunityKey=58f324a3-3104-41be-9510-5b7c413cc48f) blog article.

To learn more about the demo application, please refer to the [upstream README](../README.md) and the [official demo documentation](https://opentelemetry.io/docs/demo/) available at the OpenTelemetry project site.
![otel-demo screenshot](../instana/screenshot.png)

Custom features include:

- instrumented demo services to enable Instana infrastructure monitoring
- disabled Instana-native tracing in agent and services configuration
- included ready-to-use agent configuration and docker-compose deployment files (in the [instana/agent](../instana/agent) folder)
- provided a custom Helm [configuration file](../instana/values-instana-agent.yaml) to deploy the demo in Kubernetes (excluding the Instana agent deployment)
- shipped pre-built custom demo container images
- replaced the OpenTelemetry tracer with the Instana tracer on the Envoy Frontend-Proxy service to demonstrate Instana cross-protocol trace continuity support and W3C context propagation
- provisioned the Frontend service with the EUM JavaScript, including code modifications to enable correlation between browser requests and the application backend
- [amended the OpenTelemetry Ecto library](../instana/customizations.md) to support span attributes for correct downstream linking with the Postgres database

## Running the demo

Instructions on how to run the demo are provided [here](../instana/deploy-build.md).

## Reporting issues

If you find a bug, or have a suggestion or question regarding the Instana-specific functionality, please open an issue [here](https://github.com/instana/opentelemetry-demo/issues). Problems related to the core demo application should generally be reported via the [upstream OTel Demo project](https://github.com/open-telemetry/opentelemetry-demo/issues). Please read the [troubleshooting tips](../instana/troubleshooting.md) before you file an issue.

## Contributing
Contributions are welcome - feel free to submit a pull request. You may find the upstream [CONTRIBUTING](https://github.com/instana/opentelemetry-demo/blob/main/CONTRIBUTING.md) guide useful for general guidance on setting up a development environment or submitting GitHub PRs.
diff --git a/instana/TODO b/instana/TODO
new file mode 100644
index 0000000000..28f355255c
--- /dev/null
+++ b/instana/TODO
@@ -0,0 +1,2 @@
- Describe multiple deployment scenarios using OTel Collector and provide respective Helm configuration files.
+- Expand the Troubleshooting guide diff --git a/instana/agent/.gitignore b/instana/agent/.gitignore new file mode 100644 index 0000000000..4c49bd78f1 --- /dev/null +++ b/instana/agent/.gitignore @@ -0,0 +1 @@ +.env diff --git a/instana/agent/configuration.yaml b/instana/agent/configuration.yaml new file mode 100644 index 0000000000..a7d433342e --- /dev/null +++ b/instana/agent/configuration.yaml @@ -0,0 +1,43 @@ +com.instana.plugin.opentelemetry: + # enabled: false # legacy setting, will only enable grpc, defaults to false + grpc: + enabled: true # takes precedence over legacy settings above, defaults to true iff "grpc:" is present + http: + enabled: true # allows to enable http endpoints, defaults to true iff "http:" is present + +com.instana.ignore: + arguments: + # - '-javaagent:/opt/otel/opentelemetry-javaagent-all.jar' + # - '-javaagent:/app/opentelemetry-javaagent.jar' + # - '-javaagent:/tmp/opentelemetry-javaagent.jar' + +com.instana.tracing: + extra-http-headers: + - traceparent + - tracestate + +com.instana.plugin.generic.hardware: + enabled: true + +# otel-demo configurations +com.instana.plugin.postgresql: + user: 'ffs' + password: 'ffs' + database: 'postgres' + +com.instana.plugin.javatrace: + instrumentation: + enabled: false + trace-jvms-with-problematic-agents: true + +com.instana.plugin.python: + autotrace: + enabled: false + +com.instana.plugin.php: + tracing: + enabled: false + +com.instana.plugin.netcore: + tracing: + enabled: false diff --git a/instana/agent/docker-compose.yml b/instana/agent/docker-compose.yml new file mode 100644 index 0000000000..f5fd1af1d8 --- /dev/null +++ b/instana/agent/docker-compose.yml @@ -0,0 +1,39 @@ +version: "3" +services: + instana-agent: + image: icr.io/instana/agent:latest + container_name: instana-agent + pid: "host" + privileged: true + network_mode: host + ipc: host + deploy: + resources: + limits: + memory: 768M + cpus: "1.5" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /dev:/dev + - /sys:/sys + - /var/log:/var/log + - ./configuration.yaml:/opt/instana/agent/etc/instana/configuration.yaml + #- ./com.instana.agent.main.sender.File.cfg:/opt/instana/agent/etc/instana/com.instana.agent.main.sender.File.cfg + environment: + - INSTANA_AGENT_ENDPOINT=${INSTANA_AGENT_ENDPOINT:?No agent endpoint provided} + - INSTANA_AGENT_ENDPOINT_PORT=${INSTANA_AGENT_ENDPOINT_PORT:-443} + - INSTANA_AGENT_KEY=${INSTANA_AGENT_KEY:?No agent key provided} + - INSTANA_DOWNLOAD_KEY=${INSTANA_DOWNLOAD_KEY} + - INSTANA_AGENT_UPDATES_VERSION + - INSTANA_AGENT_ZONE + # - INSTANA_MVN_REPOSITORY_FEATURES_PATH=artifactory/features-internal@id=features@snapshots@snapshotsUpdate=always + # - INSTANA_MVN_REPOSITORY_SHARED_PATH=artifactory/shared@id=shared@snapshots@snapshotsUpdate=always + - INSTANA_AGENT_PROXY_HOST + - INSTANA_AGENT_PROXY_PORT + - INSTANA_REPOSITORY_PROXY_ENABLED=${INSTANA_REPOSITORY_PROXY_ENABLED:-false} + - INSTANA_REPOSITORY_PROXY_HOST + - INSTANA_REPOSITORY_PROXY_PORT + - INSTANA_LOG_LEVEL + - HTTP_PROXY=${http_proxy:-} + - HTTPS_PROXY=${https_proxy:-} + - NO_PROXY=${no_proxy:-} diff --git a/instana/agent/instana-agent.env.template b/instana/agent/instana-agent.env.template new file mode 100644 index 0000000000..68a37184d5 --- /dev/null +++ b/instana/agent/instana-agent.env.template @@ -0,0 +1,21 @@ +# WARNING: DO NOT ENCLOSE THE VALUES IN QUOTES + +# Instana agent configuration +INSTANA_AGENT_KEY= +INSTANA_DOWNLOAD_KEY= +INSTANA_AGENT_ENDPOINT= # endpoint hostname or IP (without http:// prefix) +INSTANA_AGENT_ENDPOINT_PORT=1444 # 
443 for SaaS, 1444 for on-prem
INSTANA_AGENT_ZONE=otel-demo

# Optional HTTP proxy settings
#INSTANA_AGENT_PROXY_HOST=
#INSTANA_AGENT_PROXY_PORT=
#INSTANA_REPOSITORY_PROXY_HOST=
#INSTANA_REPOSITORY_PROXY_PORT=
#INSTANA_REPOSITORY_PROXY_ENABLED=false

INSTANA_LOG_LEVEL=INFO # INFO, DEBUG, TRACE, ERROR or OFF.

# EUM settings
INSTANA_EUM_URL=
INSTANA_EUM_KEY=
diff --git a/instana/customizations.md b/instana/customizations.md
new file mode 100644
index 0000000000..b59c82bc60
--- /dev/null
+++ b/instana/customizations.md
@@ -0,0 +1,11 @@
# Demo customizations

## Link calls from FeatureFlag to Postgres database
**Problem:** FeatureFlag service calls to the downstream PostgreSQL database aren't linked.
**Reason:** The current v1.0 release of the Erlang/Elixir [OpentelemetryEcto instrumentation library](https://github.com/open-telemetry/opentelemetry-erlang-contrib/tree/main/instrumentation/opentelemetry_ecto) doesn't yet add the OTel peer attributes `net.peer.host` and `net.peer.port`. These standardized attributes are used by Instana to correlate downstream services.
**Solution:** Although the instrumentation library provides other attributes with the downstream link details, it isn't possible to use plain OTel attributes for creating a custom service mapping via [manual service configuration](https://www.ibm.com/docs/en/instana-observability/current?topic=applications-services#link-calls-to-an-existing-database-or-messaging-service-that-is-created-from-a-monitored-infrastructure-entity). Therefore, in order to inject the required attributes into the generated spans, it was necessary to modify the OpentelemetryEcto library source and use the custom-built library in place of the default distribution package.

The [patched](https://github.com/styblope/opentelemetry_ecto/commit/0bc71d465621e6f76d71bc8d6d336011661eb754) OpentelemetryEcto library is available at https://github.com/styblope/opentelemetry_ecto. The rest of the solution involved changing the FeatureFlag service Elixir code dependencies and building a new custom image.

## Adding W3C context propagation to Envoy to enable cross-tracer trace continuity
To demonstrate context propagation across the Instana and OTel tracing implementations, we chose to instrument the `frontendproxy` service with the Instana native tracer. The Instana sensor supports W3C propagation headers, which is the default propagation header format used by OpenTelemetry. We use a custom build of the Instana Envoy sensor which supports W3C context propagation (a public release of the W3C-enabled sensor is due soon).
diff --git a/instana/deploy-build.md b/instana/deploy-build.md
new file mode 100644
index 0000000000..46f5243bda
--- /dev/null
+++ b/instana/deploy-build.md
@@ -0,0 +1,159 @@
# How to build and deploy the Instana demo

## Run the demo in Docker

Clone the repo:
```sh
git clone https://github.com/instana/opentelemetry-demo.git
cd opentelemetry-demo
```

#### Deploy the Instana agent
Create a new docker-compose environment file with your Instana backend connection and EUM website monitoring settings. The configuration values are also reused in building and running the demo containers. Use the template:
```sh
cd instana/agent
cp instana-agent.env.template .env
```

Run the agent (inside the `instana/agent` directory):
```sh
docker compose up -d
```

#### Launch the demo
Refer to the main demo [documentation](https://opentelemetry.io/docs/demo/docker-deployment/).
This is basically:
```sh
cd -   # move back to the opentelemetry-demo main directory
docker compose up --no-build -d
```

> **Notes:**
> - The `--no-build` flag is used to fetch released docker images instead of building from source. Removing the `--no-build` command-line option will rebuild all images from source. The image repository is defined in the [`.env`](../.env) file. See below for details on building the images.
> - You can configure the pre-injected Instana EUM JavaScript in the Frontend service by setting and exporting the `INSTANA_EUM_URL` and `INSTANA_EUM_KEY` environment variables in the shell before running the demo.
> - You can safely ignore any WARN messages related to `INSTANA_AGENT_KEY` and `INSTANA_DOWNLOAD_KEY`, as these are only important when building the demo images from source.

> **Tip:**
> You can run the demo in the foreground by omitting the `-d` parameter (`docker compose up`) to get the container logs dumped to the terminal so you can check for any errors.

## Deploy in Kubernetes

Create a namespace/project:
```sh
kubectl create namespace otel-demo

# or equivalently in OpenShift:
oc new-project otel-demo
```

In OpenShift, you must grant sufficient security privileges to allow the demo pods to run in the namespace under the demo's service account (the service account name is the same as the Helm release name).
```sh
oc get sa -n otel-demo
oc adm policy -n otel-demo add-scc-to-user anyuid -z my-otel-demo
```

Deploy the Instana agent via Helm or using an operator: use a standard installation according to the Instana documentation. Apply the demo-specific agent configuration as in [`instana/agent/configuration.yaml`](../instana/agent/configuration.yaml). These settings enable the OpenTelemetry ports, add service-specific settings for infrastructure monitoring, and suppress native Instana tracing.

The demo assumes that an Instana [agent Kubernetes service](https://www.ibm.com/docs/en/instana-observability/current?topic=agents-installing-host-agent-kubernetes#instana-agent-service) `instana-agent` is present in the `instana-agent` namespace. The agent service, besides exposing the standard Instana agent API endpoint, also provides the common OTLP endpoint for both the gRPC (port 4317) and HTTP (port 4318) protocols across all nodes. Be aware that, at the time of writing, the HTTP endpoint definition wasn't yet included in the public Instana agent Helm chart (and likely not in the Operator either). It is therefore best to create the service manually, for example with a manifest along these lines (a reconstruction following the linked Instana agent service documentation -- verify the selector and ports against your agent installation):
```yaml
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: instana-agent
  namespace: instana-agent
spec:
  selector:
    app.kubernetes.io/name: instana-agent
  # route each pod's traffic to the agent on the same node
  internalTrafficPolicy: Local
  ports:
    - name: agent-apis
      protocol: TCP
      port: 42699
      targetPort: 42699
    - name: otlp-grpc
      protocol: TCP
      port: 4317
      targetPort: 4317
    - name: otlp-http
      protocol: TCP
      port: 4318
      targetPort: 4318
EOF
```

> **Note:**
> We use a custom values file ([`values-instana-agent.yaml`](../instana/values-instana-agent.yaml)) with additional settings for the Instana agent to act as the default OTel traces and metrics receiver. There is no need to change the default values except when you want to use Instana website EUM; in this case edit the values file and fill in the corresponding values for the `INSTANA_EUM_URL` and `INSTANA_EUM_KEY` environment variables in the Frontend service component section, as shown in the sketch below. Alternatively, you can add these variables later after installation by editing the frontend service deployment (`kubectl edit deploy my-otel-demo-frontend`).
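For orientation, the two entries to append to the existing `components.frontend.env` list in `values-instana-agent.yaml` would look roughly like this (a sketch with hypothetical placeholder values):
```yaml
# placeholder values -- substitute the reporting URL and key from your
# Instana website monitoring configuration
- name: INSTANA_EUM_URL
  value: "https://eum-example.instana.io"
- name: INSTANA_EUM_KEY
  value: "xxxxxxxxxxxxxxxx"
```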
```sh
cd instana
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
helm repo update
# install the chart using the pinned chart version that corresponds to the 1.6 release
helm install my-otel-demo open-telemetry/opentelemetry-demo -f values-instana-agent.yaml --version 0.26.1
```

> **Tip:**
> You may occasionally get stuck with pods in the `ImagePullBackOff` state due to reaching DockerHub's pull rate limits. You can increase the limits by authenticating to Docker. Create a new secret in the demo namespace with your Docker credentials and attach the secret to the demo service account:
> ```sh
> kubectl create secret docker-registry my-docker-hub --docker-username <username> --docker-password <password> --docker-server docker.io
> kubectl patch serviceaccount my-otel-demo -p '{"imagePullSecrets": [{"name": "my-docker-hub"}]}'
> ```

In OpenShift, you can create a route for the `frontendproxy` service for easy access to the demo frontpage and featureflags services, instead of the `kubectl port-forward` approach that Helm suggests after installation. Using a TLS-terminated route endpoint enables better [correlation between EUM and backend tracing](https://www.ibm.com/docs/en/instana-observability/current?topic=websites-backend-correlation#backend-correlation).
```sh
oc create route edge my-otel-demo --service my-otel-demo-frontendproxy
```

## Build the demo images from source

This step only applies if you make custom changes in the demo source code and need to rebuild the docker images locally.

Before building the project, you first need to export your Instana instance `INSTANA_AGENT_KEY` and `INSTANA_DOWNLOAD_KEY` values to your shell environment. You can use the values from the previously configured agent's `.env` file and export them by running:
```sh
set -a
. ./instana/agent/.env
set +a
```

Build the demo:
```sh
docker compose build
```

If you plan to push the built images to a remote container registry, you should specify your registry domain in the `IMAGE_NAME` variable in the [`.env`](../.env) file.

To push the images to a remote registry, log in to the registry first (`docker login`, or `oc registry login` for OpenShift's internal registry) and run:
```sh
make push
```

#### If you are behind an HTTP proxy
Configure the proxy settings for the Docker daemon systemd service according to the [guide](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy); a sketch of the resulting drop-in is shown below.
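The linked guide boils down to a systemd drop-in unit plus a daemon restart, roughly as follows (a sketch reusing the proxy address from the client example below; adjust paths and addresses to your environment):
```ini
# /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://192.168.31.253:3128"
Environment="HTTPS_PROXY=http://192.168.31.253:3128"
Environment="NO_PROXY=localhost,127.0.0.0/8"
```
```sh
sudo systemctl daemon-reload
sudo systemctl restart docker
```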
Optionally (not needed for building and running `docker compose`), you may [configure your Docker client](https://docs.docker.com/network/proxy/) by adding the following snippet to `~/.docker/config.json`:
```json
{
 "proxies":
 {
   "default":
   {
     "httpProxy": "http://192.168.31.253:3128",
     "httpsProxy": "http://192.168.31.253:3128",
     "noProxy": "192.168.0.0/16,.tec.cz.ibm.com,127.0.0.0/8"
   }
 }
}
```

Build the demo with the `http_proxy` and `https_proxy` build arguments passed to `docker compose`:
```sh
docker compose build \
  --build-arg 'https_proxy=http://192.168.31.253:3128' \
  --build-arg 'http_proxy=http://192.168.31.253:3128'
```
diff --git a/instana/screenshot.png b/instana/screenshot.png
new file mode 100644
index 0000000000..d2756f2534
Binary files /dev/null and b/instana/screenshot.png differ
diff --git a/instana/troubleshooting.md b/instana/troubleshooting.md
new file mode 100644
index 0000000000..28c20560a2
--- /dev/null
+++ b/instana/troubleshooting.md
@@ -0,0 +1,35 @@
# Troubleshooting tips

## Kubernetes pods or containers are crashing
If some containers are crashing (CrashLoopBackOff), check the logs first to find any possible clues:
```sh
kubectl logs <pod-name>
```

Next, check the pod status:
```sh
kubectl describe <pod-name>
```

If the above commands aren't helpful in explaining the pod/container crashes, try increasing the container memory allocation limits, as this is one of the most common issues in some environments:
```sh
kubectl edit deployment <deployment-name>
```

## Pods are in ImagePullBackOff
This might happen because you've reached your DockerHub pull rate limits. You can increase the limits by authenticating to Docker. Create a new secret in the demo namespace with your Docker credentials and attach the secret to the demo service account:
```sh
kubectl create secret docker-registry my-docker-hub --docker-username <username> --docker-password <password> --docker-server docker.io
kubectl patch serviceaccount my-otel-demo -p '{"imagePullSecrets": [{"name": "my-docker-hub"}]}'
```

## No OpenTelemetry traces are received via the agent

- Make sure the agent configuration includes the correct settings to enable OpenTelemetry for both the gRPC and HTTP endpoints. The reference configuration is [instana/agent/configuration.yaml](../instana/agent/configuration.yaml).
- Double-check the Kubernetes service for the Instana agent and the OTLP endpoint environment variables in `.env` or in the Helm values file.

## Why do I see an "Unspecified" service in the Instana dependency graph?
Some calls reported by OTel spans don't have a downstream counterpart or correlation hints to classify a service. This is also the case for calls originating at Instana sensors (used for infrastructure monitoring) reporting back to the Instana backend. We can effectively treat these calls as 'synthetic' and suppress the respective endpoints via Services -> Configure Services -> Synthetic Endpoints -> "endpoint.name contains com.instana".

## Website tracing data is not visible in Instana even though I've properly set both INSTANA_EUM_URL and INSTANA_EUM_KEY
Your browser may be blocking cross-site content. The resource at “https://instana_eum_url:446/eum/eum.min.js”, requested during the webpage load, was probably blocked by the browser's security policy.
Disable the content blocking by adding a tracking exception for the Instana domain (see [here](https://developer.mozilla.org/en-US/docs/Web/Privacy/Firefox_tracking_protection) for Firefox).
diff --git a/instana/values-instana-agent.yaml b/instana/values-instana-agent.yaml
new file mode 100644
index 0000000000..a2da173fb3
--- /dev/null
+++ b/instana/values-instana-agent.yaml
@@ -0,0 +1,817 @@
default:
  # list of environment variables applied to all components
  env:
    - name: OTEL_SERVICE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: "metadata.labels['app.kubernetes.io/component']"
    # Instana defaults
    - name: INSTANA_SERVICE_NAME
      value: $(OTEL_SERVICE_NAME)
    - name: INSTANA_AGENT_HOST
      # value: instana-agent.instana-agent # Instana agent service
      valueFrom:
        fieldRef:
          fieldPath: status.hostIP
    - name: INSTANA_AGENT_PORT
      value: "42699"
    - name: OTEL_K8S_POD_UID
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.uid
    - name: OTEL_COLLECTOR_NAME
      # value: '{{ include "otel-demo.name" . }}-otelcol'
      value: $(INSTANA_AGENT_HOST)
    - name: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      value: cumulative
    - name: OTEL_RESOURCE_ATTRIBUTES
      value: service.name=$(OTEL_SERVICE_NAME),service.namespace=opentelemetry-demo
  # Allows overriding and additions to .Values.default.env
  envOverrides: []
  #  - name: OTEL_K8S_NODE_NAME
  #    value: "someConstantValue"
  image:
    repository: styblope/otel-demo
    # Overrides the image tag whose default is the chart appVersion.
    # The service's name will be applied to the end of this value.
    tag: "1.6.0"
    pullPolicy: IfNotPresent
    pullSecrets: []
  # Default number of replicas for all components
  replicas: 1
  # Default schedulingRules for all components
  schedulingRules:
    nodeSelector: {}
    affinity: {}
    tolerations: []
  # Default securityContext for all components
  securityContext: {}

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

components:
  ## Demo Components are named objects (services) with several properties
  # demoService:
  #   ## Enable the component (service)
  #   enabled: true
  #   useDefault:
  #     ## Use default environment variables
  #     env: true
  #   ## Override Image repository and Tag. Tag will use appVersion as default.
  #   ## Component's name will be applied to end of this value.
  #   imageOverride: {}
  #   ## Optional service definitions to apply
  #   service:
  #     ## Service Type to use for this component. Default is ClusterIP.
  #     type: ClusterIP
  #     ## Service Port to use to expose this component. Default is nil
  #     port: 8080
  #     ## Service Node Port to use to expose this component on a NodePort service. Default is nil
  #     nodePort: 30080
  #     ## Service Annotations to add to this component
  #     annotations: {}
  #   ## Additional service ports to use to expose this component
  #   ports:
  #     - name: extraServicePort
  #       value: 8081
  #   ## Environment variables to add to the component's pod
  #   env:
  #   ## Environment variables that upsert (append + merge) into the `env` specification for this component.
  #   envOverrides:
  #   ## Pod Scheduling rules for nodeSelector, affinity, or tolerations.
+ # schedulingRules: + # nodeSelector: {} + # affinity: {} + # tolerations: [] + ## Pod Annotations to add to this component + # podAnnotations: {} + ## Resources for this component + # resources: {} + ## Container security context for setting user ID (UID), group ID (GID) and other security policies + # securityContext: + ## Ingresses rules to add for the to the component + # ingress: + ## Enable the creation of Ingress rules. Default is false + # enabled: false + ## Annotations to add to the ingress rule + # annotations: {} + ## Which Ingress class (controller) to use. Default is unspecified. + # ingressClassName: nginx + ## Hosts definitions for the Ingress rule + # hosts: + # - host: demo.example.com + ## Each host can have multiple paths/routes + # paths: + # - path: / + # pathType: Prefix + # port: 8080 + ## Optional TLS specifications for the Ingress rule + # tls: + # - secretName: demo-tls + # hosts: + # - demo.example.com + ## Additional ingresses - only created if ingress.enabled is true + ## Useful for when differently annotated ingress services are required + ## Each additional ingress needs key "name" set to something unique + # additionalIngresses: [] + # - name: extra-demo-ingress + # ingressClassName: nginx + # annotations: {} + # hosts: + # - host: demo.example.com + # paths: + # - path: / + # pathType: Prefix + # port: 8080 + # tls: + # - secretName: demo-tls + # hosts: + # - demo.example.com + # # Command to use in the container spec, in case you don't want to go with the default command from the image. + # command: [] + # # Configuration to for this service; will create a ConfigMap, Volume, and Mount it into the container being spun up/. + # configuration: {} + # # Kubernetes container health check options + # livenessProbe: {} + # # Optional init container to run before the pod starts. + # initContainers: + # - name: + # image: + # command: [list of commands for the init container to run] + # # Replicas for the component + # replicas: 1 + accountingService: + enabled: true + useDefault: + env: true + env: + - name: KAFKA_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-kafka:9092' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 20Mi + initContainers: + - name: wait-for-kafka + image: busybox:latest + command: ['sh', '-c', 'until nc -z -v -w30 {{ include "otel-demo.name" . }}-kafka 9092; do echo waiting for kafka; sleep 2; done;'] + + adService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: AD_SERVICE_PORT + value: "8080" + - name: FEATURE_FLAG_GRPC_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-featureflagservice:50053' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: OTLP_LOGS_EXPORTER + value: otlp + resources: + limits: + memory: 300Mi + + cartService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: CART_SERVICE_PORT + value: "8080" + - name: ASPNETCORE_URLS + value: http://*:$(CART_SERVICE_PORT) + - name: FEATURE_FLAG_GRPC_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-featureflagservice:50053' + - name: REDIS_ADDR + value: '{{ include "otel-demo.name" . 
}}-redis:6379' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: DOTNET_STARTUP_HOOKS + value: /usr/src/app/Instana.Tracing.Core.dll + # - name: CORECLR_ENABLE_PROFILING + # value: 1 + # - name: CORECLR_PROFILER + # value: '{cf0d821e-299b-5307-a3d8-b283c03916dd}' + # - name: CORECLR_PROFILER_PATH + # value: /usr/src/app/instana_tracing/CoreProfiler.so + resources: + limits: + memory: 160Mi + initContainers: + - name: wait-for-redis + image: busybox:latest + command: ['sh', '-c', 'until nc -z -v -w30 {{ include "otel-demo.name" . }}-redis 6379; do echo waiting for redis; sleep 2; done;'] + + checkoutService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: CHECKOUT_SERVICE_PORT + value: "8080" + - name: CART_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-cartservice:8080' + - name: CURRENCY_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-currencyservice:8080' + - name: EMAIL_SERVICE_ADDR + value: 'http://{{ include "otel-demo.name" . }}-emailservice:8080' + - name: PAYMENT_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-paymentservice:8080' + - name: PRODUCT_CATALOG_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-productcatalogservice:8080' + - name: SHIPPING_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-shippingservice:8080' + - name: KAFKA_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-kafka:9092' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 20Mi + initContainers: + - name: wait-for-kafka + image: busybox:latest + command: ['sh', '-c', 'until nc -z -v -w30 {{ include "otel-demo.name" . }}-kafka 9092; do echo waiting for kafka; sleep 2; done;'] + + currencyService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: CURRENCY_SERVICE_PORT + value: "8080" + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 20Mi + + emailService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: EMAIL_SERVICE_PORT + value: "8080" + - name: APP_ENV + value: production + - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4318/v1/traces + resources: + limits: + memory: 100Mi + + featureflagService: + enabled: true + useDefault: + env: true + ports: + - name: grpc + value: 50053 + - name: http + value: 8081 + env: + - name: FEATURE_FLAG_SERVICE_PORT + value: "8081" + - name: FEATURE_FLAG_GRPC_SERVICE_PORT + value: "50053" + - name: DATABASE_URL + value: 'ecto://ffs:ffs@{{ include "otel-demo.name" . }}-ffspostgres:5432/ffs' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + value: grpc + resources: + limits: + memory: 175Mi + livenessProbe: + httpGet: + path: /featureflags/ + port: 8081 + initialDelaySeconds: 30 + periodSeconds: 10 + initContainers: + - name: wait-for-ffspostgres + image: busybox:latest + command: ['sh', '-c', 'until nc -z -v -w30 {{ include "otel-demo.name" . }}-ffspostgres 5432; do echo waiting for ffspostgres; sleep 2; done'] + + frauddetectionService: + enabled: true + useDefault: + env: true + env: + - name: KAFKA_SERVICE_ADDR + value: '{{ include "otel-demo.name" . 
}}-kafka:9092' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 200Mi + initContainers: + - name: wait-for-kafka + image: busybox:latest + command: ['sh', '-c', 'until nc -z -v -w30 {{ include "otel-demo.name" . }}-kafka 9092; do echo waiting for kafka; sleep 2; done;'] + + frontend: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: FRONTEND_PORT + value: "8080" + - name: FRONTEND_ADDR + value: :8080 + - name: AD_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-adservice:8080' + - name: CART_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-cartservice:8080' + - name: CHECKOUT_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-checkoutservice:8080' + - name: CURRENCY_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-currencyservice:8080' + - name: PRODUCT_CATALOG_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-productcatalogservice:8080' + - name: RECOMMENDATION_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-recommendationservice:8080' + - name: SHIPPING_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-shippingservice:8080' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: WEB_OTEL_SERVICE_NAME + value: frontend-web + - name: PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + value: http://localhost:8080/otlp-http/v1/traces # This expects users to use `kubectl port-forward ...` + resources: + limits: + memory: 200Mi + securityContext: + runAsUser: 1001 # nextjs + runAsGroup: 1001 + runAsNonRoot: true + + frontendProxy: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: ENVOY_PORT + value: "8080" + - name: FRONTEND_PORT + value: "8080" + - name: FRONTEND_HOST + value: '{{ include "otel-demo.name" . }}-frontend' + - name: FEATURE_FLAG_SERVICE_PORT + value: "8081" + - name: FEATURE_FLAG_SERVICE_HOST + value: '{{ include "otel-demo.name" . }}-featureflagservice' + - name: LOCUST_WEB_PORT + value: "8089" + - name: LOCUST_WEB_HOST + value: '{{ include "otel-demo.name" . }}-loadgenerator' + - name: GRAFANA_SERVICE_PORT + value: "80" + - name: GRAFANA_SERVICE_HOST + value: '{{ include "otel-demo.name" . }}-grafana' + - name: JAEGER_SERVICE_PORT + value: "16686" + - name: JAEGER_SERVICE_HOST + value: '{{ include "otel-demo.name" . }}-jaeger-query' + - name: OTEL_COLLECTOR_PORT_GRPC + value: "4317" + - name: OTEL_COLLECTOR_PORT_HTTP + value: "4318" + - name: OTEL_COLLECTOR_HOST + value: $(OTEL_COLLECTOR_NAME) + resources: + limits: + memory: 50Mi + securityContext: + runAsUser: 101 # envoy + runAsGroup: 101 + runAsNonRoot: true + + loadgenerator: + enabled: true + useDefault: + env: true + service: + port: 8089 + env: + - name: LOCUST_WEB_PORT + value: "8089" + - name: LOCUST_USERS + value: "10" + - name: LOCUST_SPAWN_RATE + value: "1" + - name: LOCUST_HOST + value: 'http://{{ include "otel-demo.name" . 
}}-frontendproxy:8080' + - name: LOCUST_HEADLESS + value: "false" + - name: LOCUST_AUTOSTART + value: "true" + - name: PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION + value: python + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 120Mi + + paymentService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: PAYMENT_SERVICE_PORT + value: "8080" + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: INSTANA_DISABLE_TRACING + value: "true" + resources: + limits: + memory: 120Mi + securityContext: + runAsUser: 1000 # node + runAsGroup: 1000 + runAsNonRoot: true + + productCatalogService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: PRODUCT_CATALOG_SERVICE_PORT + value: "8080" + - name: FEATURE_FLAG_GRPC_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-featureflagservice:50053' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 20Mi + + quoteService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: QUOTE_SERVICE_PORT + value: "8080" + - name: OTEL_PHP_AUTOLOAD_ENABLED + value: "true" + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4318 + resources: + limits: + memory: 40Mi + securityContext: + runAsUser: 33 # www-data + runAsGroup: 33 + runAsNonRoot: true + + recommendationService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: RECOMMENDATION_SERVICE_PORT + value: "8080" + - name: PRODUCT_CATALOG_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-productcatalogservice:8080' + - name: FEATURE_FLAG_GRPC_SERVICE_ADDR + value: '{{ include "otel-demo.name" . }}-featureflagservice:50053' + - name: OTEL_PYTHON_LOG_CORRELATION + value: "true" + - name: PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION + value: python + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: AUTOWRAPT_BOOTSTRAP + value: instana + - name: INSTANA_DISABLE_AUTO_INSTR + value: "true" + resources: + limits: + memory: 500Mi # This is high to enable supporting the recommendationCache feature flag use case + + shippingService: + enabled: true + useDefault: + env: true + service: + port: 8080 + env: + - name: SHIPPING_SERVICE_PORT + value: "8080" + - name: QUOTE_SERVICE_ADDR + value: 'http://{{ include "otel-demo.name" . }}-quoteservice:8080' + - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317/v1/traces + resources: + limits: + memory: 20Mi + + ffsPostgres: + enabled: true + useDefault: + env: true + imageOverride: + repository: "postgres" + tag: "14" + replicas: 1 + ports: + - name: postgres + value: 5432 + env: + - name: POSTGRES_DB + value: ffs + - name: POSTGRES_USER + value: ffs + - name: POSTGRES_PASSWORD + value: ffs + resources: + limits: + memory: 120Mi + securityContext: + runAsUser: 999 # postgres + runAsGroup: 999 + runAsNonRoot: true + + kafka: + enabled: true + useDefault: + env: true + replicas: 1 + ports: + - name: plaintext + value: 9092 + - name: controller + value: 9093 + env: + - name: KAFKA_ADVERTISED_LISTENERS + value: 'PLAINTEXT://{{ include "otel-demo.name" . 
}}-kafka:9092' + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4317 + - name: KAFKA_HEAP_OPTS + value: "-Xmx200M -Xms200M" + resources: + limits: + memory: 500Mi + securityContext: + runAsUser: 1000 # appuser + runAsGroup: 1000 + runAsNonRoot: true + + redis: + enabled: true + useDefault: + env: true + imageOverride: + repository: "redis" + tag: "alpine" + replicas: 1 + ports: + - name: redis + value: 6379 + resources: + limits: + memory: 20Mi + securityContext: + runAsUser: 999 # redis + runAsGroup: 1000 + runAsNonRoot: true + +opentelemetry-collector: + enabled: false + nameOverride: otelcol + mode: deployment + presets: + kubernetesAttributes: + enabled: true + resources: + limits: + memory: 200Mi + service: + type: ClusterIP + ports: + metrics: + enabled: true + prometheus: + enabled: true + containerPort: 9464 + servicePort: 9464 + protocol: TCP + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9464" + opentelemetry_community_demo: "true" + config: + receivers: + otlp: + protocols: + http: + # Since this collector needs to receive data from the web, enable cors for all origins + # `allowed_origins` can be refined for your deployment domain + cors: + allowed_origins: + - "http://*" + - "https://*" + + exporters: + ## Create an exporter to Jaeger using the standard `otlp` export format + otlp: + endpoint: '{{ include "otel-demo.name" . }}-jaeger-collector:4317' + tls: + insecure: true + # Create an exporter to Prometheus (metrics) + otlphttp/prometheus: + endpoint: 'http://{{ include "otel-demo.name" . }}-prometheus-server:9090/api/v1/otlp' + tls: + insecure: true + + processors: + resource: + attributes: + - key: service.instance.id + from_attribute: k8s.pod.uid + action: insert + filter/ottl: + error_mode: ignore + metrics: + metric: + # FIXME: remove when a Metrics View is implemented in the checkout and productcatalog components + # or when this issue is resolved: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3071 + - 'name == "rpc.server.duration"' + transform: + metric_statements: + - context: metric + statements: + # FIXME: remove when this issue is resolved: https://github.com/open-telemetry/opentelemetry-java/issues/4834 + - set(description, "") where name == "queueSize" + # FIXME: remove when these 2 issues are resolved: + # Java: https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/9478 + # Go: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4301 + - set(description, "") where name == "rpc.server.duration" + # FIXME: remove when this issue is resolved: https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1958 + - set(description, "") where name == "http.client.duration" + + connectors: + spanmetrics: + + service: + pipelines: + traces: + processors: [memory_limiter, resource, batch] + exporters: [otlp, debug, spanmetrics] + metrics: + receivers: [otlp, spanmetrics] + processors: [memory_limiter, filter/ottl, transform, resource, batch] + exporters: [otlphttp/prometheus, debug] + +jaeger: + enabled: false + provisionDataStore: + cassandra: false + allInOne: + enabled: true + args: + - "--memory.max-traces=8000" + - "--query.base-path=/jaeger/ui" + - "--prometheus.server-url=http://{{ include \"otel-demo.name\" . 
}}-prometheus-server:9090" + - "--prometheus.query.normalize-calls=true" + - "--prometheus.query.normalize-duration=true" + extraEnv: + - name: METRICS_STORAGE_TYPE + value: prometheus + resources: + limits: + memory: 300Mi + storage: + type: none + agent: + enabled: false + collector: + enabled: false + query: + enabled: false + +prometheus: + enabled: false + alertmanager: + enabled: false + configmapReload: + prometheus: + enabled: false + kube-state-metrics: + enabled: false + prometheus-node-exporter: + enabled: false + prometheus-pushgateway: + enabled: false + + server: + extraFlags: + - "enable-feature=exemplar-storage" + - "enable-feature=otlp-write-receiver" + persistentVolume: + enabled: false + service: + servicePort: 9090 + resources: + limits: + memory: 300Mi + + serverFiles: + prometheus.yml: + scrape_configs: [] + +grafana: + enabled: false + grafana.ini: + auth: + disable_login_form: true + auth.anonymous: + enabled: true + org_name: Main Org. + org_role: Admin + server: + root_url: "%(protocol)s://%(domain)s:%(http_port)s/grafana" + serve_from_sub_path: true + adminPassword: admin + datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Prometheus + uid: webstore-metrics + type: prometheus + url: 'http://{{ include "otel-demo.name" . }}-prometheus-server:9090' + editable: true + isDefault: true + jsonData: + exemplarTraceIdDestinations: + - datasourceUid: webstore-traces + name: trace_id + + - url: http://localhost:8080/jaeger/ui/trace/$${__value.raw} + name: trace_id + urlDisplayLabel: View in Jaeger UI + + - name: Jaeger + uid: webstore-traces + type: jaeger + url: 'http://{{ include "otel-demo.name" . }}-jaeger-query:16686/jaeger/ui' + editable: true + isDefault: false + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default + dashboardsConfigMaps: + default: '{{ include "otel-demo.name" . }}-grafana-dashboards' + resources: + limits: + memory: 150Mi diff --git a/src/accountingservice/Dockerfile b/src/accountingservice/Dockerfile index d4c7dc5b24..c15633ab4d 100644 --- a/src/accountingservice/Dockerfile +++ b/src/accountingservice/Dockerfile @@ -6,6 +6,7 @@ FROM golang:1.21.4-alpine AS builder WORKDIR /usr/src/app/ COPY ./src/accountingservice/ ./ +RUN go get github.com/instana/go-sensor RUN go build -o /go/bin/accountingservice/ # ----------------------------------------------------------------------------- diff --git a/src/accountingservice/main.go b/src/accountingservice/main.go index 60f966b06d..7916b3c2d2 100644 --- a/src/accountingservice/main.go +++ b/src/accountingservice/main.go @@ -22,6 +22,7 @@ import ( sdkresource "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" + instana "github.com/instana/go-sensor" "github.com/open-telemetry/opentelemetry-demo/src/accountingservice/kafka" ) @@ -77,6 +78,9 @@ func initTracerProvider() (*sdktrace.TracerProvider, error) { } func main() { + // Instana instrumentation + instana.InitSensor(instana.DefaultOptions()) + tp, err := initTracerProvider() if err != nil { log.Fatal(err) diff --git a/src/adservice/Dockerfile b/src/adservice/Dockerfile index 425960e9c0..b9fe79ae9d 100644 --- a/src/adservice/Dockerfile +++ b/src/adservice/Dockerfile @@ -9,6 +9,9 @@ WORKDIR /usr/src/app/ COPY ./src/adservice/gradlew* ./src/adservice/settings.gradle* ./src/adservice/build.gradle . 
COPY ./src/adservice/gradle ./gradle +COPY ./src/adservice/http_proxy.sh . +RUN ./http_proxy.sh + RUN ./gradlew RUN ./gradlew downloadRepos diff --git a/src/adservice/http_proxy.sh b/src/adservice/http_proxy.sh new file mode 100755 index 0000000000..ced417c510 --- /dev/null +++ b/src/adservice/http_proxy.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +if [ -n "$http_proxy" ] +then + host=$(echo $http_proxy | awk -F'[/:]' '{print $4}') + port=$(echo $http_proxy | awk -F'[/:]' '{print $5}') + + echo "systemProp.http.proxyHost=$host" >> gradle.properties + echo "systemProp.http.proxyPort=$port" >> gradle.properties +fi + +if [ -n "$https_proxy" ] +then + host=$(echo $https_proxy | awk -F'[/:]' '{print $4}') + port=$(echo $https_proxy | awk -F'[/:]' '{print $5}') + + echo "systemProp.https.proxyHost=$host" >> gradle.properties + echo "systemProp.https.proxyPort=$port" >> gradle.properties +fi diff --git a/src/cartservice/src/Dockerfile b/src/cartservice/src/Dockerfile index 38c39366dc..9470d820ac 100644 --- a/src/cartservice/src/Dockerfile +++ b/src/cartservice/src/Dockerfile @@ -30,13 +30,14 @@ RUN \ RUN \ RUNTIME_IDENTIIFER=linux-musl-x64; \ if [ "$(uname -m)" = "aarch64" ]; then RUNTIME_IDENTIIFER=linux-musl-arm64; fi; \ - dotnet publish ./src/cartservice.csproj -v d -p:PublishSingleFile=true -r $RUNTIME_IDENTIIFER --self-contained true -p:PublishTrimmed=False -p:TrimMode=Link -c Release -o /cartservice --no-restore + dotnet publish ./src/cartservice.csproj -v d -p:PublishSingleFile=false -r $RUNTIME_IDENTIIFER --self-contained true -p:PublishTrimmed=False -p:TrimMode=Link -c Release -o /cartservice --no-restore # ----------------------------------------------------------------------------- # https://mcr.microsoft.com/v2/dotnet/runtime-deps/tags/list FROM mcr.microsoft.com/dotnet/runtime-deps:7.0.4-alpine3.16 +RUN apk add bash WORKDIR /usr/src/app/ COPY --from=builder /cartservice/ ./ diff --git a/src/cartservice/src/cartservice.csproj b/src/cartservice/src/cartservice.csproj index 0274442663..9194b20784 100644 --- a/src/cartservice/src/cartservice.csproj +++ b/src/cartservice/src/cartservice.csproj @@ -18,6 +18,8 @@ + + diff --git a/src/checkoutservice/Dockerfile b/src/checkoutservice/Dockerfile index 1881c5412c..549139f89e 100644 --- a/src/checkoutservice/Dockerfile +++ b/src/checkoutservice/Dockerfile @@ -6,6 +6,7 @@ FROM golang:1.21.4-alpine AS builder WORKDIR /usr/src/app/ COPY ./src/checkoutservice/ ./ +RUN go get github.com/instana/go-sensor RUN go build -o /go/bin/checkoutservice/ # ----------------------------------------------------------------------------- diff --git a/src/checkoutservice/main.go b/src/checkoutservice/main.go index 847514b352..300ee809e5 100644 --- a/src/checkoutservice/main.go +++ b/src/checkoutservice/main.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" + instana "github.com/instana/go-sensor" pb "github.com/open-telemetry/opentelemetry-demo/src/checkoutservice/genproto/oteldemo" "github.com/open-telemetry/opentelemetry-demo/src/checkoutservice/kafka" "github.com/open-telemetry/opentelemetry-demo/src/checkoutservice/money" @@ -133,6 +134,9 @@ type checkoutService struct { } func main() { + // Instana instrumentation + instana.InitSensor(instana.DefaultOptions()) + var port string mustMapEnv(&port, "CHECKOUT_SERVICE_PORT") diff --git a/src/emailservice/Gemfile b/src/emailservice/Gemfile index f4548eab3d..7c9937528c 100644 --- a/src/emailservice/Gemfile +++ b/src/emailservice/Gemfile @@ -10,3 +10,4 @@ gem "sinatra", "~> 3.0" gem 
"opentelemetry-sdk", "~> 1.2" gem "opentelemetry-exporter-otlp", "~> 0.24" gem "opentelemetry-instrumentation-all", "~> 0.39" +gem "instana" diff --git a/src/emailservice/email_server.rb b/src/emailservice/email_server.rb index ae0291d466..5ea791e015 100644 --- a/src/emailservice/email_server.rb +++ b/src/emailservice/email_server.rb @@ -8,6 +8,7 @@ require "opentelemetry/sdk" require "opentelemetry/exporter/otlp" require "opentelemetry/instrumentation/sinatra" +require "instana" set :port, ENV["EMAIL_SERVICE_PORT"] @@ -15,6 +16,8 @@ c.use "OpenTelemetry::Instrumentation::Sinatra" end +::Instana.config[:tracing][:enabled] = false # default true + post "/send_order_confirmation" do data = JSON.parse(request.body.read, object_class: OpenStruct) diff --git a/src/featureflagservice/Dockerfile b/src/featureflagservice/Dockerfile index f2c369c58a..11c8faf260 100644 --- a/src/featureflagservice/Dockerfile +++ b/src/featureflagservice/Dockerfile @@ -1,4 +1,4 @@ -# Copyright The OpenTelemetry Authors + # Copyright The OpenTelemetry Authors # SPDX-License-Identifier: Apache-2.0 @@ -53,6 +53,10 @@ RUN mkdir config # to ensure any relevant config change will trigger the dependencies # to be re-compiled. COPY ./src/featureflagservice/config/config.exs ./src/featureflagservice/config/${MIX_ENV}.exs config/ + +# patch opentelemetry_ecto to enable Instana downstream correlation +RUN sed -i 's/base_attributes = \%{/&\n "db.system": "postgres",\n "net.peer.port": "5432",\n "net.peer.name": repo.config()[:hostname],/' ./deps/opentelemetry_ecto/lib/opentelemetry_ecto.ex + RUN mix deps.compile COPY ./src/featureflagservice/priv priv diff --git a/src/frauddetectionservice/Dockerfile b/src/frauddetectionservice/Dockerfile index 925728a30c..5bb9c3a7d9 100644 --- a/src/frauddetectionservice/Dockerfile +++ b/src/frauddetectionservice/Dockerfile @@ -8,6 +8,7 @@ WORKDIR /usr/src/app/ COPY ./src/frauddetectionservice/ ./ COPY ./pb/ ./src/main/proto/ +RUN ./http_proxy.sh RUN gradle shadowJar # ----------------------------------------------------------------------------- diff --git a/src/frauddetectionservice/http_proxy.sh b/src/frauddetectionservice/http_proxy.sh new file mode 100755 index 0000000000..ced417c510 --- /dev/null +++ b/src/frauddetectionservice/http_proxy.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +if [ -n "$http_proxy" ] +then + host=$(echo $http_proxy | awk -F'[/:]' '{print $4}') + port=$(echo $http_proxy | awk -F'[/:]' '{print $5}') + + echo "systemProp.http.proxyHost=$host" >> gradle.properties + echo "systemProp.http.proxyPort=$port" >> gradle.properties +fi + +if [ -n "$https_proxy" ] +then + host=$(echo $https_proxy | awk -F'[/:]' '{print $4}') + port=$(echo $https_proxy | awk -F'[/:]' '{print $5}') + + echo "systemProp.https.proxyHost=$host" >> gradle.properties + echo "systemProp.https.proxyPort=$port" >> gradle.properties +fi diff --git a/src/frontend/Dockerfile b/src/frontend/Dockerfile index ca4ea57c1d..deddeef168 100644 --- a/src/frontend/Dockerfile +++ b/src/frontend/Dockerfile @@ -9,6 +9,7 @@ WORKDIR /app COPY ./src/frontend/package*.json ./ RUN npm ci +RUN npm install --save @instana/collector FROM node:18-alpine AS builder RUN apk add --no-cache libc6-compat protobuf-dev protoc @@ -42,5 +43,6 @@ USER nextjs ENV PORT 8080 EXPOSE ${PORT} +ENV NODE_OPTIONS="--require ./node_modules/@instana/collector/src/immediate" ENTRYPOINT npm start diff --git a/src/frontend/middleware.ts b/src/frontend/middleware.ts new file mode 100644 index 0000000000..452ef6469b --- /dev/null +++ b/src/frontend/middleware.ts @@ 
diff --git a/src/frontend/middleware.ts b/src/frontend/middleware.ts
new file mode 100644
index 0000000000..452ef6469b
--- /dev/null
+++ b/src/frontend/middleware.ts
@@ -0,0 +1,13 @@
+import { NextResponse } from 'next/server'
+import type { NextRequest } from 'next/server'
+
+export function middleware(request: NextRequest) {
+
+  const response = NextResponse.next()
+
+  // set Instana EUM server-timing response header
+  const traceId = request.headers.get('x-instana-t') || ''
+  response.headers.set('Server-Timing', `intid;desc=${traceId}`)
+
+  return response
+}
diff --git a/src/frontend/pages/_app.tsx b/src/frontend/pages/_app.tsx
index e5e1523277..0d81b37b5b 100755
--- a/src/frontend/pages/_app.tsx
+++ b/src/frontend/pages/_app.tsx
@@ -17,11 +17,16 @@ declare global {
       NEXT_PUBLIC_PLATFORM?: string;
       NEXT_PUBLIC_OTEL_SERVICE_NAME?: string;
       NEXT_PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT?: string;
+      INSTANA_EUM_URL?: string;
+      INSTANA_EUM_KEY?: string;
     };
   }
 }

-if (typeof window !== 'undefined') {
+if (typeof window !== 'undefined' && (window.ENV.INSTANA_EUM_KEY === '' ||
+    window.ENV.INSTANA_EUM_KEY === 'undefined' ||
+    window.ENV.INSTANA_EUM_URL === '' ||
+    window.ENV.INSTANA_EUM_URL === 'undefined')) {
   const collector = getCookie('otelCollectorUrl')?.toString() || '';
   FrontendTracer(collector);
 }
diff --git a/src/frontend/pages/_document.tsx b/src/frontend/pages/_document.tsx
index db6dac7e91..e56667aefd 100644
--- a/src/frontend/pages/_document.tsx
+++ b/src/frontend/pages/_document.tsx
@@ -3,18 +3,22 @@
 import Document, { DocumentContext, Html, Head, Main, NextScript } from 'next/document';
 import { ServerStyleSheet } from 'styled-components';
+import Script from 'next/script'

 const { ENV_PLATFORM, WEB_OTEL_SERVICE_NAME, PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT } = process.env;
+const { INSTANA_EUM_URL, INSTANA_EUM_KEY } = process.env;
 const envString = `
 window.ENV = {
   NEXT_PUBLIC_PLATFORM: '${ENV_PLATFORM}',
   NEXT_PUBLIC_OTEL_SERVICE_NAME: '${WEB_OTEL_SERVICE_NAME}',
   NEXT_PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: '${PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}',
+  INSTANA_EUM_URL: '${INSTANA_EUM_URL}',
+  INSTANA_EUM_KEY: '${INSTANA_EUM_KEY}',
 };
 `;

-export default class MyDocument extends Document<{ envString: string }> {
+export default class MyDocument extends Document<{ envString: string, traceId: string }> {
   static async getInitialProps(ctx: DocumentContext) {
     const sheet = new ServerStyleSheet();
     const originalRenderPage = ctx.renderPage;
@@ -26,10 +30,12 @@
       });

       const initialProps = await Document.getInitialProps(ctx);
+      const traceId = ctx.req?.headers['x-instana-t'] || ''

       return {
         ...initialProps,
         styles: [initialProps.styles, sheet.getStyleElement()],
         envString,
+        traceId,
       };
     } finally {
       sheet.seal();
@@ -47,6 +53,16 @@
           rel="stylesheet"
         />
         <title>OTel demo</title>
+
+
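The final hunk is truncated here, but the Script import and the INSTANA_EUM_URL / INSTANA_EUM_KEY plumbing above indicate it injects the Instana EUM (website monitoring) snippet into the document head. A hypothetical sketch of such an injection, using Instana's documented ineum() boot snippet; the exact markup of the original patch is not recoverable from this diff, and the component name below is invented for illustration:

    // InstanaEum.tsx -- hypothetical helper, rendered inside <Head> of _document.tsx
    import Script from 'next/script';

    export function InstanaEum({ reportingUrl, apiKey }: { reportingUrl: string; apiKey: string }) {
      return (
        <>
          <Script id="instana-eum" strategy="beforeInteractive">
            {`
              (function(s,t,a,n){s[t]||(s[t]=a,n=s[a]=function(){n.q.push(arguments)},
              n.q=[],n.v=2,n.l=1*new Date)})(window,'InstanaEumObject','ineum');
              ineum('reportingUrl', '${reportingUrl}'); // INSTANA_EUM_URL
              ineum('key', '${apiKey}');                // INSTANA_EUM_KEY
              ineum('trackSessions');
            `}
          </Script>
          {/* the EUM agent itself; the SaaS URL here is illustrative */}
          <Script src="https://eum.instana.io/eum.min.js" strategy="beforeInteractive" />
        </>
      );
    }

The traceId captured in getInitialProps and the Server-Timing header set by middleware.ts (intid;desc=<trace-id>) expose the backend trace ID to the page, so browser beacons can be correlated with the server-side trace started by @instana/collector.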