diff --git a/docs/shipping/App360/App360.md b/docs/shipping/App360/App360.md
index 66a06f87..9a5d56cb 100644
--- a/docs/shipping/App360/App360.md
+++ b/docs/shipping/App360/App360.md
@@ -19,6 +19,8 @@ drop_filter: []
 
 [App360](https://docs.logz.io/docs/user-guide/distributed-tracing/spm/) is a high-level monitoring dashboard within Logz.io that enables you to monitor your operations. This integration allows you to configure the OpenTelemetry collector to send data from your OpenTelemetry installation to Logz.io using App360.
+
+
 
 ## Architecture overview
 
 This integration is based on OpenTelemetry and includes configuring the OpenTelemetry collector to receive data generated by your application instrumentation and send it to Logz.io using App360
@@ -32,7 +34,9 @@ This integration uses OpenTelemetry Collector Contrib, not the OpenTelemetry Col
 
 
 
-## Set up your locally hosted OpenTelemetry installation to send App360 data to Logz.io
+
+
+## Set up your locally hosted OpenTelemetry
 
 **Before you begin, you'll need**:
 
@@ -49,14 +53,14 @@ You can either download the OpenTelemetry collector to your local host or run th
 
 ##### Download locally
 
-Create a dedicated directory on the host of your application and download the [OpenTelemetry collector](https://github.com/open-telemetry/opentelemetry-collector/releases/tag/cmd%2Fbuilder%2Fv0.73.0) that is relevant to the operating system of your host.
+Create a dedicated directory on the host of your application and download the [OpenTelemetry collector](https://github.com/open-telemetry/opentelemetry-collector/releases) that is relevant to the operating system of your host.
 
 ##### Run as a Docker container
 
 In the same Docker network as your application:
 
 ```shell
-docker pull otel/opentelemetry-collector-contrib:0.73.0
+docker pull otel/opentelemetry-collector-contrib:0.105.0
 ```
 
 :::note
@@ -80,33 +84,33 @@ connectors:
   spanmetrics:
     aggregation_temporality: AGGREGATION_TEMPORALITY_CUMULATIVE
     dimensions:
-    - name: rpc.grpc.status_code
-    - name: http.method
-    - name: http.status_code
-    - name: cloud.provider
-    - name: cloud.region
-    - name: db.system
-    - name: messaging.system
-    - default: DEV
-      name: env_id
+      - name: rpc.grpc.status_code
+      - name: http.method
+      - name: http.status_code
+      - name: cloud.provider
+      - name: cloud.region
+      - name: db.system
+      - name: messaging.system
+      - default: DEV
+        name: env_id
     dimensions_cache_size: 100000
     histogram:
       explicit:
         buckets:
-        - 2ms
-        - 8ms
-        - 50ms
-        - 100ms
-        - 200ms
-        - 500ms
-        - 1s
-        - 5s
-        - 10s
+          - 2ms
+          - 8ms
+          - 50ms
+          - 100ms
+          - 200ms
+          - 500ms
+          - 1s
+          - 5s
+          - 10s
     metrics_expiration: 5m
     resource_metrics_key_attributes:
-    - service.name
-    - telemetry.sdk.language
-    - telemetry.sdk.name
+      - service.name
+      - telemetry.sdk.language
+      - telemetry.sdk.name
 
 exporters:
   logzio/traces:
@@ -122,23 +126,15 @@ processors:
   batch:
   tail_sampling:
     policies:
-      [
-        {
-          name: policy-errors,
-          type: status_code,
-          status_code: {status_codes: [ERROR]}
-        },
-        {
-          name: policy-slow,
-          type: latency,
-          latency: {threshold_ms: 1000}
-        },
-        {
-          name: policy-random-ok,
-          type: probabilistic,
-          probabilistic: {sampling_percentage: 10}
-        }
-      ]
+      - name: policy-errors
+        type: status_code
+        status_code: {status_codes: [ERROR]}
+      - name: policy-slow
+        type: latency
+        latency: {threshold_ms: 1000}
+      - name: policy-random-ok
+        type: probabilistic
+        probabilistic: {sampling_percentage: 10}
   metricstransform/metrics-rename:
     transforms:
       - include: ^duration.*$$
        action: update
        match_type: regexp
        new_name: latency
      - include: ^calls.*$$
        action: update
        match_type: regexp
        new_name: calls_total
   metricstransform/labels-rename:
     transforms:
-    - include: ^latency
-      action: update
-      match_type: regexp
-      operations:
-      - action: update_label
-        label: span.name
-        new_label: operation
-    - include: ^calls
-      action: update
-      match_type: regexp
-      operations:
-      - action: update_label
-        label: span.name
-        new_label: operation
+      - include: ^latency
+        action: update
+        match_type: regexp
+        operations:
+          - action: update_label
+            label: span.name
+            new_label: operation
+      - include: ^calls
+        action: update
+        match_type: regexp
+        operations:
+          - action: update_label
+            label: span.name
+            new_label: operation
 
 extensions:
   pprof:
@@ -186,6 +182,7 @@ service:
   telemetry:
     logs:
       level: "debug"
+
 ```
 
 {@include: ../../_include/tracing-shipping/replace-tracing-token.html}
@@ -194,7 +191,8 @@ service:
 
 {@include: ../../_include/log-shipping/listener-var.html}
 
-By default, this configuration collects all traces that have a span that was completed with an error, all traces that are slower than 1000 ms, and 10% of the rest of the traces.
+
+By default, this configuration collects all traces that have a span completed with an error, all traces that are slower than 1000 ms, and 10% of the remaining traces.
 
 You can add more policy configurations to the processor. For more on this, refer to [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md).
 
@@ -236,7 +234,7 @@ Mount the `config.yaml` as volume to the `docker run` command and run it as foll
 docker run \
 --network host \
 -v /config.yaml:/etc/otelcol-contrib/config.yaml \
-otel/opentelemetry-collector-contrib:0.73.0
+otel/opentelemetry-collector-contrib:0.105.0
 ```
 
 
@@ -257,7 +255,7 @@ docker run \
 -p 14268:14268 \
 -p 4317:4317 \
 -p 55681:55681 \
-otel/opentelemetry-collector-contrib:0.73.0
+otel/opentelemetry-collector-contrib:0.105.0
 ```
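The patched text notes that more policy configurations can be added to the `tail_sampling` processor. A minimal sketch of what that could look like with the block-style `policies` list introduced by this change; the `policy-keep-checkout` name and the `checkout-service` value are hypothetical placeholders, and `string_attribute` is one of the policy types documented in the tailsamplingprocessor README linked above:

```yaml
processors:
  tail_sampling:
    policies:
      # Policies from the patched config, unchanged.
      - name: policy-errors
        type: status_code
        status_code: {status_codes: [ERROR]}
      - name: policy-slow
        type: latency
        latency: {threshold_ms: 1000}
      - name: policy-random-ok
        type: probabilistic
        probabilistic: {sampling_percentage: 10}
      # Hypothetical extra policy: always keep traces whose service.name
      # matches a value of interest (placeholder value shown).
      - name: policy-keep-checkout
        type: string_attribute
        string_attribute: {key: service.name, values: [checkout-service]}
```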