diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6c0d1711c58..86739f645f3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
([#4154](https://github.com/open-telemetry/opentelemetry-python/pull/4154))
- sdk: Add support for log formatting
([#4137](https://github.com/open-telemetry/opentelemetry-python/pull/4166))
+- sdk: Implementation of exemplars
+ ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094))
- Implement events sdk
([#4176](https://github.com/open-telemetry/opentelemetry-python/pull/4176))
diff --git a/docs/conf.py b/docs/conf.py
index 3aa7e022e3a..2224eedde5c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -138,6 +138,18 @@
"py:class",
"opentelemetry.proto.collector.logs.v1.logs_service_pb2.ExportLogsServiceRequest",
),
+ (
+ "py:class",
+ "opentelemetry.sdk.metrics._internal.exemplar.exemplar_reservoir.FixedSizeExemplarReservoirABC",
+ ),
+ (
+ "py:class",
+ "opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar",
+ ),
+ (
+ "py:class",
+ "opentelemetry.sdk.metrics._internal.aggregation._Aggregation",
+ ),
]
# Add any paths that contain templates here, relative to this directory.
diff --git a/docs/examples/metrics/reader/README.rst b/docs/examples/metrics/reader/README.rst
index 1751e4bd81f..4822fe77669 100644
--- a/docs/examples/metrics/reader/README.rst
+++ b/docs/examples/metrics/reader/README.rst
@@ -5,6 +5,7 @@ These examples show how to customize the metrics that are output by the SDK usin
* preferred_aggregation.py: Shows how to configure the preferred aggregation for metric instrument types.
* preferred_temporality.py: Shows how to configure the preferred temporality for metric instrument types.
+* preferred_exemplarfilter.py: Shows how to configure the exemplar filter.
The source files of these examples are available :scm_web:`here `.
diff --git a/docs/examples/metrics/reader/preferred_exemplarfilter.py b/docs/examples/metrics/reader/preferred_exemplarfilter.py
new file mode 100644
index 00000000000..fd1e1cccb60
--- /dev/null
+++ b/docs/examples/metrics/reader/preferred_exemplarfilter.py
@@ -0,0 +1,62 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import time
+
+from opentelemetry import trace
+from opentelemetry.metrics import get_meter_provider, set_meter_provider
+from opentelemetry.sdk.metrics import AlwaysOnExemplarFilter, MeterProvider
+from opentelemetry.sdk.metrics.export import (
+ ConsoleMetricExporter,
+ PeriodicExportingMetricReader,
+)
+from opentelemetry.sdk.trace import TracerProvider
+
+# Create an ExemplarFilter instance
+# Available values are AlwaysOffExemplarFilter, AlwaysOnExemplarFilter
+# and TraceBasedExemplarFilter.
+# The default value is `TraceBasedExemplarFilter`.
+#
+# You can also use the environment variable `OTEL_METRICS_EXEMPLAR_FILTER`
+# to change the default value.
+#
+# You can also define your own filter by implementing the abstract class
+# `ExemplarFilter`
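+#
+# For example, a sketch of selecting the filter through the environment
+# instead of code (accepted values: "trace_based", "always_on", "always_off"):
+#
+#   OTEL_METRICS_EXEMPLAR_FILTER=always_off python preferred_exemplarfilter.py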
+exemplar_filter = AlwaysOnExemplarFilter()
+
+exporter = ConsoleMetricExporter()
+
+reader = PeriodicExportingMetricReader(
+ exporter,
+ export_interval_millis=5_000,
+)
+
+# Set up the MeterProvider with the ExemplarFilter
+provider = MeterProvider(
+ metric_readers=[reader],
+ exemplar_filter=exemplar_filter, # Pass the ExemplarFilter to the MeterProvider
+)
+set_meter_provider(provider)
+
+meter = get_meter_provider().get_meter("exemplar-filter-example", "0.1.2")
+counter = meter.create_counter("my-counter")
+
+# Create a trace and span, since the default exemplar filter
+# `TraceBasedExemplarFilter` only stores exemplars when the span in the
+# current context is sampled
+trace.set_tracer_provider(TracerProvider())
+tracer = trace.get_tracer(__name__)
+with tracer.start_as_current_span("foo"):
+ for value in range(10):
+ counter.add(value)
+ time.sleep(2.0)
diff --git a/docs/examples/metrics/views/README.rst b/docs/examples/metrics/views/README.rst
index cc9afd97d0c..43f30df693d 100644
--- a/docs/examples/metrics/views/README.rst
+++ b/docs/examples/metrics/views/README.rst
@@ -7,6 +7,7 @@ These examples show how to customize the metrics that are output by the SDK usin
* change_name.py: Shows how to change the name of a metric.
* limit_num_of_attrs.py: Shows how to limit the number of attributes that are output for a metric.
* drop_metrics_from_instrument.py: Shows how to drop measurements from an instrument.
+* change_reservoir_factory.py: Shows how to use your own ``ExemplarReservoir``.
The source files of these examples are available :scm_web:`here `.
diff --git a/docs/examples/metrics/views/change_reservoir_factory.py b/docs/examples/metrics/views/change_reservoir_factory.py
new file mode 100644
index 00000000000..8f8c676d036
--- /dev/null
+++ b/docs/examples/metrics/views/change_reservoir_factory.py
@@ -0,0 +1,90 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import time
+from typing import Type
+
+from opentelemetry import trace
+from opentelemetry.metrics import get_meter_provider, set_meter_provider
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics._internal.aggregation import (
+ DefaultAggregation,
+ _Aggregation,
+ _ExplicitBucketHistogramAggregation,
+)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlignedHistogramBucketExemplarReservoir,
+ ExemplarReservoirBuilder,
+ SimpleFixedSizeExemplarReservoir,
+)
+from opentelemetry.sdk.metrics.export import (
+ ConsoleMetricExporter,
+ PeriodicExportingMetricReader,
+)
+from opentelemetry.sdk.metrics.view import View
+from opentelemetry.sdk.trace import TracerProvider
+
+
+# Create a custom reservoir factory with specified parameters
+def custom_reservoir_factory(
+    aggregation_type: Type[_Aggregation],
+) -> ExemplarReservoirBuilder:
+    if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
+ return AlignedHistogramBucketExemplarReservoir
+ else:
+ # Custom reservoir must accept `**kwargs` that may set the `size` for
+ # _ExponentialBucketHistogramAggregation or the `boundaries` for
+ # _ExplicitBucketHistogramAggregation
+ return lambda **kwargs: SimpleFixedSizeExemplarReservoir(
+ size=10,
+ **{k: v for k, v in kwargs.items() if k != "size"},
+ )
+
+
+# Create a view with the custom reservoir factory
+change_reservoir_factory_view = View(
+ instrument_name="my.counter",
+ name="name",
+ aggregation=DefaultAggregation(),
+ exemplar_reservoir_factory=custom_reservoir_factory,
+)
+
+# Use console exporter for the example
+exporter = ConsoleMetricExporter()
+
+# Create a metric reader with stdout exporter
+reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000)
+provider = MeterProvider(
+ metric_readers=[
+ reader,
+ ],
+ views=[
+ change_reservoir_factory_view,
+ ],
+)
+set_meter_provider(provider)
+
+meter = get_meter_provider().get_meter("reservoir-factory-change", "0.1.2")
+
+my_counter = meter.create_counter("my.counter")
+
+# Create a trace and span, since the default exemplar filter
+# `TraceBasedExemplarFilter` only stores exemplars when the span in the
+# current context is sampled
+trace.set_tracer_provider(TracerProvider())
+tracer = trace.get_tracer(__name__)
+with tracer.start_as_current_span("foo"):
+    while True:
+ my_counter.add(random.randint(1, 10))
+ time.sleep(random.random())
diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py
index 0df1983e753..b3c7b98f1db 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py
@@ -28,6 +28,8 @@
)
from opentelemetry.exporter.otlp.proto.common._internal import (
_encode_attributes,
+ _encode_span_id,
+ _encode_trace_id,
)
from opentelemetry.sdk.environment_variables import (
OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE,
@@ -254,6 +256,7 @@ def _encode_metric(metric, pb2_metric):
pt = pb2.NumberDataPoint(
attributes=_encode_attributes(data_point.attributes),
time_unix_nano=data_point.time_unix_nano,
+ exemplars=_encode_exemplars(data_point.exemplars),
)
if isinstance(data_point.value, int):
pt.as_int = data_point.value
@@ -267,6 +270,7 @@ def _encode_metric(metric, pb2_metric):
attributes=_encode_attributes(data_point.attributes),
time_unix_nano=data_point.time_unix_nano,
start_time_unix_nano=data_point.start_time_unix_nano,
+ exemplars=_encode_exemplars(data_point.exemplars),
count=data_point.count,
sum=data_point.sum,
bucket_counts=data_point.bucket_counts,
@@ -285,6 +289,7 @@ def _encode_metric(metric, pb2_metric):
attributes=_encode_attributes(data_point.attributes),
start_time_unix_nano=data_point.start_time_unix_nano,
time_unix_nano=data_point.time_unix_nano,
+ exemplars=_encode_exemplars(data_point.exemplars),
)
if isinstance(data_point.value, int):
pt.as_int = data_point.value
@@ -322,6 +327,7 @@ def _encode_metric(metric, pb2_metric):
attributes=_encode_attributes(data_point.attributes),
time_unix_nano=data_point.time_unix_nano,
start_time_unix_nano=data_point.start_time_unix_nano,
+ exemplars=_encode_exemplars(data_point.exemplars),
count=data_point.count,
sum=data_point.sum,
scale=data_point.scale,
@@ -342,3 +348,35 @@ def _encode_metric(metric, pb2_metric):
"unsupported data type %s",
metric.data.__class__.__name__,
)
+
+
+def _encode_exemplars(sdk_exemplars: list) -> list:
+ """
+ Converts a list of SDK Exemplars into a list of protobuf Exemplars.
+
+ Args:
+ sdk_exemplars (list): The list of exemplars from the OpenTelemetry SDK.
+
+ Returns:
+ list: A list of protobuf exemplars.
+ """
+ pb_exemplars = []
+ for sdk_exemplar in sdk_exemplars:
+ pb_exemplar = pb2.Exemplar(
+ time_unix_nano=sdk_exemplar.time_unix_nano,
+ span_id=_encode_span_id(sdk_exemplar.span_id),
+ trace_id=_encode_trace_id(sdk_exemplar.trace_id),
+ filtered_attributes=_encode_attributes(
+ sdk_exemplar.filtered_attributes
+ ),
+ )
+ # Assign the value based on its type in the SDK exemplar
+ if isinstance(sdk_exemplar.value, float):
+ pb_exemplar.as_double = sdk_exemplar.value
+ elif isinstance(sdk_exemplar.value, int):
+ pb_exemplar.as_int = sdk_exemplar.value
+ else:
+ raise ValueError("Exemplar value must be an int or float")
+ pb_exemplars.append(pb_exemplar)
+
+ return pb_exemplars
diff --git a/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py b/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py
index 1115bb1f191..2250c8b6fdd 100644
--- a/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py
+++ b/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py
@@ -33,6 +33,7 @@
# pylint: disable=unused-import; needed for typing and sphinx
from opentelemetry import metrics
+from opentelemetry.context import Context
from opentelemetry.metrics._internal.observation import Observation
from opentelemetry.util.types import Attributes
@@ -173,6 +174,7 @@ def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
pass
@@ -192,8 +194,9 @@ def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
- return super().add(amount, attributes=attributes)
+ return super().add(amount, attributes=attributes, context=context)
class _ProxyCounter(_ProxyInstrument[Counter], Counter):
@@ -201,9 +204,10 @@ def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
if self._real_instrument:
- self._real_instrument.add(amount, attributes)
+ self._real_instrument.add(amount, attributes, context)
def _create_real_instrument(self, meter: "metrics.Meter") -> Counter:
return meter.create_counter(self._name, self._unit, self._description)
@@ -217,6 +221,7 @@ def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
pass
@@ -236,8 +241,9 @@ def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
- return super().add(amount, attributes=attributes)
+ return super().add(amount, attributes=attributes, context=context)
class _ProxyUpDownCounter(_ProxyInstrument[UpDownCounter], UpDownCounter):
@@ -245,9 +251,10 @@ def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
if self._real_instrument:
- self._real_instrument.add(amount, attributes)
+ self._real_instrument.add(amount, attributes, context)
def _create_real_instrument(self, meter: "metrics.Meter") -> UpDownCounter:
return meter.create_up_down_counter(
@@ -328,6 +335,7 @@ def record(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
pass
@@ -347,8 +355,9 @@ def record(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
- return super().record(amount, attributes=attributes)
+ return super().record(amount, attributes=attributes, context=context)
class _ProxyHistogram(_ProxyInstrument[Histogram], Histogram):
@@ -356,9 +365,10 @@ def record(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
if self._real_instrument:
- self._real_instrument.record(amount, attributes)
+ self._real_instrument.record(amount, attributes, context)
def _create_real_instrument(self, meter: "metrics.Meter") -> Histogram:
return meter.create_histogram(
@@ -406,6 +416,7 @@ def set(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
pass
@@ -425,8 +436,9 @@ def set(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
- return super().set(amount, attributes=attributes)
+ return super().set(amount, attributes=attributes, context=context)
class _ProxyGauge(
@@ -437,9 +449,10 @@ def set(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
+ context: Optional[Context] = None,
) -> None:
if self._real_instrument:
- self._real_instrument.set(amount, attributes)
+ self._real_instrument.set(amount, attributes, context)
def _create_real_instrument(self, meter: "metrics.Meter") -> Gauge:
return meter.create_gauge(self._name, self._unit, self._description)
diff --git a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py
index 7aa24e3342d..ffc254b20a4 100644
--- a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py
+++ b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py
@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Union
+from typing import Optional, Union
+from opentelemetry.context import Context
from opentelemetry.util.types import Attributes
@@ -25,13 +26,18 @@ class Observation:
Args:
value: The float or int measured value
attributes: The measurement's attributes
+ context: The measurement's context
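+
+    Example:
+        A sketch of yielding an observation with an explicit context from an
+        observable-instrument callback; names here are illustrative::
+
+            from opentelemetry.context import get_current
+
+            def callback(options):
+                yield Observation(10, {"room": "kitchen"}, context=get_current())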
"""
def __init__(
- self, value: Union[int, float], attributes: Attributes = None
+ self,
+ value: Union[int, float],
+ attributes: Attributes = None,
+ context: Optional[Context] = None,
) -> None:
self._value = value
self._attributes = attributes
+ self._context = context
@property
def value(self) -> Union[float, int]:
@@ -41,12 +47,17 @@ def value(self) -> Union[float, int]:
def attributes(self) -> Attributes:
return self._attributes
+ @property
+ def context(self) -> Optional[Context]:
+ return self._context
+
def __eq__(self, other: object) -> bool:
return (
isinstance(other, Observation)
and self.value == other.value
and self.attributes == other.attributes
+ and self.context == other.context
)
def __repr__(self) -> str:
- return f"Observation(value={self.value}, attributes={self.attributes})"
+ return f"Observation(value={self.value}, attributes={self.attributes}, context={self.context})"
diff --git a/opentelemetry-api/tests/metrics/test_meter_provider.py b/opentelemetry-api/tests/metrics/test_meter_provider.py
index bce530d6caf..8caec848f65 100644
--- a/opentelemetry-api/tests/metrics/test_meter_provider.py
+++ b/opentelemetry-api/tests/metrics/test_meter_provider.py
@@ -279,13 +279,15 @@ def test_proxy_meter(self):
real_gauge.assert_not_called()
proxy_counter.add(amount, attributes=attributes)
- real_counter.add.assert_called_once_with(amount, attributes)
+ real_counter.add.assert_called_once_with(amount, attributes, None)
proxy_updowncounter.add(amount, attributes=attributes)
- real_updowncounter.add.assert_called_once_with(amount, attributes)
+ real_updowncounter.add.assert_called_once_with(
+ amount, attributes, None
+ )
proxy_histogram.record(amount, attributes=attributes)
- real_histogram.record.assert_called_once_with(amount, attributes)
+ real_histogram.record.assert_called_once_with(amount, attributes, None)
proxy_gauge.set(amount, attributes=attributes)
- real_gauge.set.assert_called_once_with(amount, attributes)
+ real_gauge.set.assert_called_once_with(amount, attributes, None)
def test_proxy_meter_with_real_meter(self) -> None:
# Creating new instruments on the _ProxyMeter with a real meter set
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py
index a907a289760..b89c08da042 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py
@@ -15,6 +15,16 @@
from opentelemetry.sdk.metrics._internal import Meter, MeterProvider
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlignedHistogramBucketExemplarReservoir,
+ AlwaysOffExemplarFilter,
+ AlwaysOnExemplarFilter,
+ Exemplar,
+ ExemplarFilter,
+ ExemplarReservoir,
+ SimpleFixedSizeExemplarReservoir,
+ TraceBasedExemplarFilter,
+)
from opentelemetry.sdk.metrics._internal.instrument import Counter
from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge
from opentelemetry.sdk.metrics._internal.instrument import (
@@ -26,6 +36,12 @@
)
__all__ = [
+ "AlignedHistogramBucketExemplarReservoir",
+ "AlwaysOnExemplarFilter",
+ "AlwaysOffExemplarFilter",
+ "Exemplar",
+ "ExemplarFilter",
+ "ExemplarReservoir",
"Meter",
"MeterProvider",
"MetricsTimeoutError",
@@ -35,5 +51,7 @@
"ObservableCounter",
"ObservableGauge",
"ObservableUpDownCounter",
+ "SimpleFixedSizeExemplarReservoir",
"UpDownCounter",
+ "TraceBasedExemplarFilter",
]
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py
index 9dc95c0edb8..f9ed0280325 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py
@@ -33,8 +33,17 @@
)
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
from opentelemetry.metrics import _Gauge as APIGauge
-from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED
+from opentelemetry.sdk.environment_variables import (
+ OTEL_METRICS_EXEMPLAR_FILTER,
+ OTEL_SDK_DISABLED,
+)
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlwaysOffExemplarFilter,
+ AlwaysOnExemplarFilter,
+ ExemplarFilter,
+ TraceBasedExemplarFilter,
+)
from opentelemetry.sdk.metrics._internal.instrument import (
_Counter,
_Gauge,
@@ -340,6 +349,17 @@ def create_observable_up_down_counter(
return instrument
+def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter:
+    """Map an ``OTEL_METRICS_EXEMPLAR_FILTER`` value to an ``ExemplarFilter``."""
+    if exemplar_filter == "trace_based":
+ return TraceBasedExemplarFilter()
+ if exemplar_filter == "always_on":
+ return AlwaysOnExemplarFilter()
+ if exemplar_filter == "always_off":
+ return AlwaysOffExemplarFilter()
+ msg = f"Unknown exemplar filter '{exemplar_filter}'."
+ raise ValueError(msg)
+
+
class MeterProvider(APIMeterProvider):
r"""See `opentelemetry.metrics.MeterProvider`.
@@ -380,7 +400,8 @@ def __init__(
metric_readers: Sequence[
"opentelemetry.sdk.metrics.export.MetricReader"
] = (),
- resource: Resource = None,
+ resource: Optional[Resource] = None,
+ exemplar_filter: Optional[ExemplarFilter] = None,
shutdown_on_exit: bool = True,
views: Sequence["opentelemetry.sdk.metrics.view.View"] = (),
):
@@ -390,6 +411,12 @@ def __init__(
if resource is None:
resource = Resource.create({})
self._sdk_config = SdkConfiguration(
+ exemplar_filter=(
+ exemplar_filter
+ or _get_exemplar_filter(
+ environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based")
+ )
+ ),
resource=resource,
metric_readers=metric_readers,
views=views,
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
index cba09696129..5431d1fa02d 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
@@ -51,12 +51,20 @@ def __init__(
)
if not isinstance(self._view._aggregation, DefaultAggregation):
self._aggregation = self._view._aggregation._create_aggregation(
- self._instrument, None, 0
+ self._instrument,
+ None,
+ self._view._exemplar_reservoir_factory,
+ 0,
)
else:
self._aggregation = self._instrument_class_aggregation[
self._instrument.__class__
- ]._create_aggregation(self._instrument, None, 0)
+ ]._create_aggregation(
+ self._instrument,
+ None,
+ self._view._exemplar_reservoir_factory,
+ 0,
+ )
def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
# pylint: disable=protected-access
@@ -80,7 +88,9 @@ def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
return result
# pylint: disable=protected-access
- def consume_measurement(self, measurement: Measurement) -> None:
+ def consume_measurement(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
if self._view._attribute_keys is not None:
@@ -106,6 +116,7 @@ def consume_measurement(self, measurement: Measurement) -> None:
self._view._aggregation._create_aggregation(
self._instrument,
attributes,
+ self._view._exemplar_reservoir_factory,
time_ns(),
)
)
@@ -115,11 +126,14 @@ def consume_measurement(self, measurement: Measurement) -> None:
]._create_aggregation(
self._instrument,
attributes,
+ self._view._exemplar_reservoir_factory,
time_ns(),
)
self._attributes_aggregation[aggr_key] = aggregation
- self._attributes_aggregation[aggr_key].aggregate(measurement)
+ self._attributes_aggregation[aggr_key].aggregate(
+ measurement, should_sample_exemplar
+ )
def collect(
self,
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py
index 62ac967bbec..39c967e4c86 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py
@@ -17,10 +17,11 @@
from abc import ABC, abstractmethod
from bisect import bisect_left
from enum import IntEnum
+from functools import partial
from logging import getLogger
from math import inf
from threading import Lock
-from typing import Generic, List, Optional, Sequence, TypeVar
+from typing import Callable, Generic, List, Optional, Sequence, Type, TypeVar
from opentelemetry.metrics import (
Asynchronous,
@@ -34,6 +35,10 @@
UpDownCounter,
_Gauge,
)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ Exemplar,
+ ExemplarReservoirBuilder,
+)
from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import (
Buckets,
)
@@ -80,14 +85,26 @@ class AggregationTemporality(IntEnum):
class _Aggregation(ABC, Generic[_DataPointVarT]):
- def __init__(self, attributes: Attributes):
+ def __init__(
+ self,
+ attributes: Attributes,
+ reservoir_builder: ExemplarReservoirBuilder,
+ ):
self._lock = Lock()
self._attributes = attributes
+ self._reservoir = reservoir_builder()
self._previous_point = None
@abstractmethod
- def aggregate(self, measurement: Measurement) -> None:
- pass
+ def aggregate(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
+ """Aggregate a measurement.
+
+ Args:
+ measurement: Measurement to aggregate
+            should_sample_exemplar: Whether the measurement should be offered to the exemplar reservoir.
+ """
@abstractmethod
def collect(
@@ -97,9 +114,38 @@ def collect(
) -> Optional[_DataPointVarT]:
pass
+ def _collect_exemplars(self) -> Sequence[Exemplar]:
+ """Returns the collected exemplars.
+
+ Returns:
+ The exemplars collected by the reservoir
+ """
+ return self._reservoir.collect(self._attributes)
+
+ def _sample_exemplar(
+ self, measurement: Measurement, should_sample_exemplar: bool
+ ) -> None:
+ """Offer the measurement to the exemplar reservoir for sampling.
+
+        It should be called within each ``aggregate`` call.
+
+ Args:
+ measurement: The new measurement
+            should_sample_exemplar: Whether the measurement should be offered to the exemplar reservoir.
+ """
+ if should_sample_exemplar:
+ self._reservoir.offer(
+ measurement.value,
+ measurement.time_unix_nano,
+ measurement.attributes,
+ measurement.context,
+ )
+
class _DropAggregation(_Aggregation):
- def aggregate(self, measurement: Measurement) -> None:
+ def aggregate(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
pass
def collect(
@@ -117,8 +163,9 @@ def __init__(
instrument_is_monotonic: bool,
instrument_aggregation_temporality: AggregationTemporality,
start_time_unix_nano: int,
+ reservoir_builder: ExemplarReservoirBuilder,
):
- super().__init__(attributes)
+ super().__init__(attributes, reservoir_builder)
self._start_time_unix_nano = start_time_unix_nano
self._instrument_aggregation_temporality = (
@@ -131,13 +178,17 @@ def __init__(
self._previous_collection_start_nano = self._start_time_unix_nano
self._previous_value = 0
- def aggregate(self, measurement: Measurement) -> None:
+ def aggregate(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
with self._lock:
if self._value is None:
self._value = 0
self._value = self._value + measurement.value
+ self._sample_exemplar(measurement, should_sample_exemplar)
+
def collect(
self,
collection_aggregation_temporality: AggregationTemporality,
@@ -290,6 +341,7 @@ def collect(
return NumberDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=previous_collection_start_nano,
time_unix_nano=collection_start_nano,
value=value,
@@ -302,6 +354,7 @@ def collect(
return NumberDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=self._start_time_unix_nano,
time_unix_nano=collection_start_nano,
value=self._previous_value,
@@ -330,6 +383,7 @@ def collect(
return NumberDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=previous_collection_start_nano,
time_unix_nano=collection_start_nano,
value=result_value,
@@ -337,6 +391,7 @@ def collect(
return NumberDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=self._start_time_unix_nano,
time_unix_nano=collection_start_nano,
value=value,
@@ -344,14 +399,22 @@ def collect(
class _LastValueAggregation(_Aggregation[GaugePoint]):
- def __init__(self, attributes: Attributes):
- super().__init__(attributes)
+ def __init__(
+ self,
+ attributes: Attributes,
+ reservoir_builder: ExemplarReservoirBuilder,
+ ):
+ super().__init__(attributes, reservoir_builder)
self._value = None
- def aggregate(self, measurement: Measurement):
+ def aggregate(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ):
with self._lock:
self._value = measurement.value
+ self._sample_exemplar(measurement, should_sample_exemplar)
+
def collect(
self,
collection_aggregation_temporality: AggregationTemporality,
@@ -366,8 +429,11 @@ def collect(
value = self._value
self._value = None
+ exemplars = self._collect_exemplars()
+
return NumberDataPoint(
attributes=self._attributes,
+ exemplars=exemplars,
start_time_unix_nano=None,
time_unix_nano=collection_start_nano,
value=value,
@@ -380,6 +446,7 @@ def __init__(
attributes: Attributes,
instrument_aggregation_temporality: AggregationTemporality,
start_time_unix_nano: int,
+ reservoir_builder: ExemplarReservoirBuilder,
boundaries: Sequence[float] = (
0.0,
5.0,
@@ -399,7 +466,12 @@ def __init__(
),
record_min_max: bool = True,
):
- super().__init__(attributes)
+ super().__init__(
+ attributes,
+ reservoir_builder=partial(
+ reservoir_builder, boundaries=boundaries
+ ),
+ )
self._instrument_aggregation_temporality = (
instrument_aggregation_temporality
@@ -423,7 +495,9 @@ def __init__(
def _get_empty_bucket_counts(self) -> List[int]:
return [0] * (len(self._boundaries) + 1)
- def aggregate(self, measurement: Measurement) -> None:
+ def aggregate(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
with self._lock:
if self._value is None:
@@ -439,6 +513,8 @@ def aggregate(self, measurement: Measurement) -> None:
self._value[bisect_left(self._boundaries, measurement_value)] += 1
+ self._sample_exemplar(measurement, should_sample_exemplar)
+
def collect(
self,
collection_aggregation_temporality: AggregationTemporality,
@@ -482,6 +558,7 @@ def collect(
return HistogramDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=previous_collection_start_nano,
time_unix_nano=collection_start_nano,
count=sum(value),
@@ -511,6 +588,7 @@ def collect(
return HistogramDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=self._start_time_unix_nano,
time_unix_nano=collection_start_nano,
count=sum(self._previous_value),
@@ -540,6 +618,7 @@ class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]):
def __init__(
self,
attributes: Attributes,
+ reservoir_builder: ExemplarReservoirBuilder,
instrument_aggregation_temporality: AggregationTemporality,
start_time_unix_nano: int,
# This is the default maximum number of buckets per positive or
@@ -583,7 +662,12 @@ def __init__(
# _ExplicitBucketHistogramAggregation both size and amount of buckets
# remain constant once it is instantiated).
- super().__init__(attributes)
+ super().__init__(
+ attributes,
+ reservoir_builder=partial(
+ reservoir_builder, size=min(20, max_size)
+ ),
+ )
self._instrument_aggregation_temporality = (
instrument_aggregation_temporality
@@ -614,7 +698,9 @@ def __init__(
self._mapping = self._new_mapping(self._max_scale)
- def aggregate(self, measurement: Measurement) -> None:
+ def aggregate(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
# pylint: disable=too-many-branches,too-many-statements, too-many-locals
with self._lock:
@@ -724,6 +810,8 @@ def aggregate(self, measurement: Measurement) -> None:
# in _ExplicitBucketHistogramAggregation.aggregate
value.increment_bucket(bucket_index)
+ self._sample_exemplar(measurement, should_sample_exemplar)
+
def collect(
self,
collection_aggregation_temporality: AggregationTemporality,
@@ -776,6 +864,7 @@ def collect(
return ExponentialHistogramDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=previous_collection_start_nano,
time_unix_nano=collection_start_nano,
count=count,
@@ -939,6 +1028,7 @@ def collect(
return ExponentialHistogramDataPoint(
attributes=self._attributes,
+ exemplars=self._collect_exemplars(),
start_time_unix_nano=self._start_time_unix_nano,
time_unix_nano=collection_start_nano,
count=self._previous_count,
@@ -1109,6 +1199,9 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
"""Creates an aggregation"""
@@ -1137,6 +1230,9 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
@@ -1144,6 +1240,7 @@ def _create_aggregation(
if isinstance(instrument, Counter):
return _SumAggregation(
attributes,
+ reservoir_builder=reservoir_factory(_SumAggregation),
instrument_is_monotonic=True,
instrument_aggregation_temporality=(
AggregationTemporality.DELTA
@@ -1153,6 +1250,7 @@ def _create_aggregation(
if isinstance(instrument, UpDownCounter):
return _SumAggregation(
attributes,
+ reservoir_builder=reservoir_factory(_SumAggregation),
instrument_is_monotonic=False,
instrument_aggregation_temporality=(
AggregationTemporality.DELTA
@@ -1163,6 +1261,7 @@ def _create_aggregation(
if isinstance(instrument, ObservableCounter):
return _SumAggregation(
attributes,
+ reservoir_builder=reservoir_factory(_SumAggregation),
instrument_is_monotonic=True,
instrument_aggregation_temporality=(
AggregationTemporality.CUMULATIVE
@@ -1173,6 +1272,7 @@ def _create_aggregation(
if isinstance(instrument, ObservableUpDownCounter):
return _SumAggregation(
attributes,
+ reservoir_builder=reservoir_factory(_SumAggregation),
instrument_is_monotonic=False,
instrument_aggregation_temporality=(
AggregationTemporality.CUMULATIVE
@@ -1183,6 +1283,9 @@ def _create_aggregation(
if isinstance(instrument, Histogram):
return _ExplicitBucketHistogramAggregation(
attributes,
+ reservoir_builder=reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ ),
instrument_aggregation_temporality=(
AggregationTemporality.DELTA
),
@@ -1190,10 +1293,16 @@ def _create_aggregation(
)
if isinstance(instrument, ObservableGauge):
- return _LastValueAggregation(attributes)
+ return _LastValueAggregation(
+ attributes,
+ reservoir_builder=reservoir_factory(_LastValueAggregation),
+ )
if isinstance(instrument, _Gauge):
- return _LastValueAggregation(attributes)
+ return _LastValueAggregation(
+ attributes,
+ reservoir_builder=reservoir_factory(_LastValueAggregation),
+ )
# pylint: disable=broad-exception-raised
raise Exception(f"Invalid instrument type {type(instrument)} found")
@@ -1212,6 +1321,9 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
@@ -1225,6 +1337,7 @@ def _create_aggregation(
return _ExponentialBucketHistogramAggregation(
attributes,
+ reservoir_factory(_ExponentialBucketHistogramAggregation),
instrument_aggregation_temporality,
start_time_unix_nano,
max_size=self._max_size,
@@ -1274,6 +1387,9 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
@@ -1289,6 +1405,7 @@ def _create_aggregation(
attributes,
instrument_aggregation_temporality,
start_time_unix_nano,
+ reservoir_factory(_ExplicitBucketHistogramAggregation),
self._boundaries,
self._record_min_max,
)
@@ -1304,6 +1421,9 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
@@ -1320,6 +1440,7 @@ def _create_aggregation(
isinstance(instrument, (Counter, ObservableCounter)),
instrument_aggregation_temporality,
start_time_unix_nano,
+ reservoir_factory(_SumAggregation),
)
@@ -1335,9 +1456,15 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
- return _LastValueAggregation(attributes)
+ return _LastValueAggregation(
+ attributes,
+ reservoir_builder=reservoir_factory(_LastValueAggregation),
+ )
class DropAggregation(Aggregation):
@@ -1347,6 +1474,11 @@ def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
+ reservoir_factory: Callable[
+ [Type[_Aggregation]], ExemplarReservoirBuilder
+ ],
start_time_unix_nano: int,
) -> _Aggregation:
- return _DropAggregation(attributes)
+ return _DropAggregation(
+ attributes, reservoir_factory(_DropAggregation)
+ )
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py
new file mode 100644
index 00000000000..ee93dd18278
--- /dev/null
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py
@@ -0,0 +1,39 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .exemplar import Exemplar
+from .exemplar_filter import (
+ AlwaysOffExemplarFilter,
+ AlwaysOnExemplarFilter,
+ ExemplarFilter,
+ TraceBasedExemplarFilter,
+)
+from .exemplar_reservoir import (
+ AlignedHistogramBucketExemplarReservoir,
+ ExemplarReservoir,
+ ExemplarReservoirBuilder,
+ SimpleFixedSizeExemplarReservoir,
+)
+
+__all__ = [
+ "Exemplar",
+ "ExemplarFilter",
+ "AlwaysOffExemplarFilter",
+ "AlwaysOnExemplarFilter",
+ "TraceBasedExemplarFilter",
+ "AlignedHistogramBucketExemplarReservoir",
+ "ExemplarReservoir",
+ "ExemplarReservoirBuilder",
+ "SimpleFixedSizeExemplarReservoir",
+]
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py
new file mode 100644
index 00000000000..d3199c69abe
--- /dev/null
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py
@@ -0,0 +1,50 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import dataclasses
+from typing import Optional, Union
+
+from opentelemetry.util.types import Attributes
+
+
+@dataclasses.dataclass(frozen=True)
+class Exemplar:
+ """A representation of an exemplar, which is a sample input measurement.
+
+ Exemplars also hold information about the environment when the measurement
+ was recorded, for example the span and trace ID of the active span when the
+ exemplar was recorded.
+
+ Attributes
+ trace_id: (optional) The trace associated with a recording
+ span_id: (optional) The span associated with a recording
+ time_unix_nano: The time of the observation
+ value: The recorded value
+ filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made.
+
+ References:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
+ """
+
+ # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated
+ # one will come from napoleon extension and the other from autodoc extension. This
+    # will raise a Sphinx error of duplicated object description
+ # See https://github.com/sphinx-doc/sphinx/issues/8664
+
+ filtered_attributes: Attributes
+ value: Union[int, float]
+ time_unix_nano: int
+    span_id: Optional[int] = None
+    trace_id: Optional[int] = None
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py
new file mode 100644
index 00000000000..8961d101efe
--- /dev/null
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py
@@ -0,0 +1,134 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from typing import Union
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.trace.span import INVALID_SPAN
+from opentelemetry.util.types import Attributes
+
+
+class ExemplarFilter(ABC):
+ """``ExemplarFilter`` determines which measurements are eligible for becoming an
+ ``Exemplar``.
+
+ Exemplar filters are used to filter measurements before attempting to store them
+ in a reservoir.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter
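+
+    Example:
+        A minimal sketch of a custom filter; the class below is illustrative
+        and not part of the SDK::
+
+            class EveryOtherExemplarFilter(ExemplarFilter):
+                def __init__(self):
+                    self._seen = 0
+
+                def should_sample(
+                    self, value, time_unix_nano, attributes, context
+                ):
+                    self._seen += 1
+                    return self._seen % 2 == 0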
+ """
+
+ @abstractmethod
+ def should_sample(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> bool:
+ """Returns whether or not a reservoir should attempt to filter a measurement.
+
+ Args:
+ value: The value of the measurement
+ timestamp: A timestamp that best represents when the measurement was taken
+ attributes: The complete set of measurement attributes
+ context: The Context of the measurement
+ """
+ raise NotImplementedError(
+ "ExemplarFilter.should_sample is not implemented"
+ )
+
+
+class AlwaysOnExemplarFilter(ExemplarFilter):
+ """An ExemplarFilter which makes all measurements eligible for being an Exemplar.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson
+ """
+
+ def should_sample(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> bool:
+ """Returns whether or not a reservoir should attempt to filter a measurement.
+
+ Args:
+ value: The value of the measurement
+ timestamp: A timestamp that best represents when the measurement was taken
+ attributes: The complete set of measurement attributes
+ context: The Context of the measurement
+ """
+ return True
+
+
+class AlwaysOffExemplarFilter(ExemplarFilter):
+ """An ExemplarFilter which makes no measurements eligible for being an Exemplar.
+
+    Using this ExemplarFilter is equivalent to disabling the exemplar feature.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff
+ """
+
+ def should_sample(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> bool:
+ """Returns whether or not a reservoir should attempt to filter a measurement.
+
+ Args:
+ value: The value of the measurement
+ timestamp: A timestamp that best represents when the measurement was taken
+ attributes: The complete set of measurement attributes
+ context: The Context of the measurement
+ """
+ return False
+
+
+class TraceBasedExemplarFilter(ExemplarFilter):
+ """An ExemplarFilter which makes those measurements eligible for being an Exemplar,
+ which are recorded in the context of a sampled parent span.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased
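+
+    Example:
+        A sketch of how the SDK consults a filter before offering a
+        measurement to a reservoir; names here are illustrative::
+
+            filter_ = TraceBasedExemplarFilter()
+            if filter_.should_sample(value, time_unix_nano, attributes, context):
+                reservoir.offer(value, time_unix_nano, attributes, context)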
+ """
+
+ def should_sample(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> bool:
+ """Returns whether or not a reservoir should attempt to filter a measurement.
+
+ Args:
+ value: The value of the measurement
+ timestamp: A timestamp that best represents when the measurement was taken
+ attributes: The complete set of measurement attributes
+ context: The Context of the measurement
+ """
+ span = trace.get_current_span(context)
+ if span == INVALID_SPAN:
+ return False
+ return span.get_span_context().trace_flags.sampled
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py
new file mode 100644
index 00000000000..1dcbfe47dae
--- /dev/null
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py
@@ -0,0 +1,321 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from random import randrange
+from typing import Callable, List, Optional, Sequence, Union
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.trace.span import INVALID_SPAN
+from opentelemetry.util.types import Attributes
+
+from .exemplar import Exemplar
+
+
+class ExemplarReservoir(ABC):
+ """ExemplarReservoir provide a method to offer measurements to the reservoir
+ and another to collect accumulated Exemplars.
+
+ Note:
+ The constructor MUST accept ``**kwargs`` that may be set from aggregation
+ parameters.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir
+ """
+
+ @abstractmethod
+ def offer(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> None:
+ """Offers a measurement to be sampled.
+
+ Args:
+ value: Measured value
+ time_unix_nano: Measurement instant
+ attributes: Measurement attributes
+ context: Measurement context
+ """
+ raise NotImplementedError("ExemplarReservoir.offer is not implemented")
+
+ @abstractmethod
+ def collect(self, point_attributes: Attributes) -> List[Exemplar]:
+ """Returns accumulated Exemplars and also resets the reservoir for the next
+ sampling period
+
+ Args:
+ point_attributes: The attributes associated with metric point.
+
+ Returns:
+            A list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
+ exemplars contain the attributes that were filtered out by the aggregator,
+ but recorded alongside the original measurement.
+ """
+ raise NotImplementedError(
+ "ExemplarReservoir.collect is not implemented"
+ )
+
+
+class ExemplarBucket:
+ def __init__(self) -> None:
+ self.__value: Union[int, float] = 0
+ self.__attributes: Attributes = None
+ self.__time_unix_nano: int = 0
+        self.__span_id: Optional[int] = None
+        self.__trace_id: Optional[int] = None
+ self.__offered: bool = False
+
+ def offer(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> None:
+ """Offers a measurement to be sampled.
+
+ Args:
+ value: Measured value
+ time_unix_nano: Measurement instant
+ attributes: Measurement attributes
+ context: Measurement context
+ """
+ self.__value = value
+ self.__time_unix_nano = time_unix_nano
+ self.__attributes = attributes
+ span = trace.get_current_span(context)
+ if span != INVALID_SPAN:
+ span_context = span.get_span_context()
+ self.__span_id = span_context.span_id
+ self.__trace_id = span_context.trace_id
+
+ self.__offered = True
+
+ def collect(self, point_attributes: Attributes) -> Optional[Exemplar]:
+ """May return an Exemplar and resets the bucket for the next sampling period."""
+ if not self.__offered:
+ return None
+
+ # filters out attributes from the measurement that are already included in the metric data point
+ # See the specification for more details:
+ # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
+ filtered_attributes = (
+ {
+ k: v
+ for k, v in self.__attributes.items()
+ if k not in point_attributes
+ }
+ if self.__attributes
+ else None
+ )
+
+ exemplar = Exemplar(
+ filtered_attributes,
+ self.__value,
+ self.__time_unix_nano,
+ self.__span_id,
+ self.__trace_id,
+ )
+ self.__reset()
+ return exemplar
+
+ def __reset(self) -> None:
+ """Reset the bucket state after a collection cycle."""
+ self.__value = 0
+ self.__attributes = {}
+ self.__time_unix_nano = 0
+ self.__span_id = None
+ self.__trace_id = None
+ self.__offered = False
+
+
+class BucketIndexError(ValueError):
+ """An exception raised when the bucket index cannot be found."""
+
+
+class FixedSizeExemplarReservoirABC(ExemplarReservoir):
+ """Abstract class for a reservoir with fixed size."""
+
+ def __init__(self, size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self._size: int = size
+ self._reservoir_storage: List[ExemplarBucket] = [
+ ExemplarBucket() for _ in range(self._size)
+ ]
+
+ def collect(self, point_attributes: Attributes) -> List[Exemplar]:
+ """Returns accumulated Exemplars and also resets the reservoir for the next
+ sampling period
+
+ Args:
+ point_attributes: The attributes associated with metric point.
+
+ Returns:
+            A list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
+ exemplars contain the attributes that were filtered out by the aggregator,
+ but recorded alongside the original measurement.
+ """
+ exemplars = filter(
+ lambda e: e is not None,
+ map(
+ lambda bucket: bucket.collect(point_attributes),
+ self._reservoir_storage,
+ ),
+ )
+ self._reset()
+ return [*exemplars]
+
+ def offer(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> None:
+ """Offers a measurement to be sampled.
+
+ Args:
+ value: Measured value
+ time_unix_nano: Measurement instant
+ attributes: Measurement attributes
+ context: Measurement context
+ """
+ try:
+ index = self._find_bucket_index(
+ value, time_unix_nano, attributes, context
+ )
+
+ self._reservoir_storage[index].offer(
+ value, time_unix_nano, attributes, context
+ )
+ except BucketIndexError:
+ # Ignore invalid bucket index
+ pass
+
+ @abstractmethod
+ def _find_bucket_index(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> int:
+ """Determines the bucket index for the given measurement.
+
+ It should be implemented by subclasses based on specific strategies.
+
+ Args:
+ value: Measured value
+ time_unix_nano: Measurement instant
+ attributes: Measurement attributes
+ context: Measurement context
+
+ Returns:
+ The bucket index
+
+ Raises:
+ BucketIndexError: If no bucket index can be found.
+ """
+
+ def _reset(self) -> None:
+ """Reset the reservoir by resetting any stateful logic after a collection cycle."""
+
+
+class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC):
+ """This reservoir uses an uniformly-weighted sampling algorithm based on the number
+ of samples the reservoir has seen so far to determine if the offered measurements
+ should be sampled.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
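+
+    Example:
+        A minimal sketch of direct use; values are illustrative and the SDK
+        normally constructs reservoirs through an ``ExemplarReservoirBuilder``::
+
+            from time import time_ns
+
+            from opentelemetry.context import get_current
+
+            reservoir = SimpleFixedSizeExemplarReservoir(size=4)
+            reservoir.offer(1.2, time_ns(), {"tier": "gold"}, get_current())
+            exemplars = reservoir.collect({"service.name": "api"})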
+ """
+
+ def __init__(self, size: int = 1, **kwargs) -> None:
+ super().__init__(size, **kwargs)
+ self._measurements_seen: int = 0
+
+ def _reset(self) -> None:
+ super()._reset()
+ self._measurements_seen = 0
+
+ def _find_bucket_index(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> int:
+ self._measurements_seen += 1
+ if self._measurements_seen < self._size:
+ return self._measurements_seen - 1
+
+ index = randrange(0, self._measurements_seen)
+ if index < self._size:
+ return index
+
+ raise BucketIndexError("Unable to find the bucket index.")
+
+
+class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC):
+ """This Exemplar reservoir takes a configuration parameter that is the
+ configuration of a Histogram. This implementation keeps the last seen measurement
+ that falls within a histogram bucket.
+
+ Reference:
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir
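+
+    Example:
+        A minimal sketch; the ``boundaries`` normally come from the histogram
+        aggregation this reservoir is built for, and the values below are
+        illustrative::
+
+            from time import time_ns
+
+            from opentelemetry.context import get_current
+
+            reservoir = AlignedHistogramBucketExemplarReservoir(
+                boundaries=[0.0, 5.0, 10.0]
+            )
+            # 7.0 falls in the (5.0, 10.0] bucket, i.e. bucket index 2
+            reservoir.offer(7.0, time_ns(), {}, get_current())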
+ """
+
+ def __init__(self, boundaries: Sequence[float], **kwargs) -> None:
+ super().__init__(len(boundaries) + 1, **kwargs)
+ self._boundaries: Sequence[float] = boundaries
+
+ def offer(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> None:
+ """Offers a measurement to be sampled."""
+ index = self._find_bucket_index(
+ value, time_unix_nano, attributes, context
+ )
+ self._reservoir_storage[index].offer(
+ value, time_unix_nano, attributes, context
+ )
+
+ def _find_bucket_index(
+ self,
+ value: Union[int, float],
+ time_unix_nano: int,
+ attributes: Attributes,
+ context: Context,
+ ) -> int:
+ for index, boundary in enumerate(self._boundaries):
+ if value <= boundary:
+ return index
+ return len(self._boundaries)
+
+
+ExemplarReservoirBuilder = Callable[..., ExemplarReservoir]
+ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder.
+
+It may receive the Aggregation parameters it is bound to; e.g.
+the _ExplicitBucketHistogramAggregation will provide the boundaries.
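+
+A minimal sketch of a conforming builder; the function below is illustrative,
+not shipped code::
+
+    def my_builder(**kwargs) -> ExemplarReservoir:
+        # accept and ignore the aggregation-provided kwargs, keep a fixed size
+        return SimpleFixedSizeExemplarReservoir(size=8)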
+"""
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py
index 2b02e67fc3d..ea373ccc6fb 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py
@@ -15,10 +15,12 @@
# pylint: disable=too-many-ancestors, unused-import
from logging import getLogger
+from time import time_ns
from typing import Dict, Generator, Iterable, List, Optional, Union
# This kind of import is needed to avoid Sphinx errors.
import opentelemetry.sdk.metrics
+from opentelemetry.context import Context, get_current
from opentelemetry.metrics import CallbackT
from opentelemetry.metrics import Counter as APICounter
from opentelemetry.metrics import Histogram as APIHistogram
@@ -137,7 +139,9 @@ def callback(
for api_measurement in callback(callback_options):
yield Measurement(
api_measurement.value,
+ time_unix_nano=time_ns(),
instrument=self,
+ context=api_measurement.context or get_current(),
attributes=api_measurement.attributes,
)
except Exception: # pylint: disable=broad-exception-caught
@@ -153,15 +157,25 @@ def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def add(
- self, amount: Union[int, float], attributes: Dict[str, str] = None
+ self,
+ amount: Union[int, float],
+ attributes: Dict[str, str] = None,
+ context: Optional[Context] = None,
):
if amount < 0:
_logger.warning(
"Add amount must be non-negative on Counter %s.", self.name
)
return
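+        # Capture the timestamp at API call time; if the caller does not
+        # pass a context explicitly, fall back to the current one.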
+ time_unix_nano = time_ns()
self._measurement_consumer.consume_measurement(
- Measurement(amount, self, attributes)
+ Measurement(
+ amount,
+ time_unix_nano,
+ self,
+ context or get_current(),
+ attributes,
+ )
)
@@ -172,10 +186,20 @@ def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def add(
- self, amount: Union[int, float], attributes: Dict[str, str] = None
+ self,
+ amount: Union[int, float],
+ attributes: Dict[str, str] = None,
+ context: Optional[Context] = None,
):
+ time_unix_nano = time_ns()
self._measurement_consumer.consume_measurement(
- Measurement(amount, self, attributes)
+ Measurement(
+ amount,
+ time_unix_nano,
+ self,
+ context or get_current(),
+ attributes,
+ )
)
@@ -204,7 +228,10 @@ def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def record(
- self, amount: Union[int, float], attributes: Dict[str, str] = None
+ self,
+ amount: Union[int, float],
+ attributes: Dict[str, str] = None,
+ context: Optional[Context] = None,
):
if amount < 0:
_logger.warning(
@@ -212,8 +239,15 @@ def record(
self.name,
)
return
+ time_unix_nano = time_ns()
self._measurement_consumer.consume_measurement(
- Measurement(amount, self, attributes)
+ Measurement(
+ amount,
+ time_unix_nano,
+ self,
+ context or get_current(),
+ attributes,
+ )
)
@@ -224,10 +258,20 @@ def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def set(
- self, amount: Union[int, float], attributes: Dict[str, str] = None
+ self,
+ amount: Union[int, float],
+ attributes: Dict[str, str] = None,
+ context: Optional[Context] = None,
):
+ time_unix_nano = time_ns()
self._measurement_consumer.consume_measurement(
- Measurement(amount, self, attributes)
+ Measurement(
+ amount,
+ time_unix_nano,
+ self,
+ context or get_current(),
+ attributes,
+ )
)
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py
index 0dced5bcd35..56619a83a1a 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py
@@ -15,6 +15,7 @@
from dataclasses import dataclass
from typing import Union
+from opentelemetry.context import Context
from opentelemetry.metrics import Instrument
from opentelemetry.util.types import Attributes
@@ -23,8 +24,22 @@
class Measurement:
"""
Represents a data point reported via the metrics API to the SDK.
+
+ Attributes
+        value: Measured value.
+        time_unix_nano: The time the API call was made to record the Measurement.
+        instrument: The instrument that produced this `Measurement`.
+        context: The active Context of the Measurement at API call time.
+        attributes: Measurement attributes.
"""
+    # TODO Fix doc - if the valid Google-style `Attributes:` key is used, the
+    # attributes are documented twice: once by the napoleon extension and once
+    # by the autodoc extension. This raises a Sphinx error about a duplicated
+    # object description.
+ # See https://github.com/sphinx-doc/sphinx/issues/8664
+
value: Union[int, float]
+ time_unix_nano: int
instrument: Instrument
+ context: Context
attributes: Attributes = None
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py
index 4310061b823..b516d6abb05 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py
@@ -79,7 +79,15 @@ def __init__(
def consume_measurement(self, measurement: Measurement) -> None:
for reader_storage in self._reader_storages.values():
- reader_storage.consume_measurement(measurement)
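+            # Ask the SDK-wide exemplar filter whether this measurement is
+            # eligible to be sampled as an exemplar.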
+ reader_storage.consume_measurement(
+ measurement,
+ self._sdk_config.exemplar_filter.should_sample(
+ measurement.value,
+ measurement.time_unix_nano,
+ measurement.attributes,
+ measurement.context,
+ ),
+ )
def register_asynchronous_instrument(
self,
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py
index 7fac6c6c105..2564bbcd7f0 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py
@@ -113,11 +113,15 @@ def _get_or_init_view_instrument_match(
return view_instrument_matches
- def consume_measurement(self, measurement: Measurement) -> None:
+ def consume_measurement(
+ self, measurement: Measurement, should_sample_exemplar: bool = True
+ ) -> None:
for view_instrument_match in self._get_or_init_view_instrument_match(
measurement.instrument
):
- view_instrument_match.consume_measurement(measurement)
+ view_instrument_match.consume_measurement(
+ measurement, should_sample_exemplar
+ )
def collect(self) -> Optional[MetricsData]:
# Use a list instead of yielding to prevent a slow reader from holding
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py
index 473b45ed6b6..5df32ddaae1 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py
@@ -14,12 +14,13 @@
# pylint: disable=unused-import
-from dataclasses import asdict, dataclass
+from dataclasses import asdict, dataclass, field
from json import dumps, loads
from typing import Optional, Sequence, Union
# This kind of import is needed to avoid Sphinx errors.
import opentelemetry.sdk.metrics._internal
+from opentelemetry.sdk.metrics._internal.exemplar import Exemplar
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
from opentelemetry.util.types import Attributes
@@ -35,6 +36,7 @@ class NumberDataPoint:
start_time_unix_nano: int
time_unix_nano: int
value: Union[int, float]
+ exemplars: Sequence[Exemplar] = field(default_factory=list)
def to_json(self, indent=4) -> str:
return dumps(asdict(self), indent=indent)
@@ -55,6 +57,7 @@ class HistogramDataPoint:
explicit_bounds: Sequence[float]
min: float
max: float
+ exemplars: Sequence[Exemplar] = field(default_factory=list)
def to_json(self, indent=4) -> str:
return dumps(asdict(self), indent=indent)
@@ -85,6 +88,7 @@ class ExponentialHistogramDataPoint:
flags: int
min: float
max: float
+ exemplars: Sequence[Exemplar] = field(default_factory=list)
def to_json(self, indent=4) -> str:
return dumps(asdict(self), indent=indent)
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py
index 9594ab38a74..3d88facb0c3 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py
@@ -24,6 +24,7 @@
@dataclass
class SdkConfiguration:
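+    # Filter deciding which measurements are eligible for exemplar sampling.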
+ exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter"
resource: "opentelemetry.sdk.resources.Resource"
metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"]
views: Sequence["opentelemetry.sdk.metrics.View"]
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py
index 9473acde4d4..5dd11be1f94 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py
@@ -15,17 +15,36 @@
from fnmatch import fnmatch
from logging import getLogger
-from typing import Optional, Set, Type
+from typing import Callable, Optional, Set, Type
from opentelemetry.metrics import Instrument
from opentelemetry.sdk.metrics._internal.aggregation import (
Aggregation,
DefaultAggregation,
+ _Aggregation,
+ _ExplicitBucketHistogramAggregation,
+ _ExponentialBucketHistogramAggregation,
+)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlignedHistogramBucketExemplarReservoir,
+ ExemplarReservoirBuilder,
+ SimpleFixedSizeExemplarReservoir,
)
_logger = getLogger(__name__)
+def _default_reservoir_factory(
+ aggregation_type: Type[_Aggregation],
+) -> ExemplarReservoirBuilder:
+ """Default reservoir factory per aggregation."""
+ if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
+ return AlignedHistogramBucketExemplarReservoir
+ if issubclass(aggregation_type, _ExponentialBucketHistogramAggregation):
+ return SimpleFixedSizeExemplarReservoir
+ return SimpleFixedSizeExemplarReservoir
+
+
class View:
"""
A `View` configuration parameters can be used for the following
@@ -73,6 +92,9 @@ class View:
corresponding metrics stream. If `None` an instance of
`DefaultAggregation` will be used.
+    exemplar_reservoir_factory: This is a metric stream customizing attribute:
+        the exemplar reservoir factory. If `None`, a default factory based on
+        the aggregation type will be used.
+
instrument_unit: This is an instrument matching attribute: the unit the
instrument must have to match the view.
@@ -92,6 +114,9 @@ def __init__(
description: Optional[str] = None,
attribute_keys: Optional[Set[str]] = None,
aggregation: Optional[Aggregation] = None,
+ exemplar_reservoir_factory: Optional[
+ Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]
+ ] = None,
instrument_unit: Optional[str] = None,
):
if (
@@ -120,8 +145,8 @@ def __init__(
"characters in instrument_name"
)
- # _name, _description, _aggregation and _attribute_keys will be
- # accessed when instantiating a _ViewInstrumentMatch.
+ # _name, _description, _aggregation, _exemplar_reservoir_factory and
+ # _attribute_keys will be accessed when instantiating a _ViewInstrumentMatch.
self._name = name
self._instrument_type = instrument_type
self._instrument_name = instrument_name
@@ -133,6 +158,9 @@ def __init__(
self._description = description
self._attribute_keys = attribute_keys
self._aggregation = aggregation or self._default_aggregation
+ self._exemplar_reservoir_factory = (
+ exemplar_reservoir_factory or _default_reservoir_factory
+ )
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-branches
diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py
index 7d025e33305..91106ac4d61 100644
--- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py
+++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py
@@ -21,9 +21,11 @@
from math import ldexp
from random import Random, randrange
from sys import float_info, maxsize
+from time import time_ns
from types import MethodType
from unittest.mock import Mock, patch
+from opentelemetry.context import Context
from opentelemetry.sdk.metrics._internal.aggregation import (
AggregationTemporality,
_ExponentialBucketHistogramAggregation,
@@ -45,6 +47,7 @@
from opentelemetry.sdk.metrics._internal.point import (
ExponentialHistogramDataPoint,
)
+from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory
from opentelemetry.sdk.metrics.view import (
ExponentialBucketHistogramAggregation,
)
@@ -52,7 +55,6 @@
def get_counts(buckets: Buckets) -> int:
-
counts = []
for index in range(len(buckets)):
@@ -72,7 +74,6 @@ def swap(
first: _ExponentialBucketHistogramAggregation,
second: _ExponentialBucketHistogramAggregation,
):
-
for attribute in [
"_value_positive",
"_value_negative",
@@ -93,7 +94,7 @@ class TestExponentialBucketHistogramAggregation(TestCase):
def test_create_aggregation(self, mock_logarithm_mapping):
exponential_bucket_histogram_aggregation = (
ExponentialBucketHistogramAggregation()
- )._create_aggregation(Mock(), Mock(), Mock())
+ )._create_aggregation(Mock(), Mock(), Mock(), Mock())
self.assertEqual(
exponential_bucket_histogram_aggregation._max_scale, 20
@@ -103,7 +104,7 @@ def test_create_aggregation(self, mock_logarithm_mapping):
exponential_bucket_histogram_aggregation = (
ExponentialBucketHistogramAggregation(max_scale=10)
- )._create_aggregation(Mock(), Mock(), Mock())
+ )._create_aggregation(Mock(), Mock(), Mock(), Mock())
self.assertEqual(
exponential_bucket_histogram_aggregation._max_scale, 10
@@ -114,7 +115,7 @@ def test_create_aggregation(self, mock_logarithm_mapping):
with self.assertLogs(level=WARNING):
exponential_bucket_histogram_aggregation = (
ExponentialBucketHistogramAggregation(max_scale=100)
- )._create_aggregation(Mock(), Mock(), Mock())
+ )._create_aggregation(Mock(), Mock(), Mock(), Mock())
self.assertEqual(
exponential_bucket_histogram_aggregation._max_scale, 100
@@ -127,7 +128,6 @@ def assertInEpsilon(self, first, second, epsilon):
self.assertGreaterEqual(first, (second * (1 - epsilon)))
def require_equal(self, a, b):
-
if a._sum == 0 or b._sum == 0:
self.assertAlmostEqual(a._sum, b._sum, 1e-6)
else:
@@ -167,13 +167,27 @@ def test_alternating_growth_0(self):
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=4
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=4,
)
)
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(4, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(1, Mock()))
+ now = time_ns()
+ ctx = Context()
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(4, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(1, now, Mock(), ctx)
+ )
self.assertEqual(
exponential_histogram_aggregation._value_positive.offset, -1
@@ -194,16 +208,36 @@ def test_alternating_growth_1(self):
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=4
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=4,
)
)
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(1, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(8, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock()))
+ now = time_ns()
+ ctx = Context()
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(1, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(8, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(0.5, now, Mock(), ctx)
+ )
self.assertEqual(
exponential_histogram_aggregation._value_positive.offset, -1
@@ -217,9 +251,11 @@ def test_alternating_growth_1(self):
def test_permutations(self):
"""
Tests that every permutation of certain sequences with maxSize=2
- results¶ in the same scale=-1 histogram.
+ results in the same scale=-1 histogram.
"""
+ now = time_ns()
+ ctx = Context()
for test_values, expected in [
[
[0.5, 1.0, 2.0],
@@ -252,12 +288,13 @@ def test_permutations(self):
},
],
]:
-
for permutation in permutations(test_values):
-
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
AggregationTemporality.DELTA,
Mock(),
max_size=2,
@@ -265,9 +302,8 @@ def test_permutations(self):
)
for value in permutation:
-
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
self.assertEqual(
@@ -292,21 +328,24 @@ def test_permutations(self):
)
def test_ascending_sequence(self):
-
for max_size in [3, 4, 6, 9]:
for offset in range(-5, 6):
for init_scale in [0, 4]:
self.ascending_sequence_test(max_size, offset, init_scale)
+ # pylint: disable=too-many-locals
def ascending_sequence_test(
self, max_size: int, offset: int, init_scale: int
):
-
+ now = time_ns()
+ ctx = Context()
for step in range(max_size, max_size * 4):
-
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
AggregationTemporality.DELTA,
Mock(),
max_size=max_size,
@@ -326,7 +365,7 @@ def ascending_sequence_test(
for index in range(max_size):
value = center_val(mapping, offset + index)
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
sum_ += value
@@ -339,7 +378,7 @@ def ascending_sequence_test(
)
exponential_histogram_aggregation.aggregate(
- Measurement(max_val, Mock())
+ Measurement(max_val, now, Mock(), ctx)
)
sum_ += max_val
@@ -403,7 +442,8 @@ def ascending_sequence_test(
)
def test_reset(self):
-
+ now = time_ns()
+ ctx = Context()
for increment in [0x1, 0x100, 0x10000, 0x100000000, 0x200000000]:
def mock_increment(self, bucket_index: int) -> None:
@@ -415,7 +455,13 @@ def mock_increment(self, bucket_index: int) -> None:
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=256
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=256,
)
)
@@ -439,7 +485,7 @@ def mock_increment(self, bucket_index: int) -> None:
),
):
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
exponential_histogram_aggregation._count *= increment
exponential_histogram_aggregation._sum *= increment
@@ -470,15 +516,29 @@ def mock_increment(self, bucket_index: int) -> None:
)
def test_move_into(self):
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation_0 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=256
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=256,
)
)
exponential_histogram_aggregation_1 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=256
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=256,
)
)
@@ -487,10 +547,10 @@ def test_move_into(self):
for index in range(2, 257):
expect += index
exponential_histogram_aggregation_0.aggregate(
- Measurement(index, Mock())
+ Measurement(index, now, Mock(), ctx)
)
exponential_histogram_aggregation_0.aggregate(
- Measurement(0, Mock())
+ Measurement(0, now, Mock(), ctx)
)
swap(
@@ -524,10 +584,18 @@ def test_move_into(self):
)
def test_very_large_numbers(self):
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=2
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=2,
)
)
@@ -546,10 +614,10 @@ def expect_balanced(count: int):
)
exponential_histogram_aggregation.aggregate(
- Measurement(2**-100, Mock())
+ Measurement(2**-100, now, Mock(), ctx)
)
exponential_histogram_aggregation.aggregate(
- Measurement(2**100, Mock())
+ Measurement(2**100, now, Mock(), ctx)
)
self.assertLessEqual(
@@ -565,10 +633,10 @@ def expect_balanced(count: int):
expect_balanced(1)
exponential_histogram_aggregation.aggregate(
- Measurement(2**-127, Mock())
+ Measurement(2**-127, now, Mock(), ctx)
)
exponential_histogram_aggregation.aggregate(
- Measurement(2**128, Mock())
+ Measurement(2**128, now, Mock(), ctx)
)
self.assertLessEqual(
@@ -584,10 +652,10 @@ def expect_balanced(count: int):
expect_balanced(2)
exponential_histogram_aggregation.aggregate(
- Measurement(2**-129, Mock())
+ Measurement(2**-129, now, Mock(), ctx)
)
exponential_histogram_aggregation.aggregate(
- Measurement(2**255, Mock())
+ Measurement(2**255, now, Mock(), ctx)
)
self.assertLessEqual(
@@ -602,19 +670,29 @@ def expect_balanced(count: int):
expect_balanced(3)
def test_full_range(self):
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=2
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=2,
)
)
exponential_histogram_aggregation.aggregate(
- Measurement(float_info.max, Mock())
+ Measurement(float_info.max, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(1, now, Mock(), ctx)
)
- exponential_histogram_aggregation.aggregate(Measurement(1, Mock()))
exponential_histogram_aggregation.aggregate(
- Measurement(2**-1074, Mock())
+ Measurement(2**-1074, now, Mock(), ctx)
)
self.assertEqual(
@@ -641,16 +719,22 @@ def test_full_range(self):
)
def test_aggregator_min_max(self):
-
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
for value in [1, 3, 5, 7, 9]:
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
self.assertEqual(1, exponential_histogram_aggregation._min)
@@ -658,41 +742,62 @@ def test_aggregator_min_max(self):
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
for value in [-1, -3, -5, -7, -9]:
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
self.assertEqual(-9, exponential_histogram_aggregation._min)
self.assertEqual(-1, exponential_histogram_aggregation._max)
def test_aggregator_copy_swap(self):
-
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation_0 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
for value in [1, 3, 5, 7, 9, -1, -3, -5]:
exponential_histogram_aggregation_0.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
exponential_histogram_aggregation_1 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
for value in [5, 4, 3, 2]:
exponential_histogram_aggregation_1.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
exponential_histogram_aggregation_2 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
@@ -735,10 +840,17 @@ def test_aggregator_copy_swap(self):
)
def test_zero_count_by_increment(self):
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation_0 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
@@ -746,11 +858,16 @@ def test_zero_count_by_increment(self):
for _ in range(increment):
exponential_histogram_aggregation_0.aggregate(
- Measurement(0, Mock())
+ Measurement(0, now, Mock(), ctx)
)
exponential_histogram_aggregation_1 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
@@ -772,7 +889,7 @@ def mock_increment(self, bucket_index: int) -> None:
),
):
exponential_histogram_aggregation_1.aggregate(
- Measurement(0, Mock())
+ Measurement(0, now, Mock(), ctx)
)
exponential_histogram_aggregation_1._count *= increment
exponential_histogram_aggregation_1._zero_count *= increment
@@ -783,10 +900,17 @@ def mock_increment(self, bucket_index: int) -> None:
)
def test_one_count_by_increment(self):
+ now = time_ns()
+ ctx = Context()
exponential_histogram_aggregation_0 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
@@ -794,11 +918,16 @@ def test_one_count_by_increment(self):
for _ in range(increment):
exponential_histogram_aggregation_0.aggregate(
- Measurement(1, Mock())
+ Measurement(1, now, Mock(), ctx)
)
exponential_histogram_aggregation_1 = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock()
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
)
)
@@ -820,7 +949,7 @@ def mock_increment(self, bucket_index: int) -> None:
),
):
exponential_histogram_aggregation_1.aggregate(
- Measurement(1, Mock())
+ Measurement(1, now, Mock(), ctx)
)
exponential_histogram_aggregation_1._count *= increment
exponential_histogram_aggregation_1._sum *= increment
@@ -831,13 +960,11 @@ def mock_increment(self, bucket_index: int) -> None:
)
def test_boundary_statistics(self):
-
total = MAX_NORMAL_EXPONENT - MIN_NORMAL_EXPONENT + 1
for scale in range(
LogarithmMapping._min_scale, LogarithmMapping._max_scale + 1
):
-
above = 0
below = 0
@@ -870,6 +997,9 @@ def test_min_max_size(self):
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
AggregationTemporality.DELTA,
Mock(),
max_size=_ExponentialBucketHistogramAggregation._min_max_size,
@@ -892,31 +1022,49 @@ def test_aggregate_collect(self):
"""
Tests a repeated cycle of aggregation and collection.
"""
+ now = time_ns()
+ ctx = Context()
+
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
AggregationTemporality.DELTA,
Mock(),
)
)
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
exponential_histogram_aggregation.collect(
AggregationTemporality.CUMULATIVE, 0
)
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
exponential_histogram_aggregation.collect(
AggregationTemporality.CUMULATIVE, 0
)
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
exponential_histogram_aggregation.collect(
AggregationTemporality.CUMULATIVE, 0
)
def test_collect_results_cumulative(self) -> None:
+ now = time_ns()
+ ctx = Context()
+
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
AggregationTemporality.DELTA,
Mock(),
)
@@ -925,13 +1073,19 @@ def test_collect_results_cumulative(self) -> None:
self.assertEqual(exponential_histogram_aggregation._mapping._scale, 20)
- exponential_histogram_aggregation.aggregate(Measurement(2, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(2, now, Mock(), ctx)
+ )
self.assertEqual(exponential_histogram_aggregation._mapping._scale, 20)
- exponential_histogram_aggregation.aggregate(Measurement(4, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(4, now, Mock(), ctx)
+ )
self.assertEqual(exponential_histogram_aggregation._mapping._scale, 7)
- exponential_histogram_aggregation.aggregate(Measurement(1, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(1, now, Mock(), ctx)
+ )
self.assertEqual(exponential_histogram_aggregation._mapping._scale, 6)
collection_0 = exponential_histogram_aggregation.collect(
@@ -952,11 +1106,21 @@ def test_collect_results_cumulative(self) -> None:
self.assertEqual(collection_0.min, 1)
self.assertEqual(collection_0.max, 4)
- exponential_histogram_aggregation.aggregate(Measurement(1, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(8, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(0.1, Mock()))
- exponential_histogram_aggregation.aggregate(Measurement(0.045, Mock()))
+ exponential_histogram_aggregation.aggregate(
+ Measurement(1, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(8, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(0.5, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(0.1, now, Mock(), ctx)
+ )
+ exponential_histogram_aggregation.aggregate(
+ Measurement(0.045, now, Mock(), ctx)
+ )
collection_1 = exponential_histogram_aggregation.collect(
AggregationTemporality.CUMULATIVE, Mock()
@@ -1002,8 +1166,12 @@ def test_collect_results_cumulative(self) -> None:
self.assertEqual(collection_1.max, 8)
def test_cumulative_aggregation_with_random_data(self) -> None:
+ now = time_ns()
+ ctx = Context()
+
histogram = _ExponentialBucketHistogramAggregation(
Mock(),
+ _default_reservoir_factory(_ExponentialBucketHistogramAggregation),
AggregationTemporality.DELTA,
Mock(),
)
@@ -1053,22 +1221,31 @@ def collect_and_validate(values, histogram) -> None:
# avoid both values being 0
value = random_generator.randint(0 if i else 1, 1000)
values.append(value)
- histogram.aggregate(Measurement(value, Mock()))
+ histogram.aggregate(Measurement(value, now, Mock(), ctx))
if i % 20 == 0:
collect_and_validate(values, histogram)
collect_and_validate(values, histogram)
def test_merge_collect_cumulative(self):
+ now = time_ns()
+ ctx = Context()
+
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=4
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=4,
)
)
for value in [2, 4, 8, 16]:
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0)
@@ -1089,7 +1266,7 @@ def test_merge_collect_cumulative(self):
for value in [1, 2, 4, 8]:
exponential_histogram_aggregation.aggregate(
- Measurement(1 / value, Mock())
+ Measurement(1 / value, now, Mock(), ctx)
)
self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0)
@@ -1109,15 +1286,24 @@ def test_merge_collect_cumulative(self):
self.assertEqual(result_1.scale, -1)
def test_merge_collect_delta(self):
+ now = time_ns()
+ ctx = Context()
+
exponential_histogram_aggregation = (
_ExponentialBucketHistogramAggregation(
- Mock(), AggregationTemporality.DELTA, Mock(), max_size=4
+ Mock(),
+ _default_reservoir_factory(
+ _ExponentialBucketHistogramAggregation
+ ),
+ AggregationTemporality.DELTA,
+ Mock(),
+ max_size=4,
)
)
for value in [2, 4, 8, 16]:
exponential_histogram_aggregation.aggregate(
- Measurement(value, Mock())
+ Measurement(value, now, Mock(), ctx)
)
self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0)
@@ -1136,7 +1322,7 @@ def test_merge_collect_delta(self):
for value in [1, 2, 4, 8]:
exponential_histogram_aggregation.aggregate(
- Measurement(1 / value, Mock())
+ Measurement(1 / value, now, Mock(), ctx)
)
self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0)
diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py
index 1b3283717ae..7e77a878d87 100644
--- a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py
@@ -14,16 +14,21 @@
from io import StringIO
from json import loads
+from os import linesep
from unittest import TestCase
+from unittest.mock import Mock, patch
+from opentelemetry.context import Context
from opentelemetry.metrics import get_meter, set_meter_provider
-from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics import AlwaysOnExemplarFilter, MeterProvider
from opentelemetry.sdk.metrics.export import (
ConsoleMetricExporter,
PeriodicExportingMetricReader,
)
from opentelemetry.test.globals_test import reset_metrics_globals
+TEST_TIMESTAMP = 1_234_567_890
+
class TestConsoleExporter(TestCase):
def setUp(self):
@@ -88,3 +93,53 @@ def test_console_exporter_no_export(self):
expected = ""
self.assertEqual(actual, expected)
+
+ @patch(
+ "opentelemetry.sdk.metrics._internal.instrument.time_ns",
+ Mock(return_value=TEST_TIMESTAMP),
+ )
+ def test_console_exporter_with_exemplars(self):
+ ctx = Context()
+
+ output = StringIO()
+ exporter = ConsoleMetricExporter(out=output)
+ reader = PeriodicExportingMetricReader(
+ exporter, export_interval_millis=100
+ )
+ provider = MeterProvider(
+ metric_readers=[reader], exemplar_filter=AlwaysOnExemplarFilter()
+ )
+ set_meter_provider(provider)
+ meter = get_meter(__name__)
+ counter = meter.create_counter(
+ "name", description="description", unit="unit"
+ )
+ counter.add(1, attributes={"a": "b"}, context=ctx)
+ provider.shutdown()
+
+ output.seek(0)
+ joined_output = "".join(output.readlines())
+ result_0 = loads(joined_output.strip(linesep))
+
+ self.assertGreater(len(result_0), 0)
+
+ metrics = result_0["resource_metrics"][0]["scope_metrics"][0]
+
+ self.assertEqual(metrics["scope"]["name"], "test_console_exporter")
+
+ point = metrics["metrics"][0]["data"]["data_points"][0]
+
+ self.assertEqual(point["attributes"], {"a": "b"})
+ self.assertEqual(point["value"], 1)
+ self.assertEqual(
+ point["exemplars"],
+ [
+ {
+ "filtered_attributes": {},
+ "value": 1,
+ "time_unix_nano": TEST_TIMESTAMP,
+ "span_id": None,
+ "trace_id": None,
+ }
+ ],
+ )
diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py b/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py
index 18b8cbdcea0..22f20002dea 100644
--- a/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py
@@ -16,14 +16,23 @@
import io
from typing import Generator, Iterable, List
from unittest import TestCase
+from unittest.mock import Mock, patch
+from opentelemetry.context import Context
from opentelemetry.metrics import CallbackOptions, Instrument, Observation
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics._internal.measurement import Measurement
# FIXME Test that the instrument methods can be called concurrently safely.
+TEST_TIMESTAMP = 1_234_567_890
+TEST_CONTEXT = Context()
+
+@patch(
+ "opentelemetry.sdk.metrics._internal.instrument.time_ns",
+ Mock(return_value=TEST_TIMESTAMP),
+)
class TestCpuTimeIntegration(TestCase):
"""Integration test of scraping CPU time from proc stat with an observable
counter"""
@@ -47,92 +56,128 @@ def create_measurements_expected(
return [
Measurement(
6150.29,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "user"},
),
Measurement(
3177.46,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "nice"},
),
Measurement(
5946.01,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "system"},
),
Measurement(
891264.59,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "idle"},
),
Measurement(
1296.29,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "iowait"},
),
Measurement(
0.0,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "irq"},
),
Measurement(
8343.46,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "softirq"},
),
Measurement(
421.37,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "guest"},
),
Measurement(
0,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu0", "state": "guest_nice"},
),
Measurement(
5882.32,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "user"},
),
Measurement(
3491.85,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "nice"},
),
Measurement(
6404.92,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "system"},
),
Measurement(
891564.11,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "idle"},
),
Measurement(
1244.85,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "iowait"},
),
Measurement(
0,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "irq"},
),
Measurement(
2410.04,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "softirq"},
),
Measurement(
418.62,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "guest"},
),
Measurement(
0,
+ TEST_TIMESTAMP,
instrument=instrument,
+ context=TEST_CONTEXT,
attributes={"cpu": "cpu1", "state": "guest_nice"},
),
]
diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py
index eaf590219ba..6095781cb21 100644
--- a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py
@@ -15,6 +15,10 @@
from unittest import TestCase
from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlwaysOffExemplarFilter,
+ AlwaysOnExemplarFilter,
+)
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
@@ -88,3 +92,99 @@ def test_histogram_counter_collection(self):
),
1,
)
+
+ def test_histogram_with_exemplars(self):
+
+ in_memory_metric_reader = InMemoryMetricReader()
+
+ provider = MeterProvider(
+ resource=Resource.create({SERVICE_NAME: "otel-test"}),
+ metric_readers=[in_memory_metric_reader],
+ exemplar_filter=AlwaysOnExemplarFilter(),
+ )
+ meter = provider.get_meter("my-meter")
+ histogram = meter.create_histogram("my_histogram")
+
+ histogram.record(
+ 2, {"attribute": "value1"}
+ ) # Should go in the first bucket
+ histogram.record(
+ 7, {"attribute": "value2"}
+ ) # Should go in the second bucket
+ histogram.record(
+ 9, {"attribute": "value2"}
+ ) # Should also go in the second bucket
+ histogram.record(
+ 15, {"attribute": "value3"}
+ ) # Should go in the third bucket
+
+ metric_data = in_memory_metric_reader.get_metrics_data()
+
+ self.assertEqual(
+ len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1
+ )
+ histogram_metric = (
+ metric_data.resource_metrics[0].scope_metrics[0].metrics[0]
+ )
+
+ self.assertEqual(len(histogram_metric.data.data_points), 3)
+
+ self.assertEqual(
+ len(histogram_metric.data.data_points[0].exemplars), 1
+ )
+ self.assertEqual(
+ len(histogram_metric.data.data_points[1].exemplars), 1
+ )
+ self.assertEqual(
+ len(histogram_metric.data.data_points[2].exemplars), 1
+ )
+
+ self.assertEqual(histogram_metric.data.data_points[0].sum, 2)
+ self.assertEqual(histogram_metric.data.data_points[1].sum, 16)
+ self.assertEqual(histogram_metric.data.data_points[2].sum, 15)
+
+ self.assertEqual(
+ histogram_metric.data.data_points[0].exemplars[0].value, 2.0
+ )
+ self.assertEqual(
+ histogram_metric.data.data_points[1].exemplars[0].value, 9.0
+ )
+ self.assertEqual(
+ histogram_metric.data.data_points[2].exemplars[0].value, 15.0
+ )
+
+ def test_filter_with_exemplars(self):
+ in_memory_metric_reader = InMemoryMetricReader()
+
+ provider = MeterProvider(
+ resource=Resource.create({SERVICE_NAME: "otel-test"}),
+ metric_readers=[in_memory_metric_reader],
+ exemplar_filter=AlwaysOffExemplarFilter(),
+ )
+ meter = provider.get_meter("my-meter")
+ histogram = meter.create_histogram("my_histogram")
+
+ histogram.record(
+ 2, {"attribute": "value1"}
+ ) # Should go in the first bucket
+ histogram.record(
+ 7, {"attribute": "value2"}
+ ) # Should go in the second bucket
+
+ metric_data = in_memory_metric_reader.get_metrics_data()
+
+ self.assertEqual(
+ len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1
+ )
+ histogram_metric = (
+ metric_data.resource_metrics[0].scope_metrics[0].metrics[0]
+ )
+
+ self.assertEqual(len(histogram_metric.data.data_points), 2)
+
+ self.assertEqual(
+ len(histogram_metric.data.data_points[0].exemplars), 0
+ )
+ self.assertEqual(
+ len(histogram_metric.data.data_points[1].exemplars), 0
+ )
diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py
index 74a77eb5345..0d56ca92bc5 100644
--- a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py
@@ -20,8 +20,10 @@
from pytest import mark
+from opentelemetry.context import Context
from opentelemetry.metrics import Observation
from opentelemetry.sdk.metrics import Counter, MeterProvider, ObservableCounter
+from opentelemetry.sdk.metrics._internal.exemplar import AlwaysOnExemplarFilter
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
InMemoryMetricReader,
@@ -474,3 +476,38 @@ def test_synchronous_cumulative_temporality(self):
start_time_unix_nano, metric_data.start_time_unix_nano
)
self.assertEqual(metric_data.value, 80)
+
+ def test_sum_aggregation_with_exemplars(self):
+
+ in_memory_metric_reader = InMemoryMetricReader()
+
+ provider = MeterProvider(
+ metric_readers=[in_memory_metric_reader],
+ exemplar_filter=AlwaysOnExemplarFilter(),
+ )
+
+ meter = provider.get_meter("my-meter")
+ counter = meter.create_counter("my_counter")
+
+ counter.add(2, {"attribute": "value1"}, context=Context())
+ counter.add(5, {"attribute": "value2"}, context=Context())
+ counter.add(3, {"attribute": "value3"}, context=Context())
+
+ metric_data = in_memory_metric_reader.get_metrics_data()
+
+ self.assertEqual(
+ len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1
+ )
+
+ sum_metric = (
+ metric_data.resource_metrics[0].scope_metrics[0].metrics[0]
+ )
+
+ data_points = sum_metric.data.data_points
+ self.assertEqual(len(data_points), 3)
+
+ self.assertEqual(data_points[0].exemplars[0].value, 2.0)
+ self.assertEqual(data_points[1].exemplars[0].value, 5.0)
+ self.assertEqual(data_points[2].exemplars[0].value, 3.0)
+
+ provider.shutdown()
diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py
index 7ea463ec8a8..3eeb63e26c3 100644
--- a/opentelemetry-sdk/tests/metrics/test_aggregation.py
+++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py
@@ -15,16 +15,21 @@
# pylint: disable=protected-access
from math import inf
-from time import sleep
+from time import sleep, time_ns
from typing import Union
from unittest import TestCase
from unittest.mock import Mock
+from opentelemetry.context import Context
from opentelemetry.sdk.metrics._internal.aggregation import (
_ExplicitBucketHistogramAggregation,
_LastValueAggregation,
_SumAggregation,
)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlignedHistogramBucketExemplarReservoir,
+ SimpleFixedSizeExemplarReservoir,
+)
from opentelemetry.sdk.metrics._internal.instrument import (
_Counter,
_Gauge,
@@ -35,6 +40,7 @@
_UpDownCounter,
)
from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
NumberDataPoint,
@@ -51,7 +57,13 @@
def measurement(
value: Union[int, float], attributes: Attributes = None
) -> Measurement:
- return Measurement(value, instrument=Mock(), attributes=attributes)
+ return Measurement(
+ value,
+ time_ns(),
+ instrument=Mock(),
+ context=Context(),
+ attributes=attributes,
+ )
class TestSynchronousSumAggregation(TestCase):
@@ -61,7 +73,11 @@ def test_aggregate_delta(self):
"""
synchronous_sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.DELTA, 0
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
synchronous_sum_aggregation.aggregate(measurement(1))
@@ -71,7 +87,11 @@ def test_aggregate_delta(self):
self.assertEqual(synchronous_sum_aggregation._value, 6)
synchronous_sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.DELTA, 0
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
synchronous_sum_aggregation.aggregate(measurement(1))
@@ -86,7 +106,11 @@ def test_aggregate_cumulative(self):
"""
synchronous_sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.CUMULATIVE, 0
+ Mock(),
+ True,
+ AggregationTemporality.CUMULATIVE,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
synchronous_sum_aggregation.aggregate(measurement(1))
@@ -96,7 +120,11 @@ def test_aggregate_cumulative(self):
self.assertEqual(synchronous_sum_aggregation._value, 6)
synchronous_sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.CUMULATIVE, 0
+ Mock(),
+ True,
+ AggregationTemporality.CUMULATIVE,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
synchronous_sum_aggregation.aggregate(measurement(1))
@@ -111,7 +139,11 @@ def test_collect_delta(self):
"""
synchronous_sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.DELTA, 0
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
synchronous_sum_aggregation.aggregate(measurement(1))
@@ -137,7 +169,11 @@ def test_collect_delta(self):
)
synchronous_sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.DELTA, 0
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
synchronous_sum_aggregation.aggregate(measurement(1))
@@ -168,7 +204,11 @@ def test_collect_cumulative(self):
"""
sum_aggregation = _SumAggregation(
- Mock(), True, AggregationTemporality.CUMULATIVE, 0
+ Mock(),
+ True,
+ AggregationTemporality.CUMULATIVE,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
)
sum_aggregation.aggregate(measurement(1))
@@ -204,7 +244,9 @@ def test_aggregate(self):
temporality
"""
- last_value_aggregation = _LastValueAggregation(Mock())
+ last_value_aggregation = _LastValueAggregation(
+ Mock(), _default_reservoir_factory(_LastValueAggregation)
+ )
last_value_aggregation.aggregate(measurement(1))
self.assertEqual(last_value_aggregation._value, 1)
@@ -220,7 +262,9 @@ def test_collect(self):
`LastValueAggregation` collects number data points
"""
- last_value_aggregation = _LastValueAggregation(Mock())
+ last_value_aggregation = _LastValueAggregation(
+ Mock(), _default_reservoir_factory(_LastValueAggregation)
+ )
self.assertIsNone(
last_value_aggregation.collect(
@@ -279,6 +323,9 @@ def test_aggregate(self):
Mock(),
AggregationTemporality.DELTA,
0,
+ _default_reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ ),
boundaries=[0, 2, 4],
)
)
@@ -316,7 +363,12 @@ def test_min_max(self):
explicit_bucket_histogram_aggregation = (
_ExplicitBucketHistogramAggregation(
- Mock(), AggregationTemporality.CUMULATIVE, 0
+ Mock(),
+ AggregationTemporality.CUMULATIVE,
+ 0,
+ _default_reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ ),
)
)
@@ -334,6 +386,9 @@ def test_min_max(self):
Mock(),
AggregationTemporality.CUMULATIVE,
0,
+ _default_reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ ),
record_min_max=False,
)
)
@@ -357,6 +412,9 @@ def test_collect(self):
Mock(),
AggregationTemporality.DELTA,
0,
+ _default_reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ ),
boundaries=[0, 1, 2],
)
)
@@ -392,7 +450,12 @@ def test_collect(self):
def test_boundaries(self):
self.assertEqual(
_ExplicitBucketHistogramAggregation(
- Mock(), AggregationTemporality.CUMULATIVE, 0
+ Mock(),
+ AggregationTemporality.CUMULATIVE,
+ 0,
+ _default_reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ ),
)._boundaries,
(
0.0,
@@ -418,19 +481,25 @@ class TestAggregationFactory(TestCase):
def test_sum_factory(self):
counter = _Counter("name", Mock(), Mock())
factory = SumAggregation()
- aggregation = factory._create_aggregation(counter, Mock(), 0)
+ aggregation = factory._create_aggregation(
+ counter, Mock(), _default_reservoir_factory, 0
+ )
self.assertIsInstance(aggregation, _SumAggregation)
self.assertTrue(aggregation._instrument_is_monotonic)
self.assertEqual(
aggregation._instrument_aggregation_temporality,
AggregationTemporality.DELTA,
)
- aggregation2 = factory._create_aggregation(counter, Mock(), 0)
+ aggregation2 = factory._create_aggregation(
+ counter, Mock(), _default_reservoir_factory, 0
+ )
self.assertNotEqual(aggregation, aggregation2)
counter = _UpDownCounter("name", Mock(), Mock())
factory = SumAggregation()
- aggregation = factory._create_aggregation(counter, Mock(), 0)
+ aggregation = factory._create_aggregation(
+ counter, Mock(), _default_reservoir_factory, 0
+ )
self.assertIsInstance(aggregation, _SumAggregation)
self.assertFalse(aggregation._instrument_is_monotonic)
self.assertEqual(
@@ -440,7 +509,9 @@ def test_sum_factory(self):
counter = _ObservableCounter("name", Mock(), Mock(), None)
factory = SumAggregation()
- aggregation = factory._create_aggregation(counter, Mock(), 0)
+ aggregation = factory._create_aggregation(
+ counter, Mock(), _default_reservoir_factory, 0
+ )
self.assertIsInstance(aggregation, _SumAggregation)
self.assertTrue(aggregation._instrument_is_monotonic)
self.assertEqual(
@@ -457,19 +528,27 @@ def test_explicit_bucket_histogram_factory(self):
),
record_min_max=False,
)
- aggregation = factory._create_aggregation(histo, Mock(), 0)
+ aggregation = factory._create_aggregation(
+ histo, Mock(), _default_reservoir_factory, 0
+ )
self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation)
self.assertFalse(aggregation._record_min_max)
self.assertEqual(aggregation._boundaries, (0.0, 5.0))
- aggregation2 = factory._create_aggregation(histo, Mock(), 0)
+ aggregation2 = factory._create_aggregation(
+ histo, Mock(), _default_reservoir_factory, 0
+ )
self.assertNotEqual(aggregation, aggregation2)
def test_last_value_factory(self):
counter = _Counter("name", Mock(), Mock())
factory = LastValueAggregation()
- aggregation = factory._create_aggregation(counter, Mock(), 0)
+ aggregation = factory._create_aggregation(
+ counter, Mock(), _default_reservoir_factory, 0
+ )
self.assertIsInstance(aggregation, _LastValueAggregation)
- aggregation2 = factory._create_aggregation(counter, Mock(), 0)
+ aggregation2 = factory._create_aggregation(
+ counter, Mock(), _default_reservoir_factory, 0
+ )
self.assertNotEqual(aggregation, aggregation2)
@@ -479,9 +558,11 @@ def setUpClass(cls):
cls.default_aggregation = DefaultAggregation()
def test_counter(self):
-
aggregation = self.default_aggregation._create_aggregation(
- _Counter("name", Mock(), Mock()), Mock(), 0
+ _Counter("name", Mock(), Mock()),
+ Mock(),
+ _default_reservoir_factory,
+ 0,
)
self.assertIsInstance(aggregation, _SumAggregation)
self.assertTrue(aggregation._instrument_is_monotonic)
@@ -491,9 +572,11 @@ def test_counter(self):
)
def test_up_down_counter(self):
-
aggregation = self.default_aggregation._create_aggregation(
- _UpDownCounter("name", Mock(), Mock()), Mock(), 0
+ _UpDownCounter("name", Mock(), Mock()),
+ Mock(),
+ _default_reservoir_factory,
+ 0,
)
self.assertIsInstance(aggregation, _SumAggregation)
self.assertFalse(aggregation._instrument_is_monotonic)
@@ -503,10 +586,10 @@ def test_up_down_counter(self):
)
def test_observable_counter(self):
-
aggregation = self.default_aggregation._create_aggregation(
_ObservableCounter("name", Mock(), Mock(), callbacks=[Mock()]),
Mock(),
+ _default_reservoir_factory,
0,
)
self.assertIsInstance(aggregation, _SumAggregation)
@@ -517,12 +600,12 @@ def test_observable_counter(self):
)
def test_observable_up_down_counter(self):
-
aggregation = self.default_aggregation._create_aggregation(
_ObservableUpDownCounter(
"name", Mock(), Mock(), callbacks=[Mock()]
),
Mock(),
+ _default_reservoir_factory,
0,
)
self.assertIsInstance(aggregation, _SumAggregation)
@@ -533,7 +616,6 @@ def test_observable_up_down_counter(self):
)
def test_histogram(self):
-
aggregation = self.default_aggregation._create_aggregation(
_Histogram(
"name",
@@ -541,12 +623,12 @@ def test_histogram(self):
Mock(),
),
Mock(),
+ _default_reservoir_factory,
0,
)
self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation)
def test_gauge(self):
-
aggregation = self.default_aggregation._create_aggregation(
_Gauge(
"name",
@@ -554,12 +636,12 @@ def test_gauge(self):
Mock(),
),
Mock(),
+ _default_reservoir_factory,
0,
)
self.assertIsInstance(aggregation, _LastValueAggregation)
def test_observable_gauge(self):
-
aggregation = self.default_aggregation._create_aggregation(
_ObservableGauge(
"name",
@@ -568,6 +650,101 @@ def test_observable_gauge(self):
callbacks=[Mock()],
),
Mock(),
+ _default_reservoir_factory,
0,
)
self.assertIsInstance(aggregation, _LastValueAggregation)
+
+
+class TestExemplarsFromAggregations(TestCase):
+
+ def test_collection_simple_fixed_size_reservoir(self):
+ synchronous_sum_aggregation = _SumAggregation(
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ lambda: SimpleFixedSizeExemplarReservoir(size=3),
+ )
+
+ synchronous_sum_aggregation.aggregate(measurement(1))
+ synchronous_sum_aggregation.aggregate(measurement(2))
+ synchronous_sum_aggregation.aggregate(measurement(3))
+
+ self.assertEqual(synchronous_sum_aggregation._value, 6)
+ datapoint = synchronous_sum_aggregation.collect(
+ AggregationTemporality.CUMULATIVE, 0
+ )
+        # As the reservoir has multiple buckets, it may store up to
+        # 3 exemplars
+ self.assertGreater(len(datapoint.exemplars), 0)
+ self.assertLessEqual(len(datapoint.exemplars), 3)
+
+ def test_collection_simple_fixed_size_reservoir_with_default_reservoir(
+ self,
+ ):
+
+ synchronous_sum_aggregation = _SumAggregation(
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ _default_reservoir_factory(_SumAggregation),
+ )
+
+ synchronous_sum_aggregation.aggregate(measurement(1))
+ synchronous_sum_aggregation.aggregate(measurement(2))
+ synchronous_sum_aggregation.aggregate(measurement(3))
+
+ self.assertEqual(synchronous_sum_aggregation._value, 6)
+ datapoint = synchronous_sum_aggregation.collect(
+ AggregationTemporality.CUMULATIVE, 0
+ )
+ self.assertEqual(len(datapoint.exemplars), 1)
+
+ def test_collection_aligned_histogram_bucket_reservoir(self):
+ boundaries = [5.0, 10.0, 20.0]
+ synchronous_sum_aggregation = _SumAggregation(
+ Mock(),
+ True,
+ AggregationTemporality.DELTA,
+ 0,
+ lambda: AlignedHistogramBucketExemplarReservoir(boundaries),
+ )
+
+ synchronous_sum_aggregation.aggregate(measurement(2.0))
+ synchronous_sum_aggregation.aggregate(measurement(4.0))
+ synchronous_sum_aggregation.aggregate(measurement(6.0))
+ synchronous_sum_aggregation.aggregate(measurement(15.0))
+ synchronous_sum_aggregation.aggregate(measurement(25.0))
+
+ datapoint = synchronous_sum_aggregation.collect(
+ AggregationTemporality.CUMULATIVE, 0
+ )
+ self.assertEqual(len(datapoint.exemplars), 4)
+
+ # Verify that exemplars are associated with the correct boundaries
+ expected_buckets = [
+ (
+ 4.0,
+ boundaries[0],
+ ), # First bucket, should hold the last value <= 5.0
+ (
+ 6.0,
+ boundaries[1],
+ ), # Second bucket, should hold the last value <= 10.0
+ (
+ 15.0,
+ boundaries[2],
+ ), # Third bucket, should hold the last value <= 20.0
+ (25.0, None), # Last bucket, should hold the value > 20.0
+ ]
+
+ for exemplar, (value, boundary) in zip(
+ datapoint.exemplars, expected_buckets
+ ):
+ self.assertEqual(exemplar.value, value)
+ if boundary is not None:
+ self.assertLessEqual(exemplar.value, boundary)
+ else:
+ self.assertGreater(exemplar.value, boundaries[-1])
diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py
new file mode 100644
index 00000000000..daca0e60618
--- /dev/null
+++ b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py
@@ -0,0 +1,58 @@
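+# Tests for the SDK exemplar filters. should_sample receives a measurement
+# value, a timestamp, the measurement attributes and the current Context.
+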
+from unittest import TestCase
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlwaysOffExemplarFilter,
+ AlwaysOnExemplarFilter,
+ TraceBasedExemplarFilter,
+)
+from opentelemetry.trace import TraceFlags
+from opentelemetry.trace.span import SpanContext
+
+
+class TestAlwaysOnExemplarFilter(TestCase):
+ def test_should_sample(self):
+ filter = AlwaysOnExemplarFilter()
+ self.assertTrue(filter.should_sample(10, 0, {}, Context()))
+
+
+class TestAlwaysOffExemplarFilter(TestCase):
+ def test_should_sample(self):
+ filter = AlwaysOffExemplarFilter()
+ self.assertFalse(filter.should_sample(10, 0, {}, Context()))
+
+
+class TestTraceBasedExemplarFilter(TestCase):
+ TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16)
+ SPAN_ID = int("6e0c63257de34c92", 16)
+
+ def test_should_not_sample_without_trace(self):
+ filter = TraceBasedExemplarFilter()
+ span_context = SpanContext(
+ trace_id=self.TRACE_ID,
+ span_id=self.SPAN_ID,
+ is_remote=False,
+ trace_flags=TraceFlags(TraceFlags.DEFAULT),
+ trace_state={},
+ )
+ span = trace.NonRecordingSpan(span_context)
+ ctx = trace.set_span_in_context(span)
+ self.assertFalse(filter.should_sample(10, 0, {}, ctx))
+
+ def test_should_not_sample_with_invalid_span(self):
+ filter = TraceBasedExemplarFilter()
+ self.assertFalse(filter.should_sample(10, 0, {}, Context()))
+
+ def test_should_sample_when_trace_is_sampled(self):
+ filter = TraceBasedExemplarFilter()
+ span_context = SpanContext(
+ trace_id=self.TRACE_ID,
+ span_id=self.SPAN_ID,
+ is_remote=False,
+ trace_flags=TraceFlags(TraceFlags.SAMPLED),
+ trace_state={},
+ )
+ span = trace.NonRecordingSpan(span_context)
+ ctx = trace.set_span_in_context(span)
+ self.assertTrue(filter.should_sample(10, 0, {}, ctx))
diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py
new file mode 100644
index 00000000000..2c205a6ee0c
--- /dev/null
+++ b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py
@@ -0,0 +1,160 @@
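+# Tests for the exemplar reservoirs: offer() records candidate measurements
+# and collect() returns the stored exemplars, resetting the reservoir.
+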
+from time import time_ns
+from unittest import TestCase
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.sdk.metrics._internal.aggregation import (
+ _ExplicitBucketHistogramAggregation,
+ _LastValueAggregation,
+ _SumAggregation,
+)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlignedHistogramBucketExemplarReservoir,
+ SimpleFixedSizeExemplarReservoir,
+)
+from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory
+from opentelemetry.trace import SpanContext, TraceFlags
+
+
+class TestSimpleFixedSizeExemplarReservoir(TestCase):
+
+ TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16)
+ SPAN_ID = int("6e0c63257de34c92", 16)
+
+ def test_no_measurements(self):
+ reservoir = SimpleFixedSizeExemplarReservoir(10)
+ self.assertEqual(len(reservoir.collect({})), 0)
+
+ def test_has_context(self):
+ reservoir = SimpleFixedSizeExemplarReservoir(1)
+ span_context = SpanContext(
+ trace_id=self.TRACE_ID,
+ span_id=self.SPAN_ID,
+ is_remote=False,
+ trace_flags=TraceFlags(TraceFlags.SAMPLED),
+ trace_state={},
+ )
+ span = trace.NonRecordingSpan(span_context)
+ ctx = trace.set_span_in_context(span)
+ reservoir.offer(1, time_ns(), {}, ctx)
+ exemplars = reservoir.collect({})
+ self.assertEqual(len(exemplars), 1)
+ self.assertEqual(exemplars[0].trace_id, self.TRACE_ID)
+ self.assertEqual(exemplars[0].span_id, self.SPAN_ID)
+
+ def test_filter_attributes(self):
+ reservoir = SimpleFixedSizeExemplarReservoir(1)
+ span_context = SpanContext(
+ trace_id=self.TRACE_ID,
+ span_id=self.SPAN_ID,
+ is_remote=False,
+ trace_flags=TraceFlags(TraceFlags.SAMPLED),
+ trace_state={},
+ )
+ span = trace.NonRecordingSpan(span_context)
+ ctx = trace.set_span_in_context(span)
+ reservoir.offer(
+ 1, time_ns(), {"key1": "value1", "key2": "value2"}, ctx
+ )
+ exemplars = reservoir.collect({"key2": "value2"})
+ self.assertEqual(len(exemplars), 1)
+ self.assertIn("key1", exemplars[0].filtered_attributes)
+ self.assertNotIn("key2", exemplars[0].filtered_attributes)
+
+ def test_reset_after_collection(self):
+ reservoir = SimpleFixedSizeExemplarReservoir(4)
+
+ reservoir.offer(1.0, time_ns(), {"attribute": "value1"}, Context())
+ reservoir.offer(2.0, time_ns(), {"attribute": "value2"}, Context())
+ reservoir.offer(3.0, time_ns(), {"attribute": "value3"}, Context())
+
+ exemplars = reservoir.collect({})
+ self.assertEqual(len(exemplars), 3)
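+        # collect() also resets the reservoir, so subsequent offers are
+        # stored from scratch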
+
+ # Offer new measurements after reset
+ reservoir.offer(4.0, time_ns(), {"attribute": "value4"}, Context())
+ reservoir.offer(5.0, time_ns(), {"attribute": "value5"}, Context())
+
+ # Collect again and check the number of exemplars
+ new_exemplars = reservoir.collect({})
+ self.assertEqual(len(new_exemplars), 2)
+ self.assertEqual(new_exemplars[0].value, 4.0)
+ self.assertEqual(new_exemplars[1].value, 5.0)
+
+
+class TestAlignedHistogramBucketExemplarReservoir(TestCase):
+
+ TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16)
+ SPAN_ID = int("6e0c63257de34c92", 16)
+
+ def test_measurement_in_buckets(self):
+ reservoir = AlignedHistogramBucketExemplarReservoir(
+ [0, 5, 10, 25, 50, 75]
+ )
+ span_context = SpanContext(
+ trace_id=self.TRACE_ID,
+ span_id=self.SPAN_ID,
+ is_remote=False,
+ trace_flags=TraceFlags(TraceFlags.SAMPLED),
+ trace_state={},
+ )
+ span = trace.NonRecordingSpan(span_context)
+ ctx = trace.set_span_in_context(span)
+        reservoir.offer(80, time_ns(), {"bucket": "5"}, ctx)  # outlier
+ reservoir.offer(52, time_ns(), {"bucket": "4"}, ctx)
+ reservoir.offer(7, time_ns(), {"bucket": "1"}, ctx)
+ reservoir.offer(6, time_ns(), {"bucket": "1"}, ctx)
+
+ exemplars = reservoir.collect({"bucket": "1"})
+
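+        # One exemplar per non-empty bucket: 6 replaced 7 in the (5, 10]
+        # bucket, 52 fell in (50, 75] and 80 is above the last boundary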
+ self.assertEqual(len(exemplars), 3)
+ self.assertEqual(exemplars[0].value, 6)
+ self.assertEqual(exemplars[1].value, 52)
+        self.assertEqual(exemplars[2].value, 80)  # outlier
+ self.assertEqual(len(exemplars[0].filtered_attributes), 0)
+
+ def test_last_measurement_in_bucket(self):
+ reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25])
+ span_context = SpanContext(
+ trace_id=self.TRACE_ID,
+ span_id=self.SPAN_ID,
+ is_remote=False,
+ trace_flags=TraceFlags(TraceFlags.SAMPLED),
+ trace_state={},
+ )
+ span = trace.NonRecordingSpan(span_context)
+ ctx = trace.set_span_in_context(span)
+
+ # Offer values to the reservoir
+ reservoir.offer(2, time_ns(), {"bucket": "1"}, ctx) # Bucket 1
+ reservoir.offer(7, time_ns(), {"bucket": "2"}, ctx) # Bucket 2
+ reservoir.offer(
+ 8, time_ns(), {"bucket": "2"}, ctx
+ ) # Bucket 2 - should replace the 7
+ reservoir.offer(15, time_ns(), {"bucket": "3"}, ctx) # Bucket 3
+
+ exemplars = reservoir.collect({})
+
+ # Check that each bucket has the correct value
+ self.assertEqual(len(exemplars), 3)
+ self.assertEqual(exemplars[0].value, 2)
+ self.assertEqual(exemplars[1].value, 8)
+ self.assertEqual(exemplars[2].value, 15)
+
+
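+# _default_reservoir_factory maps an aggregation type to the reservoir
+# class the SDK uses for it by default.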
+class TestExemplarReservoirFactory(TestCase):
+ def test_sum_aggregation(self):
+ exemplar_reservoir = _default_reservoir_factory(_SumAggregation)
+ self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir)
+
+ def test_last_value_aggregation(self):
+ exemplar_reservoir = _default_reservoir_factory(_LastValueAggregation)
+ self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir)
+
+ def test_explicit_histogram_aggregation(self):
+ exemplar_reservoir = _default_reservoir_factory(
+ _ExplicitBucketHistogramAggregation
+ )
+ self.assertEqual(
+ exemplar_reservoir, AlignedHistogramBucketExemplarReservoir
+ )
diff --git a/opentelemetry-sdk/tests/metrics/test_instrument.py b/opentelemetry-sdk/tests/metrics/test_instrument.py
index d4a2ddf5094..4bd10e3fe7f 100644
--- a/opentelemetry-sdk/tests/metrics/test_instrument.py
+++ b/opentelemetry-sdk/tests/metrics/test_instrument.py
@@ -15,9 +15,12 @@
# pylint: disable=no-self-use
from logging import WARNING
from unittest import TestCase
-from unittest.mock import Mock
+from unittest.mock import Mock, patch
+from opentelemetry.context import Context
from opentelemetry.metrics import Observation
from opentelemetry.metrics._internal.instrument import CallbackOptions
from opentelemetry.sdk.metrics import (
@@ -85,21 +88,23 @@ def test_disallow_direct_up_down_counter_creation(self):
TEST_ATTRIBUTES = {"foo": "bar"}
+TEST_CONTEXT = Context()
+TEST_TIMESTAMP = 1_000_000_000
def callable_callback_0(options: CallbackOptions):
return [
- Observation(1, attributes=TEST_ATTRIBUTES),
- Observation(2, attributes=TEST_ATTRIBUTES),
- Observation(3, attributes=TEST_ATTRIBUTES),
+ Observation(1, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(2, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(3, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
]
def callable_callback_1(options: CallbackOptions):
return [
- Observation(4, attributes=TEST_ATTRIBUTES),
- Observation(5, attributes=TEST_ATTRIBUTES),
- Observation(6, attributes=TEST_ATTRIBUTES),
+ Observation(4, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(5, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(6, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
]
@@ -107,9 +112,9 @@ def generator_callback_0():
options = yield
assert isinstance(options, CallbackOptions)
options = yield [
- Observation(1, attributes=TEST_ATTRIBUTES),
- Observation(2, attributes=TEST_ATTRIBUTES),
- Observation(3, attributes=TEST_ATTRIBUTES),
+ Observation(1, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(2, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(3, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
]
assert isinstance(options, CallbackOptions)
@@ -118,13 +123,17 @@ def generator_callback_1():
options = yield
assert isinstance(options, CallbackOptions)
options = yield [
- Observation(4, attributes=TEST_ATTRIBUTES),
- Observation(5, attributes=TEST_ATTRIBUTES),
- Observation(6, attributes=TEST_ATTRIBUTES),
+ Observation(4, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(5, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
+ Observation(6, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT),
]
assert isinstance(options, CallbackOptions)
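+# time_ns is patched below so the Measurements produced by the callbacks
+# carry the fixed TEST_TIMESTAMP instead of a real clock reading.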
+@patch(
+ "opentelemetry.sdk.metrics._internal.instrument.time_ns",
+ Mock(return_value=TEST_TIMESTAMP),
+)
class TestObservableGauge(TestCase):
def testname(self):
self.assertEqual(_ObservableGauge("name", Mock(), Mock()).name, "name")
@@ -135,19 +144,30 @@ def test_callable_callback_0(self):
"name", Mock(), Mock(), [callable_callback_0]
)
- self.assertEqual(
- list(observable_gauge.callback(CallbackOptions())),
+ assert list(observable_gauge.callback(CallbackOptions())) == (
[
Measurement(
- 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 1,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 2,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 3,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
- ],
+ ]
)
def test_callable_multiple_callable_callback(self):
@@ -159,22 +179,46 @@ def test_callable_multiple_callable_callback(self):
list(observable_gauge.callback(CallbackOptions())),
[
Measurement(
- 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 1,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 2,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 3,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 4, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 4,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 5, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 5,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 6, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 6,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
],
)
@@ -188,13 +232,25 @@ def test_generator_callback_0(self):
list(observable_gauge.callback(CallbackOptions())),
[
Measurement(
- 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 1,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 2,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 3,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
],
)
@@ -211,22 +267,46 @@ def test_generator_multiple_generator_callback(self):
list(observable_gauge.callback(CallbackOptions())),
[
Measurement(
- 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 1,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 2,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 3,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 4, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 4,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 5, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 5,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
Measurement(
- 6, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
+ 6,
+ TEST_TIMESTAMP,
+ instrument=observable_gauge,
+ context=TEST_CONTEXT,
+ attributes=TEST_ATTRIBUTES,
),
],
)
@@ -237,6 +317,10 @@ def test_disallow_direct_observable_gauge_creation(self):
ObservableGauge("name", Mock(), Mock())
+@patch(
+ "opentelemetry.sdk.metrics._internal.instrument.time_ns",
+ Mock(return_value=TEST_TIMESTAMP),
+)
class TestObservableCounter(TestCase):
def test_callable_callback_0(self):
observable_counter = _ObservableCounter(
@@ -248,17 +332,23 @@ def test_callable_callback_0(self):
[
Measurement(
1,
+ TEST_TIMESTAMP,
instrument=observable_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
+ TEST_TIMESTAMP,
instrument=observable_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
+ TEST_TIMESTAMP,
instrument=observable_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
],
@@ -274,17 +364,23 @@ def test_generator_callback_0(self):
[
Measurement(
1,
+ TEST_TIMESTAMP,
instrument=observable_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
+ TEST_TIMESTAMP,
instrument=observable_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
+ TEST_TIMESTAMP,
instrument=observable_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
],
@@ -313,6 +409,10 @@ def test_disallow_direct_counter_creation(self):
_SDKGauge("name", Mock(), Mock())
+@patch(
+ "opentelemetry.sdk.metrics._internal.instrument.time_ns",
+ Mock(return_value=TEST_TIMESTAMP),
+)
class TestObservableUpDownCounter(TestCase):
def test_callable_callback_0(self):
observable_up_down_counter = _ObservableUpDownCounter(
@@ -324,17 +424,23 @@ def test_callable_callback_0(self):
[
Measurement(
1,
+ TEST_TIMESTAMP,
instrument=observable_up_down_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
+ TEST_TIMESTAMP,
instrument=observable_up_down_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
+ TEST_TIMESTAMP,
instrument=observable_up_down_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
],
@@ -350,17 +456,23 @@ def test_generator_callback_0(self):
[
Measurement(
1,
+ TEST_TIMESTAMP,
instrument=observable_up_down_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
+ TEST_TIMESTAMP,
instrument=observable_up_down_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
+ TEST_TIMESTAMP,
instrument=observable_up_down_counter,
+ context=TEST_CONTEXT,
attributes=TEST_ATTRIBUTES,
),
],
diff --git a/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py b/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py
index 91a49955b70..bedffaaeff0 100644
--- a/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py
+++ b/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py
@@ -43,6 +43,7 @@ def test_creates_metric_reader_storages(self, MockMetricReaderStorage):
reader_mocks = [Mock() for _ in range(5)]
SynchronousMeasurementConsumer(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=reader_mocks,
views=Mock(),
@@ -59,6 +60,7 @@ def test_measurements_passed_to_each_reader_storage(
consumer = SynchronousMeasurementConsumer(
SdkConfiguration(
+ exemplar_filter=Mock(should_sample=Mock(return_value=False)),
resource=Mock(),
metric_readers=reader_mocks,
views=Mock(),
@@ -69,7 +71,7 @@ def test_measurements_passed_to_each_reader_storage(
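+        # should_sample returned False, so each reader storage is told not
+        # to sample an exemplar for this measurement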
for rs_mock in reader_storage_mocks:
rs_mock.consume_measurement.assert_called_once_with(
- measurement_mock
+ measurement_mock, False
)
def test_collect_passed_to_reader_stage(self, MockMetricReaderStorage):
@@ -80,6 +82,7 @@ def test_collect_passed_to_reader_stage(self, MockMetricReaderStorage):
consumer = SynchronousMeasurementConsumer(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=reader_mocks,
views=Mock(),
@@ -98,6 +101,7 @@ def test_collect_calls_async_instruments(self, MockMetricReaderStorage):
MockMetricReaderStorage.return_value = reader_storage_mock
consumer = SynchronousMeasurementConsumer(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=[reader_mock],
views=Mock(),
@@ -125,6 +129,7 @@ def test_collect_timeout(self, MockMetricReaderStorage):
MockMetricReaderStorage.return_value = reader_storage_mock
consumer = SynchronousMeasurementConsumer(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=[reader_mock],
views=Mock(),
@@ -157,6 +162,7 @@ def test_collect_deadline(
MockMetricReaderStorage.return_value = reader_storage_mock
consumer = SynchronousMeasurementConsumer(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=[reader_mock],
views=Mock(),
diff --git a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py
index 2aac9874659..1785c8ec24f 100644
--- a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py
+++ b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py
@@ -15,8 +15,10 @@
# pylint: disable=protected-access,invalid-name
from logging import WARNING
+from time import time_ns
from unittest.mock import MagicMock, Mock, patch
+from opentelemetry.context import Context
from opentelemetry.sdk.metrics._internal.aggregation import (
_LastValueAggregation,
)
@@ -75,6 +77,7 @@ def test_creates_view_instrument_matches(
view2 = mock_view_matching("view_2", instrument1, instrument2)
storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(view1, view2),
@@ -89,21 +92,27 @@ def test_creates_view_instrument_matches(
# instrument1 matches view1 and view2, so should create two
# ViewInstrumentMatch objects
- storage.consume_measurement(Measurement(1, instrument1))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument1, Context())
+ )
self.assertEqual(
len(MockViewInstrumentMatch.call_args_list),
2,
MockViewInstrumentMatch.mock_calls,
)
# they should only be created the first time the instrument is seen
- storage.consume_measurement(Measurement(1, instrument1))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument1, Context())
+ )
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 2)
# instrument2 matches view2, so should create a single
# ViewInstrumentMatch
MockViewInstrumentMatch.call_args_list.clear()
with self.assertLogs(level=WARNING):
- storage.consume_measurement(Measurement(1, instrument2))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument2, Context())
+ )
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)
@patch(
@@ -113,9 +122,15 @@ def test_creates_view_instrument_matches(
def test_forwards_calls_to_view_instrument_match(
self, MockViewInstrumentMatch: Mock
):
- view_instrument_match1 = Mock(_aggregation=_LastValueAggregation({}))
- view_instrument_match2 = Mock(_aggregation=_LastValueAggregation({}))
- view_instrument_match3 = Mock(_aggregation=_LastValueAggregation({}))
+ view_instrument_match1 = Mock(
+ _aggregation=_LastValueAggregation({}, Mock())
+ )
+ view_instrument_match2 = Mock(
+ _aggregation=_LastValueAggregation({}, Mock())
+ )
+ view_instrument_match3 = Mock(
+ _aggregation=_LastValueAggregation({}, Mock())
+ )
MockViewInstrumentMatch.side_effect = [
view_instrument_match1,
view_instrument_match2,
@@ -129,6 +144,7 @@ def test_forwards_calls_to_view_instrument_match(
storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(view1, view2),
@@ -143,21 +159,21 @@ def test_forwards_calls_to_view_instrument_match(
# Measurements from an instrument should be passed on to each
# ViewInstrumentMatch objects created for that instrument
- measurement = Measurement(1, instrument1)
+ measurement = Measurement(1, time_ns(), instrument1, Context())
storage.consume_measurement(measurement)
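+        # No sampling decision is passed here, so the storage forwards the
+        # default (True) to each ViewInstrumentMatch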
view_instrument_match1.consume_measurement.assert_called_once_with(
- measurement
+ measurement, True
)
view_instrument_match2.consume_measurement.assert_called_once_with(
- measurement
+ measurement, True
)
view_instrument_match3.consume_measurement.assert_not_called()
- measurement = Measurement(1, instrument2)
+ measurement = Measurement(1, time_ns(), instrument2, Context())
with self.assertLogs(level=WARNING):
storage.consume_measurement(measurement)
view_instrument_match3.consume_measurement.assert_called_once_with(
- measurement
+ measurement, True
)
# collect() should call collect on all of its _ViewInstrumentMatch
@@ -238,6 +254,7 @@ def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock):
view1 = mock_view_matching(instrument1)
storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(view1,),
@@ -251,7 +268,9 @@ def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock):
)
def send_measurement():
- storage.consume_measurement(Measurement(1, instrument1))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument1, Context())
+ )
# race sending many measurements concurrently
self.run_with_many_threads(send_measurement)
@@ -270,6 +289,7 @@ def test_default_view_enabled(self, MockViewInstrumentMatch: Mock):
storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(),
@@ -282,17 +302,23 @@ def test_default_view_enabled(self, MockViewInstrumentMatch: Mock):
MagicMock(**{"__getitem__.return_value": DefaultAggregation()}),
)
- storage.consume_measurement(Measurement(1, instrument1))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument1, Context())
+ )
self.assertEqual(
len(MockViewInstrumentMatch.call_args_list),
1,
MockViewInstrumentMatch.mock_calls,
)
- storage.consume_measurement(Measurement(1, instrument1))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument1, Context())
+ )
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)
MockViewInstrumentMatch.call_args_list.clear()
- storage.consume_measurement(Measurement(1, instrument2))
+ storage.consume_measurement(
+ Measurement(1, time_ns(), instrument2, Context())
+ )
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)
def test_drop_aggregation(self):
@@ -300,6 +326,7 @@ def test_drop_aggregation(self):
counter = _Counter("name", Mock(), Mock())
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -315,7 +342,9 @@ def test_drop_aggregation(self):
),
MagicMock(**{"__getitem__.return_value": DefaultAggregation()}),
)
- metric_reader_storage.consume_measurement(Measurement(1, counter))
+ metric_reader_storage.consume_measurement(
+ Measurement(1, time_ns(), counter, Context())
+ )
self.assertIsNone(metric_reader_storage.collect())
@@ -326,6 +355,7 @@ def test_same_collection_start(self):
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(View(instrument_name="name"),),
@@ -338,9 +368,11 @@ def test_same_collection_start(self):
MagicMock(**{"__getitem__.return_value": DefaultAggregation()}),
)
- metric_reader_storage.consume_measurement(Measurement(1, counter))
metric_reader_storage.consume_measurement(
- Measurement(1, up_down_counter)
+ Measurement(1, time_ns(), counter, Context())
+ )
+ metric_reader_storage.consume_measurement(
+ Measurement(1, time_ns(), up_down_counter, Context())
)
actual = metric_reader_storage.collect()
@@ -371,6 +403,7 @@ def test_conflicting_view_configuration(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -390,7 +423,7 @@ def test_conflicting_view_configuration(self):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter)
+ Measurement(1, time_ns(), observable_counter, Context())
)
self.assertIs(
@@ -419,6 +452,7 @@ def test_view_instrument_match_conflict_0(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -437,12 +471,12 @@ def test_view_instrument_match_conflict_0(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_0)
+ Measurement(1, time_ns(), observable_counter_0, Context())
)
with self.assertLogs(level=WARNING) as log:
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_1)
+ Measurement(1, time_ns(), observable_counter_1, Context())
)
self.assertIn(
@@ -476,6 +510,7 @@ def test_view_instrument_match_conflict_1(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -494,12 +529,14 @@ def test_view_instrument_match_conflict_1(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_foo)
+ Measurement(
+ 1, time_ns(), observable_counter_foo, Context()
+ )
)
with self.assertLogs(level=WARNING) as log:
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_bar)
+ Measurement(1, time_ns(), observable_counter_bar, Context())
)
self.assertIn(
@@ -509,7 +546,7 @@ def test_view_instrument_match_conflict_1(self):
with self.assertLogs(level=WARNING) as log:
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_baz)
+ Measurement(1, time_ns(), observable_counter_baz, Context())
)
self.assertIn(
@@ -544,6 +581,7 @@ def test_view_instrument_match_conflict_2(self):
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -562,13 +600,17 @@ def test_view_instrument_match_conflict_2(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_foo)
+ Measurement(
+ 1, time_ns(), observable_counter_foo, Context()
+ )
)
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_bar)
+ Measurement(
+ 1, time_ns(), observable_counter_bar, Context()
+ )
)
def test_view_instrument_match_conflict_3(self):
@@ -592,6 +634,7 @@ def test_view_instrument_match_conflict_3(self):
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -610,13 +653,15 @@ def test_view_instrument_match_conflict_3(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, counter_bar)
+ Measurement(1, time_ns(), counter_bar, Context())
)
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_baz)
+ Measurement(
+ 1, time_ns(), observable_counter_baz, Context()
+ )
)
def test_view_instrument_match_conflict_4(self):
@@ -640,6 +685,7 @@ def test_view_instrument_match_conflict_4(self):
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -658,13 +704,13 @@ def test_view_instrument_match_conflict_4(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, counter_bar)
+ Measurement(1, time_ns(), counter_bar, Context())
)
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, up_down_counter_baz)
+ Measurement(1, time_ns(), up_down_counter_baz, Context())
)
def test_view_instrument_match_conflict_5(self):
@@ -686,6 +732,7 @@ def test_view_instrument_match_conflict_5(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -704,13 +751,13 @@ def test_view_instrument_match_conflict_5(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_0)
+ Measurement(1, time_ns(), observable_counter_0, Context())
)
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_1)
+ Measurement(1, time_ns(), observable_counter_1, Context())
)
def test_view_instrument_match_conflict_6(self):
@@ -740,6 +787,7 @@ def test_view_instrument_match_conflict_6(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -759,19 +807,19 @@ def test_view_instrument_match_conflict_6(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter)
+ Measurement(1, time_ns(), observable_counter, Context())
)
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, histogram)
+ Measurement(1, time_ns(), histogram, Context())
)
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, gauge)
+ Measurement(1, time_ns(), gauge, Context())
)
def test_view_instrument_match_conflict_7(self):
@@ -794,6 +842,7 @@ def test_view_instrument_match_conflict_7(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -812,12 +861,12 @@ def test_view_instrument_match_conflict_7(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_0)
+ Measurement(1, time_ns(), observable_counter_0, Context())
)
with self.assertLogs(level=WARNING) as log:
metric_reader_storage.consume_measurement(
- Measurement(1, observable_counter_1)
+ Measurement(1, time_ns(), observable_counter_1, Context())
)
self.assertIn(
@@ -848,6 +897,7 @@ def test_view_instrument_match_conflict_8(self):
)
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
+ exemplar_filter=Mock(),
resource=Mock(),
metric_readers=(),
views=(
@@ -870,12 +920,12 @@ def test_view_instrument_match_conflict_8(self):
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
metric_reader_storage.consume_measurement(
- Measurement(1, up_down_counter)
+ Measurement(1, time_ns(), up_down_counter, Context())
)
with self.assertLogs(level=WARNING) as log:
metric_reader_storage.consume_measurement(
- Measurement(1, histogram)
+ Measurement(1, time_ns(), histogram, Context())
)
self.assertIn(
diff --git a/opentelemetry-sdk/tests/metrics/test_point.py b/opentelemetry-sdk/tests/metrics/test_point.py
index 846f2c2fc9f..e773f3187f5 100644
--- a/opentelemetry-sdk/tests/metrics/test_point.py
+++ b/opentelemetry-sdk/tests/metrics/test_point.py
@@ -67,7 +67,7 @@ def setUpClass(cls):
time_unix_nano=2,
value=3.3,
)
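+        # the serialized data point includes an "exemplars" field (an empty
+        # list when no exemplars were sampled)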
- cls.number_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "value": 3.3}}'
+ cls.number_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "value": 3.3, "exemplars": []}}'
cls.number_data_point_1 = NumberDataPoint(
attributes=cls.attributes_1,
@@ -75,7 +75,7 @@ def setUpClass(cls):
time_unix_nano=3,
value=4.4,
)
- cls.number_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "value": 4.4}}'
+ cls.number_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "value": 4.4, "exemplars": []}}'
cls.histogram_data_point_0 = HistogramDataPoint(
attributes=cls.attributes_0,
@@ -88,7 +88,7 @@ def setUpClass(cls):
min=0.2,
max=3.3,
)
- cls.histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 3, "sum": 3.3, "bucket_counts": [1, 1, 1], "explicit_bounds": [0.1, 1.2, 2.3, 3.4], "min": 0.2, "max": 3.3}}'
+ cls.histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 3, "sum": 3.3, "bucket_counts": [1, 1, 1], "explicit_bounds": [0.1, 1.2, 2.3, 3.4], "min": 0.2, "max": 3.3, "exemplars": []}}'
cls.histogram_data_point_1 = HistogramDataPoint(
attributes=cls.attributes_1,
@@ -101,7 +101,7 @@ def setUpClass(cls):
min=0.3,
max=4.4,
)
- cls.histogram_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "count": 4, "sum": 4.4, "bucket_counts": [2, 1, 1], "explicit_bounds": [1.2, 2.3, 3.4, 4.5], "min": 0.3, "max": 4.4}}'
+ cls.histogram_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "count": 4, "sum": 4.4, "bucket_counts": [2, 1, 1], "explicit_bounds": [1.2, 2.3, 3.4, 4.5], "min": 0.3, "max": 4.4, "exemplars": []}}'
cls.exp_histogram_data_point_0 = ExponentialHistogramDataPoint(
attributes=cls.attributes_0,
@@ -117,7 +117,7 @@ def setUpClass(cls):
min=10,
max=10,
)
- cls.exp_histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 1, "sum": 10, "scale": 1, "zero_count": 0, "positive": {{"offset": 0, "bucket_counts": [1]}}, "negative": {{"offset": 0, "bucket_counts": [0]}}, "flags": 0, "min": 10, "max": 10}}'
+ cls.exp_histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 1, "sum": 10, "scale": 1, "zero_count": 0, "positive": {{"offset": 0, "bucket_counts": [1]}}, "negative": {{"offset": 0, "bucket_counts": [0]}}, "flags": 0, "min": 10, "max": 10, "exemplars": []}}'
cls.sum_0 = Sum(
data_points=[cls.number_data_point_0, cls.number_data_point_1],
diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py
index b49a508eaf1..197c58f75be 100644
--- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py
+++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py
@@ -15,21 +15,31 @@
# pylint: disable=protected-access
from time import time_ns
+from typing import Callable, Sequence, Type
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
+from opentelemetry.context import Context
from opentelemetry.sdk.metrics._internal._view_instrument_match import (
_ViewInstrumentMatch,
)
from opentelemetry.sdk.metrics._internal.aggregation import (
+ _Aggregation,
_DropAggregation,
+ _ExplicitBucketHistogramAggregation,
_LastValueAggregation,
)
-from opentelemetry.sdk.metrics._internal.instrument import _Counter
+from opentelemetry.sdk.metrics._internal.exemplar import (
+ AlignedHistogramBucketExemplarReservoir,
+ ExemplarReservoirBuilder,
+ SimpleFixedSizeExemplarReservoir,
+)
+from opentelemetry.sdk.metrics._internal.instrument import _Counter, _Histogram
from opentelemetry.sdk.metrics._internal.measurement import Measurement
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
SdkConfiguration,
)
+from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory
from opentelemetry.sdk.metrics.export import AggregationTemporality
from opentelemetry.sdk.metrics.view import (
DefaultAggregation,
@@ -39,10 +49,29 @@
)
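+# Builds a reservoir factory for tests: explicit-bucket histogram
+# aggregations get an AlignedHistogramBucketExemplarReservoir with the given
+# boundaries, anything else a SimpleFixedSizeExemplarReservoir of the given
+# size.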
+def generalized_reservoir_factory(
+ size: int = 1, boundaries: Sequence[float] = None
+) -> Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]:
+ def factory(
+ aggregation_type: Type[_Aggregation],
+ ) -> ExemplarReservoirBuilder:
+ if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
+ return lambda **kwargs: AlignedHistogramBucketExemplarReservoir(
+ boundaries=boundaries or [],
+ **{k: v for k, v in kwargs.items() if k != "boundaries"},
+ )
+
+ return lambda **kwargs: SimpleFixedSizeExemplarReservoir(
+ size=size,
+ **{k: v for k, v in kwargs.items() if k != "size"},
+ )
+
+ return factory
+
+
class Test_ViewInstrumentMatch(TestCase): # pylint: disable=invalid-name
@classmethod
def setUpClass(cls):
-
cls.mock_aggregation_factory = Mock()
cls.mock_created_aggregation = (
cls.mock_aggregation_factory._create_aggregation()
@@ -50,6 +79,7 @@ def setUpClass(cls):
cls.mock_resource = Mock()
cls.mock_instrumentation_scope = Mock()
cls.sdk_configuration = SdkConfiguration(
+ exemplar_filter=Mock(),
resource=cls.mock_resource,
metric_readers=[],
views=[],
@@ -74,7 +104,9 @@ def test_consume_measurement(self):
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=instrument1,
+ context=Context(),
attributes={"c": "d", "f": "g"},
)
)
@@ -86,7 +118,9 @@ def test_consume_measurement(self):
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=instrument1,
+ context=Context(),
attributes={"w": "x", "y": "z"},
)
)
@@ -115,7 +149,9 @@ def test_consume_measurement(self):
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=instrument1,
+ context=Context(),
attributes={"c": "d", "f": "g"},
)
)
@@ -143,7 +179,13 @@ def test_consume_measurement(self):
),
)
view_instrument_match.consume_measurement(
- Measurement(value=0, instrument=instrument1, attributes=None)
+ Measurement(
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes=None,
+ )
)
self.assertEqual(
view_instrument_match._attributes_aggregation,
@@ -167,7 +209,13 @@ def test_consume_measurement(self):
),
)
view_instrument_match.consume_measurement(
- Measurement(value=0, instrument=instrument1, attributes=None)
+ Measurement(
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes=None,
+ )
)
self.assertIsInstance(
view_instrument_match._attributes_aggregation[frozenset({})],
@@ -199,7 +247,9 @@ def test_collect(self):
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=Mock(name="instrument1"),
+ context=Context(),
attributes={"c": "d", "f": "g"},
)
)
@@ -239,11 +289,18 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns):
# +1 call to _create_aggregation
view_instrument_match.consume_measurement(
Measurement(
- value=0, instrument=instrument, attributes={"foo": "bar0"}
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument,
+ attributes={"foo": "bar0"},
+ context=Context(),
)
)
view_instrument_match._view._aggregation._create_aggregation.assert_called_with(
- instrument, {"foo": "bar0"}, start_time_unix_nano
+ instrument,
+ {"foo": "bar0"},
+ _default_reservoir_factory,
+ start_time_unix_nano,
)
collection_start_time_unix_nano = time_ns()
collected_data_points = view_instrument_match.collect(
@@ -255,11 +312,15 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns):
# +1 call to _create_aggregation
view_instrument_match.consume_measurement(
Measurement(
- value=0, instrument=instrument, attributes={"foo": "bar1"}
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument,
+ attributes={"foo": "bar1"},
+ context=Context(),
)
)
view_instrument_match._view._aggregation._create_aggregation.assert_called_with(
- instrument, {"foo": "bar1"}, 1
+ instrument, {"foo": "bar1"}, _default_reservoir_factory, 1
)
collection_start_time_unix_nano = time_ns()
collected_data_points = view_instrument_match.collect(
@@ -273,21 +334,33 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns):
# +1 call to create_aggregation
view_instrument_match.consume_measurement(
Measurement(
- value=0, instrument=instrument, attributes={"foo": "bar"}
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument,
+ attributes={"foo": "bar"},
+ context=Context(),
)
)
view_instrument_match._view._aggregation._create_aggregation.assert_called_with(
- instrument, {"foo": "bar"}, 2
+ instrument, {"foo": "bar"}, _default_reservoir_factory, 2
)
# No new calls to _create_aggregation because attributes remain the same
view_instrument_match.consume_measurement(
Measurement(
- value=0, instrument=instrument, attributes={"foo": "bar"}
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument,
+ attributes={"foo": "bar"},
+ context=Context(),
)
)
view_instrument_match.consume_measurement(
Measurement(
- value=0, instrument=instrument, attributes={"foo": "bar"}
+ value=0,
+ time_unix_nano=time_ns(),
+ instrument=instrument,
+ attributes={"foo": "bar"},
+ context=Context(),
)
)
# In total we have 5 calls for _create_aggregation
@@ -338,28 +411,36 @@ def test_data_point_check(self):
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=Mock(name="instrument1"),
+ context=Context(),
attributes={"c": "d", "f": "g"},
)
)
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=Mock(name="instrument1"),
+ context=Context(),
attributes={"h": "i", "j": "k"},
)
)
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=Mock(name="instrument1"),
+ context=Context(),
attributes={"l": "m", "n": "o"},
)
)
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=Mock(name="instrument1"),
+ context=Context(),
attributes={"p": "q", "r": "s"},
)
)
@@ -393,7 +474,9 @@ def test_setting_aggregation(self):
view_instrument_match.consume_measurement(
Measurement(
value=0,
+ time_unix_nano=time_ns(),
instrument=Mock(name="instrument1"),
+ context=Context(),
attributes={"c": "d", "f": "g"},
)
)
@@ -404,3 +487,269 @@ def test_setting_aggregation(self):
],
_LastValueAggregation,
)
+
+
+class TestSimpleFixedSizeExemplarReservoir(TestCase):
+ def test_consume_measurement_with_custom_reservoir_factory(self):
+ simple_fixed_size_factory = generalized_reservoir_factory(size=10)
+
+ # Create an instance of _Counter
+ instrument1 = _Counter(
+ name="instrument1",
+ instrumentation_scope=None,
+ measurement_consumer=None,
+ description="description",
+ unit="unit",
+ )
+
+ view_instrument_match = _ViewInstrumentMatch(
+ view=View(
+ instrument_name="instrument1",
+ name="name",
+ aggregation=DefaultAggregation(),
+ exemplar_reservoir_factory=simple_fixed_size_factory,
+ ),
+ instrument=instrument1,
+ instrument_class_aggregation={_Counter: DefaultAggregation()},
+ )
+
+        # Consume measurements; the last two share attributes, so they
+        # aggregate into a single data point
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=2.0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute1": "value1"},
+ )
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=4.0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute2": "value2"},
+ )
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=5.0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute2": "value2"},
+ )
+ )
+
+ data_points = list(
+ view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0)
+ )
+
+        # One data point is collected per distinct attribute set
+ self.assertEqual(len(data_points), 2)
+
+ # Verify that exemplars have been correctly stored and collected
+ self.assertEqual(len(data_points[0].exemplars), 1)
+ self.assertEqual(len(data_points[1].exemplars), 2)
+
+ self.assertEqual(data_points[0].exemplars[0].value, 2.0)
+ self.assertEqual(data_points[1].exemplars[0].value, 4.0)
+ self.assertEqual(data_points[1].exemplars[1].value, 5.0)
+
+ def test_consume_measurement_with_exemplars(self):
+ # Create an instance of _Counter
+ instrument1 = _Counter(
+ name="instrument1",
+            instrumentation_scope=None,  # not mocked; None suffices here
+            measurement_consumer=None,  # not mocked; None suffices here
+ description="description",
+ unit="unit",
+ )
+
+ view_instrument_match = _ViewInstrumentMatch(
+ view=View(
+ instrument_name="instrument1",
+ name="name",
+ aggregation=DefaultAggregation(),
+ ),
+ instrument=instrument1,
+ instrument_class_aggregation={_Counter: DefaultAggregation()},
+ )
+
+ # Consume measurements with the same attributes to ensure aggregation
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=4.0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute2": "value2"},
+ )
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=5.0,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute2": "value2"},
+ )
+ )
+
+ # Collect the data points
+ data_points = list(
+ view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0)
+ )
+
+ # Ensure only one data point is collected
+ self.assertEqual(len(data_points), 1)
+
+ # Verify that exemplars have been correctly stored and collected
+        # As the default reservoir has only one bucket, it will retain
+        # one of the two measurements, chosen at random
+ self.assertEqual(len(data_points[0].exemplars), 1)
+
+ self.assertIn(data_points[0].exemplars[0].value, [4.0, 5.0])
+
+ def test_consume_measurement_with_exemplars_and_view_attributes_filter(
+ self,
+ ):
+ value = 22
+ # Create an instance of _Counter
+ instrument1 = _Counter(
+ name="instrument1",
+            instrumentation_scope=None,  # not mocked; None suffices here
+            measurement_consumer=None,  # not mocked; None suffices here
+ )
+
+ view_instrument_match = _ViewInstrumentMatch(
+ view=View(
+ instrument_name="instrument1",
+ name="name",
+ attribute_keys={"X", "Y"},
+ ),
+ instrument=instrument1,
+ instrument_class_aggregation={_Counter: DefaultAggregation()},
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=value,
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"X": "x-value", "Y": "y-value", "Z": "z-value"},
+ )
+ )
+
+ # Collect the data points
+ data_points = list(
+ view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0)
+ )
+
+ # Ensure only one data point is collected
+ self.assertEqual(len(data_points), 1)
+
+ # Verify that exemplars have been correctly stored and collected
+ self.assertEqual(len(data_points[0].exemplars), 1)
+
+ # Check the exemplar has the dropped attribute
+ exemplar = list(data_points[0].exemplars)[0]
+ self.assertEqual(exemplar.value, value)
+ self.assertDictEqual(exemplar.filtered_attributes, {"Z": "z-value"})
+
+
+class TestAlignedHistogramBucketExemplarReservoir(TestCase):
+ def test_consume_measurement_with_custom_reservoir_factory(self):
+ # Custom factory for AlignedHistogramBucketExemplarReservoir with specific boundaries
+ histogram_reservoir_factory = generalized_reservoir_factory(
+ boundaries=[0, 5, 10, 25]
+ )
+
+ # Create an instance of _Histogram
+ instrument1 = _Histogram(
+ name="instrument1",
+ instrumentation_scope=None,
+ measurement_consumer=None,
+ description="description",
+ unit="unit",
+ )
+
+ view_instrument_match = _ViewInstrumentMatch(
+ view=View(
+ instrument_name="instrument1",
+ name="name",
+ aggregation=DefaultAggregation(),
+ exemplar_reservoir_factory=histogram_reservoir_factory,
+ ),
+ instrument=instrument1,
+ instrument_class_aggregation={_Histogram: DefaultAggregation()},
+ )
+
+ # Consume measurements with different values to ensure they are placed in the correct buckets
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=2.0, # Should go into the first bucket (0 to 5)
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute1": "value1"},
+ )
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=7.0, # Should go into the second bucket (5 to 10)
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute2": "value2"},
+ )
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=8.0, # Should go into the second bucket (5 to 10)
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute2": "value2"},
+ )
+ )
+
+ view_instrument_match.consume_measurement(
+ Measurement(
+ value=15.0, # Should go into the third bucket (10 to 25)
+ time_unix_nano=time_ns(),
+ instrument=instrument1,
+ context=Context(),
+ attributes={"attribute3": "value3"},
+ )
+ )
+
+ # Collect the data points
+ data_points = list(
+ view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0)
+ )
+
+        # Ensure three data points are collected, one per attribute set
+ self.assertEqual(len(data_points), 3)
+
+ # Verify that exemplars have been correctly stored and collected in their respective buckets
+ self.assertEqual(len(data_points[0].exemplars), 1)
+ self.assertEqual(len(data_points[1].exemplars), 1)
+ self.assertEqual(len(data_points[2].exemplars), 1)
+
+ self.assertEqual(
+ data_points[0].exemplars[0].value, 2.0
+ ) # First bucket
+ self.assertEqual(
+ data_points[1].exemplars[0].value, 8.0
+ ) # Second bucket
+ self.assertEqual(
+ data_points[2].exemplars[0].value, 15.0
+ ) # Third bucket