From 9935e633680ebc1f1ecec4e2439df3f660ba1cbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 30 Jul 2024 17:26:31 +0200 Subject: [PATCH 01/48] Add based classes --- .../metrics/_internal/exemplar/__init__.py | 35 +++ .../metrics/_internal/exemplar/exemplar.py | 45 ++++ .../_internal/exemplar/exemplar_filter.py | 132 ++++++++++ .../_internal/exemplar/exemplar_reservoir.py | 237 ++++++++++++++++++ 4 files changed, 449 insertions(+) create mode 100644 opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py create mode 100644 opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py create mode 100644 opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py create mode 100644 opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py new file mode 100644 index 0000000000..a83b5b82b8 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py @@ -0,0 +1,35 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .exemplar import Exemplar +from .exemplar_filter import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + TraceBasedExemplarFilter, +) +from .exemplar_reservoir import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + SimpleFixedSizeExemplarReservoir, +) + +__all__ = [ + "Exemplar", + "AlwaysOffExemplarFilter", + "AlwaysOnExemplarFilter", + "TraceBasedExemplarFilter", + "AlignedHistogramBucketExemplarReservoir", + "ExemplarReservoir", + "SimpleFixedSizeExemplarReservoir", +] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py new file mode 100644 index 0000000000..a047a01fb8 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py @@ -0,0 +1,45 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import dataclasses +from typing import Optional, Union + +from opentelemetry.util.types import Attributes + + +@dataclasses.dataclass(frozen=True) +class Exemplar: + """A representation of an exemplar, which is a sample input measurement. + + Exemplars also hold information about the environment when the measurement + was recorded, for example the span and trace ID of the active span when the + exemplar was recorded. 
+ + Attributes: + trace_id: (optional) The trace associated with a recording + span_id: (optional) The span associated with a recording + time_unix_nano: The time of the observation + value: The recorded value + filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made. + + References: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar + """ + filtered_attributes: Attributes + value: Union[int, float] + time_unix_nano: int + span_id: Optional[str] = None + trace_id: Optional[str] = None + diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py new file mode 100644 index 0000000000..cc2b25d9ce --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py @@ -0,0 +1,132 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import ABC, abstractmethod +from typing import Union + +from opentelemetry import trace +from opentelemetry.context import Context +from opentelemetry.trace.span import INVALID_SPAN +from opentelemetry.util.types import Attributes + + +class ExemplarFilter(ABC): + """``ExemplarFilter`` determines which measurements are eligible for becoming an + ``Exemplar``. + + Exemplar filters are used to filter measurements before attempting to store them + in a reservoir. + + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter + """ + + @abstractmethod + def should_sample( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> bool: + """Returns whether or not a reservoir should attempt to filter a measurement. + + Attributes: + value: The value of the measurement + timestamp: A timestamp that best represents when the measurement was taken + attributes: The complete set of measurement attributes + ctx: The Context of the measurement + """ + raise NotImplementedError("ExemplarFilter.should_sample is not implemented") + + +class AlwaysOnExemplarFilter(ExemplarFilter): + """An ExemplarFilter which makes all measurements eligible for being an Exemplar. + + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson + """ + + def should_sample( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> bool: + """Returns whether or not a reservoir should attempt to filter a measurement. 
+ + Attributes: + value: The value of the measurement + timestamp: A timestamp that best represents when the measurement was taken + attributes: The complete set of measurement attributes + ctx: The Context of the measurement + """ + return True + + +class AlwaysOffExemplarFilter(ExemplarFilter): + """An ExemplarFilter which makes no measurements eligible for being an Exemplar. + + Using this ExemplarFilter is as good as disabling Exemplar feature. + + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff + """ + + def should_sample( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> bool: + """Returns whether or not a reservoir should attempt to filter a measurement. + + Attributes: + value: The value of the measurement + timestamp: A timestamp that best represents when the measurement was taken + attributes: The complete set of measurement attributes + ctx: The Context of the measurement + """ + return False + + +class TraceBasedExemplarFilter(ExemplarFilter): + """An ExemplarFilter which makes those measurements eligible for being an Exemplar, + which are recorded in the context of a sampled parent span. + + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased + """ + + def should_sample( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> bool: + """Returns whether or not a reservoir should attempt to filter a measurement. 
+ + Attributes: + value: The value of the measurement + timestamp: A timestamp that best represents when the measurement was taken + attributes: The complete set of measurement attributes + ctx: The Context of the measurement + """ + span = trace.get_current_span(ctx) + if span == INVALID_SPAN: + return False + return span.get_span_context().trace_flags.sampled diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py new file mode 100644 index 0000000000..7df4ed2b56 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -0,0 +1,237 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from random import randrange +from typing import Optional, Sequence, Union + +from opentelemetry import trace +from opentelemetry.context import Context +from opentelemetry.trace.span import INVALID_SPAN +from opentelemetry.util.types import Attributes + +from .exemplar import Exemplar + + +class ExemplarReservoir(ABC): + """ExemplarReservoir provide a method to offer measurements to the reservoir + and another to collect accumulated Exemplars. 
+ + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir + """ + + @abstractmethod + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> None: + """Offers a measurement to be sampled.""" + raise NotImplementedError("ExemplarReservoir.offer is not implemented") + + @abstractmethod + def collect(self, point_attributes: Attributes) -> list[Exemplar]: + """Returns accumulated Exemplars and also resets the reservoir for the next + sampling period + + Args: + point_attributes The attributes associated with metric point. + + Returns: + a list of :class:`opentelemetry.sdk.metrics.exemplar.Exemplar`s. Returned + exemplars contain the attributes that were filtered out by the aggregator, + but recorded alongside the original measurement. + """ + raise NotImplementedError("ExemplarReservoir.collect is not implemented") + + +class ExemplarBucket: + def __init__(self) -> None: + self.__value: Union[int, float] = 0 + self.__attributes: Attributes = {} + self.__time_unix_nano: int = 0 + self.__span_id: Optional[str] = None + self.__trace_id: Optional[str] = None + self.__offered: bool = False + + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> None: + """Offers a measurement to be sampled.""" + self.__value = value + self.__time_unix_nano = time_unix_nano + self.__attributes = attributes + span = trace.get_current_span(ctx) + if span != INVALID_SPAN: + span_context = span.get_span_context() + self.__span_id = span_context.span_id + self.__trace_id = span_context.trace_id + + self.__offered = True + + def collect(self, point_attributes: Attributes) -> Exemplar | None: + """May return an Exemplar and resets the bucket for the next sampling period.""" + if not self.__offered: + return None + + current_attributes = { + k: v for k, v in self.__attributes.items() if k not 
in point_attributes + } + + exemplar = Exemplar( + current_attributes, + self.__value, + self.__time_unix_nano, + self.__span_id, + self.__trace_id, + ) + self.__reset() + return exemplar + + def __reset(self) -> None: + self.__value = 0 + self.__attributes = {} + self.__time_unix_nano = 0 + self.__span_id = None + self.__trace_id = None + self.__offered = False + + +class FixedSizeExemplarReservoirABC(ExemplarReservoir): + """Abstract class for a reservoir with fixed size.""" + + def __init__(self, size: int) -> None: + super().__init__() + self._size: int = size + self._reservoir_storage: list[ExemplarBucket] = [ + ExemplarBucket() for _ in range(self._size) + ] + + def maxSize(self) -> int: + """Reservoir maximal size""" + return self._size + + def collect(self, point_attributes: Attributes) -> list[Exemplar]: + """Returns accumulated Exemplars and also resets the reservoir for the next + sampling period + + Args: + point_attributes The attributes associated with metric point. + + Returns: + a list of :class:`opentelemetry.sdk.metrics.exemplar.Exemplar`s. Returned + exemplars contain the attributes that were filtered out by the aggregator, + but recorded alongside the original measurement. + """ + exemplars = filter( + lambda e: e is not None, + map( + lambda bucket: bucket.collect(point_attributes), + self._reservoir_storage, + ), + ) + self._reset() + return [exemplars] + + def _reset(self) -> None: + """Reset the reservoir.""" + pass + + +class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC): + """This reservoir uses an uniformly-weighted sampling algorithm based on the number + of samples the reservoir has seen so far to determine if the offered measurements + should be sampled. 
+ + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir + """ + + def __init__(self, size: int = 1) -> None: + super().__init__(size) + self._measurements_seen: int = 0 + + def _reset(self) -> None: + super()._reset() + self._measurements_seen = 0 + + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> None: + """Offers a measurement to be sampled.""" + index = self._find_bucket_index(value, time_unix_nano, attributes, ctx) + if index != -1: + self._reservoir_storage[index].offer(value, time_unix_nano, attributes, ctx) + + def _find_bucket_index( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> int: + self._measurements_seen += 1 + if self._measurements_seen < self._size: + return self._measurements_seen + + index = randrange(0, self._measurements_seen) + return index if index < self._size else -1 + + +class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC): + """This Exemplar reservoir takes a configuration parameter that is the + configuration of a Histogram. This implementation keeps the last seen measurement + that falls within a histogram bucket. 
+ + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir + """ + + def __init__(self, boundaries: Sequence[float]) -> None: + super().__init__(len(boundaries) + 1) + self._boundaries: Sequence[float] = boundaries + + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> None: + """Offers a measurement to be sampled.""" + index = self._find_bucket_index(value, time_unix_nano, attributes, ctx) + self._reservoir_storage[index].offer(value, time_unix_nano, attributes, ctx) + + def _find_bucket_index( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> int: + for i, boundary in enumerate(self._boundaries): + if value <= boundary: + return i + return len(self._boundaries) From 5aa8353adef8cbeefd61c3a056bf90c28f628fcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 31 Jul 2024 13:40:58 +0200 Subject: [PATCH 02/48] Add exemplar to datapoint --- .../src/opentelemetry/sdk/metrics/_internal/point.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py index 42420b9008..66cf7f231f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py @@ -14,12 +14,13 @@ # pylint: disable=unused-import -from dataclasses import asdict, dataclass +from dataclasses import asdict, dataclass, field from json import dumps, loads from typing import Optional, Sequence, Union # This kind of import is needed to avoid Sphinx errors. 
import opentelemetry.sdk.metrics._internal +from opentelemetry.sdk.metrics._internal.exemplar import Exemplar from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.util.types import Attributes @@ -35,6 +36,7 @@ class NumberDataPoint: start_time_unix_nano: int time_unix_nano: int value: Union[int, float] + exemplars: Sequence[Exemplar] = field(default_factory=list) def to_json(self, indent=4) -> str: return dumps(asdict(self), indent=indent) @@ -55,6 +57,7 @@ class HistogramDataPoint: explicit_bounds: Sequence[float] min: float max: float + exemplars: Sequence[Exemplar] = field(default_factory=list) def to_json(self, indent=4) -> str: return dumps(asdict(self), indent=indent) @@ -85,6 +88,7 @@ class ExponentialHistogramDataPoint: flags: int min: float max: float + exemplars: Sequence[Exemplar] = field(default_factory=list) def to_json(self, indent=4) -> str: return dumps(asdict(self), indent=indent) From d699f8d91c7ef3d3c1647aefd795cba83ef66b5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Sun, 11 Aug 2024 10:41:27 +0200 Subject: [PATCH 03/48] Add time to Measurement --- .../sdk/metrics/_internal/instrument.py | 49 +++++++------------ .../sdk/metrics/_internal/measurement.py | 10 ++++ 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py index 2b02e67fc3..8901b2939b 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py @@ -15,6 +15,7 @@ # pylint: disable=too-many-ancestors, unused-import from logging import getLogger +from time import time_ns from typing import Dict, Generator, Iterable, List, Optional, Union # This kind of import is needed to avoid Sphinx errors. 
@@ -36,9 +37,7 @@ _logger = getLogger(__name__) -_ERROR_MESSAGE = ( - "Expected ASCII string of maximum length 63 characters but got {}" -) +_ERROR_MESSAGE = "Expected ASCII string of maximum length 63 characters but got {}" class _Synchronous: @@ -108,11 +107,8 @@ def __init__( self._callbacks: List[CallbackT] = [] if callbacks is not None: - for callback in callbacks: - if isinstance(callback, Generator): - # advance generator to it's first yield next(callback) @@ -129,21 +125,18 @@ def inner( else: self._callbacks.append(callback) - def callback( - self, callback_options: CallbackOptions - ) -> Iterable[Measurement]: + def callback(self, callback_options: CallbackOptions) -> Iterable[Measurement]: for callback in self._callbacks: try: for api_measurement in callback(callback_options): yield Measurement( api_measurement.value, + time_unix_nano=time_ns(), instrument=self, attributes=api_measurement.attributes, ) except Exception: # pylint: disable=broad-exception-caught - _logger.exception( - "Callback failed for instrument %s.", self.name - ) + _logger.exception("Callback failed for instrument %s.", self.name) class Counter(_Synchronous, APICounter): @@ -152,16 +145,13 @@ def __new__(cls, *args, **kwargs): raise TypeError("Counter must be instantiated via a meter.") return super().__new__(cls) - def add( - self, amount: Union[int, float], attributes: Dict[str, str] = None - ): + def add(self, amount: Union[int, float], attributes: Dict[str, str] = None): if amount < 0: - _logger.warning( - "Add amount must be non-negative on Counter %s.", self.name - ) + _logger.warning("Add amount must be non-negative on Counter %s.", self.name) return + time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, self, attributes) + Measurement(amount, time_unix_nano, self, attributes) ) @@ -174,26 +164,23 @@ def __new__(cls, *args, **kwargs): def add( self, amount: Union[int, float], attributes: Dict[str, str] = None ): + time_unix_nano = 
time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, self, attributes) + Measurement(amount, time_unix_nano, self, attributes) ) class ObservableCounter(_Asynchronous, APIObservableCounter): def __new__(cls, *args, **kwargs): if cls is ObservableCounter: - raise TypeError( - "ObservableCounter must be instantiated via a meter." - ) + raise TypeError("ObservableCounter must be instantiated via a meter.") return super().__new__(cls) class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter): def __new__(cls, *args, **kwargs): if cls is ObservableUpDownCounter: - raise TypeError( - "ObservableUpDownCounter must be instantiated via a meter." - ) + raise TypeError("ObservableUpDownCounter must be instantiated via a meter.") return super().__new__(cls) @@ -203,17 +190,16 @@ def __new__(cls, *args, **kwargs): raise TypeError("Histogram must be instantiated via a meter.") return super().__new__(cls) - def record( - self, amount: Union[int, float], attributes: Dict[str, str] = None - ): + def record(self, amount: Union[int, float], attributes: Dict[str, str] = None): if amount < 0: _logger.warning( "Record amount must be non-negative on Histogram %s.", self.name, ) return + time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, self, attributes) + Measurement(amount, time_unix_nano, self, attributes) ) @@ -226,8 +212,9 @@ def __new__(cls, *args, **kwargs): def set( self, amount: Union[int, float], attributes: Dict[str, str] = None ): + time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, self, attributes) + Measurement(amount, time_unix_nano, self, attributes) ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py index 0dced5bcd3..01c0a93e51 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py +++ 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py @@ -15,6 +15,7 @@ from dataclasses import dataclass from typing import Union +from opentelemetry.context import Context from opentelemetry.metrics import Instrument from opentelemetry.util.types import Attributes @@ -23,8 +24,17 @@ class Measurement: """ Represents a data point reported via the metrics API to the SDK. + + Attributes: + value: Measured value + time_unix_nano: The time the API call was made to record the Measurement + instrument: Measurement instrument + context: The active Context of the Measurement at API call time. + attributes: Measurement attributes """ value: Union[int, float] + time_unix_nano: int instrument: Instrument + context: Context attributes: Attributes = None From 3b1e40dc6228767d02efe2b6cc35742eacfda756 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Sun, 11 Aug 2024 10:58:40 +0200 Subject: [PATCH 04/48] Add context to measurements --- .../metrics/_internal/observation.py | 12 ++++++++++-- .../sdk/metrics/_internal/instrument.py | 18 ++++++++++-------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py index 7aa24e3342..16b5d66832 100644 --- a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py +++ b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Union +from typing import Optional, Union +from opentelemetry.context import Context from opentelemetry.util.types import Attributes @@ -25,13 +26,15 @@ class Observation: Args: value: The float or int measured value attributes: The measurement's attributes + context: The measurement's context """ def __init__( - self, value: Union[int, float], attributes: Attributes = None + self, value: Union[int, float], attributes: Attributes = None, context: Optional[Context] = None ) -> None: self._value = value self._attributes = attributes + self._context = context @property def value(self) -> Union[float, int]: @@ -40,12 +43,17 @@ def value(self) -> Union[float, int]: @property def attributes(self) -> Attributes: return self._attributes + + @property + def context(self) -> Optional[Context]: + return self._context def __eq__(self, other: object) -> bool: return ( isinstance(other, Observation) and self.value == other.value and self.attributes == other.attributes + and self.context == other.context ) def __repr__(self) -> str: diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py index 8901b2939b..6fd4adbc30 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py @@ -20,6 +20,7 @@ # This kind of import is needed to avoid Sphinx errors. 
import opentelemetry.sdk.metrics +from opentelemetry.context import Context, get_current from opentelemetry.metrics import CallbackT from opentelemetry.metrics import Counter as APICounter from opentelemetry.metrics import Histogram as APIHistogram @@ -134,6 +135,7 @@ def callback(self, callback_options: CallbackOptions) -> Iterable[Measurement]: time_unix_nano=time_ns(), instrument=self, attributes=api_measurement.attributes, + context=api_measurement.context or get_current() ) except Exception: # pylint: disable=broad-exception-caught _logger.exception("Callback failed for instrument %s.", self.name) @@ -145,13 +147,13 @@ def __new__(cls, *args, **kwargs): raise TypeError("Counter must be instantiated via a meter.") return super().__new__(cls) - def add(self, amount: Union[int, float], attributes: Dict[str, str] = None): + def add(self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None): if amount < 0: _logger.warning("Add amount must be non-negative on Counter %s.", self.name) return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes) + Measurement(amount, time_unix_nano, self, attributes, context or get_current()) ) @@ -162,11 +164,11 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def add( - self, amount: Union[int, float], attributes: Dict[str, str] = None + self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None ): time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes) + Measurement(amount, time_unix_nano, self, attributes, context or get_current()) ) @@ -190,7 +192,7 @@ def __new__(cls, *args, **kwargs): raise TypeError("Histogram must be instantiated via a meter.") return super().__new__(cls) - def record(self, amount: Union[int, float], attributes: Dict[str, str] = None): + def record(self, 
amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None): if amount < 0: _logger.warning( "Record amount must be non-negative on Histogram %s.", @@ -199,7 +201,7 @@ def record(self, amount: Union[int, float], attributes: Dict[str, str] = None): return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes) + Measurement(amount, time_unix_nano, self, attributes, context or get_current()) ) @@ -210,11 +212,11 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def set( - self, amount: Union[int, float], attributes: Dict[str, str] = None + self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None ): time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes) + Measurement(amount, time_unix_nano, self, attributes, context or get_current()) ) From 05ebc34baed6716c550807e611aab6036e3e7adb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Mon, 12 Aug 2024 16:01:48 +0200 Subject: [PATCH 05/48] First propagation of filter and reservoir factory --- .../src/opentelemetry/sdk/metrics/__init__.py | 3 + .../sdk/metrics/_internal/__init__.py | 3 + .../_internal/_view_instrument_match.py | 6 +- .../sdk/metrics/_internal/aggregation.py | 97 ++++++++++++++----- .../metrics/_internal/exemplar/__init__.py | 4 + .../_internal/exemplar/exemplar_reservoir.py | 26 +++-- .../metrics/_internal/measurement_consumer.py | 2 +- .../_internal/metric_reader_storage.py | 4 +- .../metrics/_internal/sdk_configuration.py | 1 + .../sdk/metrics/_internal/view.py | 29 +++++- 10 files changed, 138 insertions(+), 37 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index a907a28976..5f66331305 100644 --- 
a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -15,6 +15,7 @@ from opentelemetry.sdk.metrics._internal import Meter, MeterProvider from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError +from opentelemetry.sdk.metrics._internal.exemplar import ExemplarFilter, ExemplarReservoir from opentelemetry.sdk.metrics._internal.instrument import Counter from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge from opentelemetry.sdk.metrics._internal.instrument import ( @@ -26,6 +27,8 @@ ) __all__ = [ + "ExemplarFilter", + "ExemplarReservoir", "Meter", "MeterProvider", "MetricsTimeoutError", diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py index 9dc95c0edb..a0eb87e2fd 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py @@ -35,6 +35,7 @@ from opentelemetry.metrics import _Gauge as APIGauge from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError +from opentelemetry.sdk.metrics._internal.exemplar import ExemplarFilter, TraceBasedExemplarFilter from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, @@ -381,6 +382,7 @@ def __init__( "opentelemetry.sdk.metrics.export.MetricReader" ] = (), resource: Resource = None, + exemplar_filter: Optional[ExemplarFilter] = None, shutdown_on_exit: bool = True, views: Sequence["opentelemetry.sdk.metrics.view.View"] = (), ): @@ -390,6 +392,7 @@ def __init__( if resource is None: resource = Resource.create({}) self._sdk_config = SdkConfiguration( + exemplar_filter = TraceBasedExemplarFilter() if exemplar_filter is None else exemplar_filter, resource=resource, metric_readers=metric_readers, views=views, 
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py index 7dd7f58f27..b527f0e5df 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py @@ -81,7 +81,7 @@ def conflicts(self, other: "_ViewInstrumentMatch") -> bool: return result # pylint: disable=protected-access - def consume_measurement(self, measurement: Measurement) -> None: + def consume_measurement(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: if self._view._attribute_keys is not None: @@ -107,6 +107,7 @@ def consume_measurement(self, measurement: Measurement) -> None: self._view._aggregation._create_aggregation( self._instrument, attributes, + self._view._exemplar_reservoir_factory, self._start_time_unix_nano, ) ) @@ -116,11 +117,12 @@ def consume_measurement(self, measurement: Measurement) -> None: ]._create_aggregation( self._instrument, attributes, + self._view._exemplar_reservoir_factory, self._start_time_unix_nano, ) self._attributes_aggregation[aggr_key] = aggregation - self._attributes_aggregation[aggr_key].aggregate(measurement) + self._attributes_aggregation[aggr_key].aggregate(measurement, should_sample_exemplar) def collect( self, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 62ac967bbe..07535029aa 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -17,10 +17,11 @@ from abc import ABC, abstractmethod from bisect import bisect_left from enum import IntEnum +from functools import partial from logging import getLogger from math import inf from threading import Lock -from typing 
import Generic, List, Optional, Sequence, TypeVar +from typing import Callable, Generic, List, Optional, Sequence, Type, TypeVar from opentelemetry.metrics import ( Asynchronous, @@ -34,6 +35,7 @@ UpDownCounter, _Gauge, ) +from opentelemetry.sdk.metrics._internal.exemplar import Exemplar, ExemplarReservoirFactory from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import ( Buckets, ) @@ -80,14 +82,20 @@ class AggregationTemporality(IntEnum): class _Aggregation(ABC, Generic[_DataPointVarT]): - def __init__(self, attributes: Attributes): + def __init__(self, attributes: Attributes, reservoir_factory: ExemplarReservoirFactory): self._lock = Lock() self._attributes = attributes + self._reservoir = reservoir_factory() self._previous_point = None - @abstractmethod - def aggregate(self, measurement: Measurement) -> None: - pass + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + if should_sample_exemplar: + self._reservoir.offer( + measurement.value, + measurement.time_unix_nano, + measurement.attributes, + measurement.context, + ) @abstractmethod def collect( @@ -97,9 +105,14 @@ def collect( ) -> Optional[_DataPointVarT]: pass + def _collect_exemplars(self) -> Sequence[Exemplar]: + return self._reservoir.collect( + self._attributes + ) # FIXME provide filtered data point attributes + class _DropAggregation(_Aggregation): - def aggregate(self, measurement: Measurement) -> None: + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: pass def collect( @@ -117,13 +130,12 @@ def __init__( instrument_is_monotonic: bool, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, + reservoir_factory: ExemplarReservoirFactory, ): - super().__init__(attributes) + super().__init__(attributes, reservoir_factory) self._start_time_unix_nano = start_time_unix_nano - self._instrument_aggregation_temporality = ( - instrument_aggregation_temporality - ) + 
self._instrument_aggregation_temporality = instrument_aggregation_temporality self._instrument_is_monotonic = instrument_is_monotonic self._value = None @@ -131,13 +143,15 @@ def __init__( self._previous_collection_start_nano = self._start_time_unix_nano self._previous_value = 0 - def aggregate(self, measurement: Measurement) -> None: + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: with self._lock: if self._value is None: self._value = 0 self._value = self._value + measurement.value + super().aggregate(measurement, should_sample_exemplar) + def collect( self, collection_aggregation_temporality: AggregationTemporality, @@ -266,6 +280,7 @@ def collect( with self._lock: value = self._value self._value = None + exemplars = self._collect_exemplars() if ( self._instrument_aggregation_temporality @@ -290,6 +305,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, value=value, @@ -302,6 +318,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, value=self._previous_value, @@ -330,6 +347,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, value=result_value, @@ -337,6 +355,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, value=value, @@ -344,13 +363,15 @@ def collect( class _LastValueAggregation(_Aggregation[GaugePoint]): - def __init__(self, attributes: Attributes): - super().__init__(attributes) + def __init__(self, attributes: Attributes, reservoir_factory: ExemplarReservoirFactory): + 
super().__init__(attributes, reservoir_factory) self._value = None - def aggregate(self, measurement: Measurement): + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True): with self._lock: self._value = measurement.value + + super().aggregate(measurement, should_sample_exemplar) def collect( self, @@ -366,8 +387,11 @@ def collect( value = self._value self._value = None + exemplars = self._collect_exemplars() + return NumberDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=None, time_unix_nano=collection_start_nano, value=value, @@ -380,6 +404,7 @@ def __init__( attributes: Attributes, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, + reservoir_factory: ExemplarReservoirFactory, boundaries: Sequence[float] = ( 0.0, 5.0, @@ -399,7 +424,7 @@ def __init__( ), record_min_max: bool = True, ): - super().__init__(attributes) + super().__init__(attributes, reservoir_factory=partial(reservoir_factory, boundaries=boundaries)) self._instrument_aggregation_temporality = ( instrument_aggregation_temporality @@ -423,7 +448,7 @@ def __init__( def _get_empty_bucket_counts(self) -> List[int]: return [0] * (len(self._boundaries) + 1) - def aggregate(self, measurement: Measurement) -> None: + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: with self._lock: if self._value is None: @@ -439,6 +464,8 @@ def aggregate(self, measurement: Measurement) -> None: self._value[bisect_left(self._boundaries, measurement_value)] += 1 + super().aggregate(measurement, should_sample_exemplar) + def collect( self, collection_aggregation_temporality: AggregationTemporality, @@ -459,6 +486,8 @@ def collect( self._min = inf self._max = -inf + exemplars = self._collect_exemplars() + if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA @@ -482,6 +511,7 @@ def collect( return HistogramDataPoint( attributes=self._attributes, + 
exemplars=exemplars, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, count=sum(value), @@ -511,6 +541,7 @@ def collect( return HistogramDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, count=sum(self._previous_value), @@ -540,6 +571,7 @@ class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): def __init__( self, attributes: Attributes, + reservoir_factory: ExemplarReservoirFactory, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, # This is the default maximum number of buckets per positive or @@ -583,7 +615,7 @@ def __init__( # _ExplicitBucketHistogramAggregation both size and amount of buckets # remain constant once it is instantiated). - super().__init__(attributes) + super().__init__(attributes, reservoir_factory=partial(reservoir_factory, size=min(20, max_size))) self._instrument_aggregation_temporality = ( instrument_aggregation_temporality @@ -614,7 +646,7 @@ def __init__( self._mapping = self._new_mapping(self._max_scale) - def aggregate(self, measurement: Measurement) -> None: + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: # pylint: disable=too-many-branches,too-many-statements, too-many-locals with self._lock: @@ -724,6 +756,8 @@ def aggregate(self, measurement: Measurement) -> None: # in _ExplicitBucketHistogramAggregation.aggregate value.increment_bucket(bucket_index) + super().aggregate(measurement, should_sample_exemplar) + def collect( self, collection_aggregation_temporality: AggregationTemporality, @@ -753,6 +787,8 @@ def collect( self._zero_count = 0 self._scale = None + exemplars = self._collect_exemplars() + if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA @@ -776,6 +812,7 @@ def collect( return ExponentialHistogramDataPoint( attributes=self._attributes, + 
exemplars=exemplars, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, count=count, @@ -939,6 +976,7 @@ def collect( return ExponentialHistogramDataPoint( attributes=self._attributes, + exemplars=exemplars, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, count=self._previous_count, @@ -1109,6 +1147,7 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: """Creates an aggregation""" @@ -1137,6 +1176,7 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: @@ -1144,6 +1184,7 @@ def _create_aggregation( if isinstance(instrument, Counter): return _SumAggregation( attributes, + reservoir_factory=reservoir_factory(_SumAggregation), instrument_is_monotonic=True, instrument_aggregation_temporality=( AggregationTemporality.DELTA @@ -1153,6 +1194,7 @@ def _create_aggregation( if isinstance(instrument, UpDownCounter): return _SumAggregation( attributes, + reservoir_factory=reservoir_factory(_SumAggregation), instrument_is_monotonic=False, instrument_aggregation_temporality=( AggregationTemporality.DELTA @@ -1163,6 +1205,7 @@ def _create_aggregation( if isinstance(instrument, ObservableCounter): return _SumAggregation( attributes, + reservoir_factory=reservoir_factory(_SumAggregation), instrument_is_monotonic=True, instrument_aggregation_temporality=( AggregationTemporality.CUMULATIVE @@ -1173,6 +1216,7 @@ def _create_aggregation( if isinstance(instrument, ObservableUpDownCounter): return _SumAggregation( attributes, + reservoir_factory=reservoir_factory(_SumAggregation), instrument_is_monotonic=False, instrument_aggregation_temporality=( AggregationTemporality.CUMULATIVE @@ -1183,6 +1227,7 @@ def 
_create_aggregation( if isinstance(instrument, Histogram): return _ExplicitBucketHistogramAggregation( attributes, + reservoir_factory=reservoir_factory(_ExplicitBucketHistogramAggregation), instrument_aggregation_temporality=( AggregationTemporality.DELTA ), @@ -1190,10 +1235,10 @@ def _create_aggregation( ) if isinstance(instrument, ObservableGauge): - return _LastValueAggregation(attributes) + return _LastValueAggregation(attributes, reservoir_factory=reservoir_factory(_LastValueAggregation)) if isinstance(instrument, _Gauge): - return _LastValueAggregation(attributes) + return _LastValueAggregation(attributes, reservoir_factory=reservoir_factory(_LastValueAggregation)) # pylint: disable=broad-exception-raised raise Exception(f"Invalid instrument type {type(instrument)} found") @@ -1212,6 +1257,7 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: @@ -1225,6 +1271,7 @@ def _create_aggregation( return _ExponentialBucketHistogramAggregation( attributes, + reservoir_factory(_ExponentialBucketHistogramAggregation), instrument_aggregation_temporality, start_time_unix_nano, max_size=self._max_size, @@ -1274,6 +1321,7 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: @@ -1289,6 +1337,7 @@ def _create_aggregation( attributes, instrument_aggregation_temporality, start_time_unix_nano, + reservoir_factory(_ExplicitBucketHistogramAggregation), self._boundaries, self._record_min_max, ) @@ -1304,6 +1353,7 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: @@ -1320,6 +1370,7 @@ def _create_aggregation( 
isinstance(instrument, (Counter, ObservableCounter)), instrument_aggregation_temporality, start_time_unix_nano, + reservoir_factory(_SumAggregation), ) @@ -1335,9 +1386,10 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: - return _LastValueAggregation(attributes) + return _LastValueAggregation(attributes, reservoir_factory=reservoir_factory(_LastValueAggregation)) class DropAggregation(Aggregation): @@ -1347,6 +1399,7 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, + reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], start_time_unix_nano: int, ) -> _Aggregation: - return _DropAggregation(attributes) + return _DropAggregation(attributes, reservoir_factory(_DropAggregation)) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py index a83b5b82b8..c5ed4454e5 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py @@ -14,6 +14,7 @@ from .exemplar import Exemplar from .exemplar_filter import ( + ExemplarFilter, AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, @@ -21,15 +22,18 @@ from .exemplar_reservoir import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoir, + ExemplarReservoirFactory, SimpleFixedSizeExemplarReservoir, ) __all__ = [ "Exemplar", + "ExemplarFilter", "AlwaysOffExemplarFilter", "AlwaysOnExemplarFilter", "TraceBasedExemplarFilter", "AlignedHistogramBucketExemplarReservoir", "ExemplarReservoir", + "ExemplarReservoirFactory", "SimpleFixedSizeExemplarReservoir", ] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 7df4ed2b56..81db862944 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -14,7 +14,7 @@ from abc import ABC, abstractmethod from random import randrange -from typing import Optional, Sequence, Union +from typing import Any, Callable, Optional, Sequence, TypeAlias, Union from opentelemetry import trace from opentelemetry.context import Context @@ -28,6 +28,10 @@ class ExemplarReservoir(ABC): """ExemplarReservoir provide a method to offer measurements to the reservoir and another to collect accumulated Exemplars. + Note: + The constructor MUST accept ``**kwargs`` that may be set from aggregation + parameters. + Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir """ @@ -118,8 +122,8 @@ def __reset(self) -> None: class FixedSizeExemplarReservoirABC(ExemplarReservoir): """Abstract class for a reservoir with fixed size.""" - def __init__(self, size: int) -> None: - super().__init__() + def __init__(self, size: int, **kwargs) -> None: + super().__init__(**kwargs) self._size: int = size self._reservoir_storage: list[ExemplarBucket] = [ ExemplarBucket() for _ in range(self._size) @@ -165,8 +169,8 @@ class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC): https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir """ - def __init__(self, size: int = 1) -> None: - super().__init__(size) + def __init__(self, size: int = 1, **kwargs) -> None: + super().__init__(size, **kwargs) self._measurements_seen: int = 0 def _reset(self) -> None: @@ -209,8 +213,8 @@ class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC): 
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir """ - def __init__(self, boundaries: Sequence[float]) -> None: - super().__init__(len(boundaries) + 1) + def __init__(self, boundaries: Sequence[float], **kwargs) -> None: + super().__init__(len(boundaries) + 1, **kwargs) self._boundaries: Sequence[float] = boundaries def offer( @@ -235,3 +239,11 @@ def _find_bucket_index( if value <= boundary: return i return len(self._boundaries) + + +ExemplarReservoirFactory: TypeAlias = Callable[[dict[str, Any]], ExemplarReservoir] +ExemplarReservoirFactory.__doc__ = """ExemplarReservoir factory. + +It may receive the Aggregation parameters it is bounded to; e.g. +the _ExplicitBucketHistogramAggregation will provide the boundaries. +""" \ No newline at end of file diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py index 4310061b82..2d755b6e5f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py @@ -79,7 +79,7 @@ def __init__( def consume_measurement(self, measurement: Measurement) -> None: for reader_storage in self._reader_storages.values(): - reader_storage.consume_measurement(measurement) + reader_storage.consume_measurement(measurement, self._sdk_config.exemplar_filter.should_sample(measurement.value, measurement.time_unix_nano, measurement.attributes, measurement.context)) def register_asynchronous_instrument( self, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py index 7fac6c6c10..e8d3bd802f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py +++ 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py @@ -113,11 +113,11 @@ def _get_or_init_view_instrument_match( return view_instrument_matches - def consume_measurement(self, measurement: Measurement) -> None: + def consume_measurement(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: for view_instrument_match in self._get_or_init_view_instrument_match( measurement.instrument ): - view_instrument_match.consume_measurement(measurement) + view_instrument_match.consume_measurement(measurement, should_sample_exemplar) def collect(self) -> Optional[MetricsData]: # Use a list instead of yielding to prevent a slow reader from holding diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py index 9594ab38a7..3d88facb0c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py @@ -24,6 +24,7 @@ @dataclass class SdkConfiguration: + exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter" resource: "opentelemetry.sdk.resources.Resource" metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"] views: Sequence["opentelemetry.sdk.metrics.View"] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index 9473acde4d..31fee511b4 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -15,17 +15,35 @@ from fnmatch import fnmatch from logging import getLogger -from typing import Optional, Set, Type +from typing import Callable, Optional, Set, Type from opentelemetry.metrics import Instrument from opentelemetry.sdk.metrics._internal.aggregation import ( + _Aggregation, Aggregation, 
DefaultAggregation, + _ExplicitBucketHistogramAggregation, + _ExponentialBucketHistogramAggregation, +) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirFactory, + SimpleFixedSizeExemplarReservoir, ) _logger = getLogger(__name__) +def _default_reservoir_factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: + """Default reservoir factory per aggregation.""" + if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + return AlignedHistogramBucketExemplarReservoir + elif issubclass(aggregationType, _ExponentialBucketHistogramAggregation): + return SimpleFixedSizeExemplarReservoir + return SimpleFixedSizeExemplarReservoir + + class View: """ A `View` configuration parameters can be used for the following @@ -73,6 +91,9 @@ class View: corresponding metrics stream. If `None` an instance of `DefaultAggregation` will be used. + exemplar_reservoir_factory: This is a metric stream customizing attribute: + the exemplar reservoir factory + instrument_unit: This is an instrument matching attribute: the unit the instrument must have to match the view. @@ -92,6 +113,7 @@ def __init__( description: Optional[str] = None, attribute_keys: Optional[Set[str]] = None, aggregation: Optional[Aggregation] = None, + exemplar_reservoir_factory: Optional[Callable[[Type[_Aggregation]], ExemplarReservoirFactory]] = None, instrument_unit: Optional[str] = None, ): if ( @@ -120,8 +142,8 @@ def __init__( "characters in instrument_name" ) - # _name, _description, _aggregation and _attribute_keys will be - # accessed when instantiating a _ViewInstrumentMatch. + # _name, _description, _aggregation, _exemplar_reservoir_factory and + # _attribute_keys will be accessed when instantiating a _ViewInstrumentMatch. 
self._name = name self._instrument_type = instrument_type self._instrument_name = instrument_name @@ -133,6 +155,7 @@ def __init__( self._description = description self._attribute_keys = attribute_keys self._aggregation = aggregation or self._default_aggregation + self._exemplar_reservoir_factory = exemplar_reservoir_factory or _default_reservoir_factory # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches From 6f74b3c41a61173bac4c31eefd8e65f681f6839c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 13 Aug 2024 17:23:15 +0200 Subject: [PATCH 06/48] Reduce autoformat noise --- .../sdk/metrics/_internal/instrument.py | 31 ++++++++++++++----- .../sdk/metrics/_internal/view.py | 1 - 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py index 6fd4adbc30..2867951740 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py @@ -108,8 +108,11 @@ def __init__( self._callbacks: List[CallbackT] = [] if callbacks is not None: + for callback in callbacks: + if isinstance(callback, Generator): + # advance generator to it's first yield next(callback) @@ -126,7 +129,9 @@ def inner( else: self._callbacks.append(callback) - def callback(self, callback_options: CallbackOptions) -> Iterable[Measurement]: + def callback( + self, callback_options: CallbackOptions + ) -> Iterable[Measurement]: for callback in self._callbacks: try: for api_measurement in callback(callback_options): @@ -138,7 +143,9 @@ def callback(self, callback_options: CallbackOptions) -> Iterable[Measurement]: context=api_measurement.context or get_current() ) except Exception: # pylint: disable=broad-exception-caught - _logger.exception("Callback failed for instrument %s.", self.name) + 
_logger.exception( + "Callback failed for instrument %s.", self.name + ) class Counter(_Synchronous, APICounter): @@ -147,9 +154,13 @@ def __new__(cls, *args, **kwargs): raise TypeError("Counter must be instantiated via a meter.") return super().__new__(cls) - def add(self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None): + def add( + self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None + ): if amount < 0: - _logger.warning("Add amount must be non-negative on Counter %s.", self.name) + _logger.warning( + "Add amount must be non-negative on Counter %s.", self.name + ) return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( @@ -175,14 +186,18 @@ def add( class ObservableCounter(_Asynchronous, APIObservableCounter): def __new__(cls, *args, **kwargs): if cls is ObservableCounter: - raise TypeError("ObservableCounter must be instantiated via a meter.") + raise TypeError( + "ObservableCounter must be instantiated via a meter." + ) return super().__new__(cls) class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter): def __new__(cls, *args, **kwargs): if cls is ObservableUpDownCounter: - raise TypeError("ObservableUpDownCounter must be instantiated via a meter.") + raise TypeError( + "ObservableUpDownCounter must be instantiated via a meter." 
+ ) return super().__new__(cls) @@ -192,7 +207,9 @@ def __new__(cls, *args, **kwargs): raise TypeError("Histogram must be instantiated via a meter.") return super().__new__(cls) - def record(self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None): + def record( + self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None + ): if amount < 0: _logger.warning( "Record amount must be non-negative on Histogram %s.", diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index 31fee511b4..625796ba85 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -27,7 +27,6 @@ ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoir, ExemplarReservoirFactory, SimpleFixedSizeExemplarReservoir, ) From 5fc775ba134af7f48a075911d8376d7d0bccdd39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 13 Aug 2024 18:26:51 +0200 Subject: [PATCH 07/48] Fixing existing test - part 1 --- .../_internal/_view_instrument_match.py | 6 +- .../sdk/metrics/_internal/view.py | 4 +- .../tests/metrics/test_aggregation.py | 187 +++++++++++------- .../tests/metrics/test_instrument.py | 171 ++++++++++++---- .../metrics/test_measurement_consumer.py | 8 +- .../metrics/test_metric_reader_storage.py | 96 +++++---- .../metrics/test_view_instrument_match.py | 34 +++- 7 files changed, 351 insertions(+), 155 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py index b527f0e5df..700be3d5c3 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py +++ 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py @@ -52,12 +52,14 @@ def __init__( ) if not isinstance(self._view._aggregation, DefaultAggregation): self._aggregation = self._view._aggregation._create_aggregation( - self._instrument, None, 0 + self._instrument, None, self._view._exemplar_reservoir_factory, 0 ) else: self._aggregation = self._instrument_class_aggregation[ self._instrument.__class__ - ]._create_aggregation(self._instrument, None, 0) + ]._create_aggregation( + self._instrument, None, self._view._exemplar_reservoir_factory, 0 + ) def conflicts(self, other: "_ViewInstrumentMatch") -> bool: # pylint: disable=protected-access diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index 625796ba85..9cbf602c6f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -34,7 +34,7 @@ _logger = getLogger(__name__) -def _default_reservoir_factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: +def default_reservoir_factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: """Default reservoir factory per aggregation.""" if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): return AlignedHistogramBucketExemplarReservoir @@ -154,7 +154,7 @@ def __init__( self._description = description self._attribute_keys = attribute_keys self._aggregation = aggregation or self._default_aggregation - self._exemplar_reservoir_factory = exemplar_reservoir_factory or _default_reservoir_factory + self._exemplar_reservoir_factory = exemplar_reservoir_factory or default_reservoir_factory # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 7ea463ec8a..af687d4905 
100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -15,11 +15,12 @@ # pylint: disable=protected-access from math import inf -from time import sleep +from time import sleep, time_ns from typing import Union from unittest import TestCase from unittest.mock import Mock +from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( _ExplicitBucketHistogramAggregation, _LastValueAggregation, @@ -35,6 +36,7 @@ _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement +from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory from opentelemetry.sdk.metrics.export import ( AggregationTemporality, NumberDataPoint, @@ -48,10 +50,10 @@ from opentelemetry.util.types import Attributes -def measurement( - value: Union[int, float], attributes: Attributes = None -) -> Measurement: - return Measurement(value, instrument=Mock(), attributes=attributes) +def measurement(value: Union[int, float], attributes: Attributes = None) -> Measurement: + return Measurement( + value, time_ns(), instrument=Mock(), context=Context(), attributes=attributes + ) class TestSynchronousSumAggregation(TestCase): @@ -61,7 +63,11 @@ def test_aggregate_delta(self): """ synchronous_sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.DELTA, 0 + Mock(), + True, + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -71,7 +77,11 @@ def test_aggregate_delta(self): self.assertEqual(synchronous_sum_aggregation._value, 6) synchronous_sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.DELTA, 0 + Mock(), + True, + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -86,7 +96,11 @@ def test_aggregate_cumulative(self): """ 
synchronous_sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.CUMULATIVE, 0 + Mock(), + True, + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -96,7 +110,11 @@ def test_aggregate_cumulative(self): self.assertEqual(synchronous_sum_aggregation._value, 6) synchronous_sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.CUMULATIVE, 0 + Mock(), + True, + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -111,7 +129,11 @@ def test_collect_delta(self): """ synchronous_sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.DELTA, 0 + Mock(), + True, + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -137,15 +159,17 @@ def test_collect_delta(self): ) synchronous_sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.DELTA, 0 + Mock(), + True, + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) # 1 is used here directly to simulate the instant the first # collection process starts. 
- first_sum = synchronous_sum_aggregation.collect( - AggregationTemporality.DELTA, 1 - ) + first_sum = synchronous_sum_aggregation.collect(AggregationTemporality.DELTA, 1) self.assertEqual(first_sum.value, 1) @@ -168,21 +192,21 @@ def test_collect_cumulative(self): """ sum_aggregation = _SumAggregation( - Mock(), True, AggregationTemporality.CUMULATIVE, 0 + Mock(), + True, + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_SumAggregation), ) sum_aggregation.aggregate(measurement(1)) - first_sum = sum_aggregation.collect( - AggregationTemporality.CUMULATIVE, 1 - ) + first_sum = sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) self.assertEqual(first_sum.value, 1) # should have been reset after first collect sum_aggregation.aggregate(measurement(1)) - second_sum = sum_aggregation.collect( - AggregationTemporality.CUMULATIVE, 1 - ) + second_sum = sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) self.assertEqual(second_sum.value, 1) @@ -191,9 +215,7 @@ def test_collect_cumulative(self): ) # if no point seen for a whole interval, should return None - third_sum = sum_aggregation.collect( - AggregationTemporality.CUMULATIVE, 1 - ) + third_sum = sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) self.assertIsNone(third_sum) @@ -204,7 +226,9 @@ def test_aggregate(self): temporality """ - last_value_aggregation = _LastValueAggregation(Mock()) + last_value_aggregation = _LastValueAggregation( + Mock(), default_reservoir_factory(_LastValueAggregation) + ) last_value_aggregation.aggregate(measurement(1)) self.assertEqual(last_value_aggregation._value, 1) @@ -220,12 +244,12 @@ def test_collect(self): `LastValueAggregation` collects number data points """ - last_value_aggregation = _LastValueAggregation(Mock()) + last_value_aggregation = _LastValueAggregation( + Mock(), default_reservoir_factory(_LastValueAggregation) + ) self.assertIsNone( - last_value_aggregation.collect( - AggregationTemporality.CUMULATIVE, 1 - ) + 
last_value_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) ) last_value_aggregation.aggregate(measurement(1)) @@ -274,13 +298,12 @@ def test_aggregate(self): Test `ExplicitBucketHistogramAggregation with custom boundaries """ - explicit_bucket_histogram_aggregation = ( - _ExplicitBucketHistogramAggregation( - Mock(), - AggregationTemporality.DELTA, - 0, - boundaries=[0, 2, 4], - ) + explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), + boundaries=[0, 2, 4], ) explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) @@ -314,10 +337,11 @@ def test_min_max(self): maximum value in the population """ - explicit_bucket_histogram_aggregation = ( - _ExplicitBucketHistogramAggregation( - Mock(), AggregationTemporality.CUMULATIVE, 0 - ) + explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), ) explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) @@ -329,13 +353,12 @@ def test_min_max(self): self.assertEqual(explicit_bucket_histogram_aggregation._min, -1) self.assertEqual(explicit_bucket_histogram_aggregation._max, 9999) - explicit_bucket_histogram_aggregation = ( - _ExplicitBucketHistogramAggregation( - Mock(), - AggregationTemporality.CUMULATIVE, - 0, - record_min_max=False, - ) + explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), + record_min_max=False, ) explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) @@ -352,13 +375,12 @@ def test_collect(self): `_ExplicitBucketHistogramAggregation` collects sum metric points """ - explicit_bucket_histogram_aggregation = ( - _ExplicitBucketHistogramAggregation( - Mock(), - 
AggregationTemporality.DELTA, - 0, - boundaries=[0, 1, 2], - ) + explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), + boundaries=[0, 1, 2], ) explicit_bucket_histogram_aggregation.aggregate(measurement(1)) @@ -392,7 +414,10 @@ def test_collect(self): def test_boundaries(self): self.assertEqual( _ExplicitBucketHistogramAggregation( - Mock(), AggregationTemporality.CUMULATIVE, 0 + Mock(), + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), )._boundaries, ( 0.0, @@ -418,19 +443,25 @@ class TestAggregationFactory(TestCase): def test_sum_factory(self): counter = _Counter("name", Mock(), Mock()) factory = SumAggregation() - aggregation = factory._create_aggregation(counter, Mock(), 0) + aggregation = factory._create_aggregation( + counter, Mock(), default_reservoir_factory, 0 + ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.DELTA, ) - aggregation2 = factory._create_aggregation(counter, Mock(), 0) + aggregation2 = factory._create_aggregation( + counter, Mock(), default_reservoir_factory, 0 + ) self.assertNotEqual(aggregation, aggregation2) counter = _UpDownCounter("name", Mock(), Mock()) factory = SumAggregation() - aggregation = factory._create_aggregation(counter, Mock(), 0) + aggregation = factory._create_aggregation( + counter, Mock(), default_reservoir_factory, 0 + ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) self.assertEqual( @@ -440,7 +471,9 @@ def test_sum_factory(self): counter = _ObservableCounter("name", Mock(), Mock(), None) factory = SumAggregation() - aggregation = factory._create_aggregation(counter, Mock(), 0) + aggregation = factory._create_aggregation( 
+ counter, Mock(), default_reservoir_factory, 0 + ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) self.assertEqual( @@ -457,19 +490,27 @@ def test_explicit_bucket_histogram_factory(self): ), record_min_max=False, ) - aggregation = factory._create_aggregation(histo, Mock(), 0) + aggregation = factory._create_aggregation( + histo, Mock(), default_reservoir_factory, 0 + ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) self.assertFalse(aggregation._record_min_max) self.assertEqual(aggregation._boundaries, (0.0, 5.0)) - aggregation2 = factory._create_aggregation(histo, Mock(), 0) + aggregation2 = factory._create_aggregation( + histo, Mock(), default_reservoir_factory, 0 + ) self.assertNotEqual(aggregation, aggregation2) def test_last_value_factory(self): counter = _Counter("name", Mock(), Mock()) factory = LastValueAggregation() - aggregation = factory._create_aggregation(counter, Mock(), 0) + aggregation = factory._create_aggregation( + counter, Mock(), default_reservoir_factory, 0 + ) self.assertIsInstance(aggregation, _LastValueAggregation) - aggregation2 = factory._create_aggregation(counter, Mock(), 0) + aggregation2 = factory._create_aggregation( + counter, Mock(), default_reservoir_factory, 0 + ) self.assertNotEqual(aggregation, aggregation2) @@ -479,9 +520,8 @@ def setUpClass(cls): cls.default_aggregation = DefaultAggregation() def test_counter(self): - aggregation = self.default_aggregation._create_aggregation( - _Counter("name", Mock(), Mock()), Mock(), 0 + _Counter("name", Mock(), Mock()), Mock(), default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) @@ -491,9 +531,8 @@ def test_counter(self): ) def test_up_down_counter(self): - aggregation = self.default_aggregation._create_aggregation( - _UpDownCounter("name", Mock(), Mock()), Mock(), 0 + _UpDownCounter("name", Mock(), Mock()), Mock(), 
default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) @@ -503,10 +542,10 @@ def test_up_down_counter(self): ) def test_observable_counter(self): - aggregation = self.default_aggregation._create_aggregation( _ObservableCounter("name", Mock(), Mock(), callbacks=[Mock()]), Mock(), + default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) @@ -517,12 +556,10 @@ def test_observable_counter(self): ) def test_observable_up_down_counter(self): - aggregation = self.default_aggregation._create_aggregation( - _ObservableUpDownCounter( - "name", Mock(), Mock(), callbacks=[Mock()] - ), + _ObservableUpDownCounter("name", Mock(), Mock(), callbacks=[Mock()]), Mock(), + default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) @@ -533,7 +570,6 @@ def test_observable_up_down_counter(self): ) def test_histogram(self): - aggregation = self.default_aggregation._create_aggregation( _Histogram( "name", @@ -541,12 +577,12 @@ def test_histogram(self): Mock(), ), Mock(), + default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) def test_gauge(self): - aggregation = self.default_aggregation._create_aggregation( _Gauge( "name", @@ -554,12 +590,12 @@ def test_gauge(self): Mock(), ), Mock(), + default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) def test_observable_gauge(self): - aggregation = self.default_aggregation._create_aggregation( _ObservableGauge( "name", @@ -568,6 +604,7 @@ def test_observable_gauge(self): callbacks=[Mock()], ), Mock(), + default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) diff --git a/opentelemetry-sdk/tests/metrics/test_instrument.py b/opentelemetry-sdk/tests/metrics/test_instrument.py index d4a2ddf509..ba260e1071 100644 --- a/opentelemetry-sdk/tests/metrics/test_instrument.py +++ 
b/opentelemetry-sdk/tests/metrics/test_instrument.py @@ -15,9 +15,11 @@ # pylint: disable=no-self-use from logging import WARNING +# from time import time_ns from unittest import TestCase -from unittest.mock import Mock +from unittest.mock import Mock, patch +from opentelemetry.context import Context from opentelemetry.metrics import Observation from opentelemetry.metrics._internal.instrument import CallbackOptions from opentelemetry.sdk.metrics import ( @@ -39,6 +41,7 @@ _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement +from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory class TestCounter(TestCase): @@ -85,21 +88,23 @@ def test_disallow_direct_up_down_counter_creation(self): TEST_ATTRIBUTES = {"foo": "bar"} +TEST_CONTEXT = Context() +TEST_TIMESTAMP = 1_000_000_000 def callable_callback_0(options: CallbackOptions): return [ - Observation(1, attributes=TEST_ATTRIBUTES), - Observation(2, attributes=TEST_ATTRIBUTES), - Observation(3, attributes=TEST_ATTRIBUTES), + Observation(1, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(2, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(3, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] def callable_callback_1(options: CallbackOptions): return [ - Observation(4, attributes=TEST_ATTRIBUTES), - Observation(5, attributes=TEST_ATTRIBUTES), - Observation(6, attributes=TEST_ATTRIBUTES), + Observation(4, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(5, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(6, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] @@ -107,9 +112,9 @@ def generator_callback_0(): options = yield assert isinstance(options, CallbackOptions) options = yield [ - Observation(1, attributes=TEST_ATTRIBUTES), - Observation(2, attributes=TEST_ATTRIBUTES), - Observation(3, attributes=TEST_ATTRIBUTES), + Observation(1, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + 
Observation(2, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(3, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] assert isinstance(options, CallbackOptions) @@ -118,13 +123,14 @@ def generator_callback_1(): options = yield assert isinstance(options, CallbackOptions) options = yield [ - Observation(4, attributes=TEST_ATTRIBUTES), - Observation(5, attributes=TEST_ATTRIBUTES), - Observation(6, attributes=TEST_ATTRIBUTES), + Observation(4, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(5, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), + Observation(6, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] assert isinstance(options, CallbackOptions) +@patch("opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP)) class TestObservableGauge(TestCase): def testname(self): self.assertEqual(_ObservableGauge("name", Mock(), Mock()).name, "name") @@ -135,19 +141,30 @@ def test_callable_callback_0(self): "name", Mock(), Mock(), [callable_callback_0] ) - self.assertEqual( - list(observable_gauge.callback(CallbackOptions())), + assert list(observable_gauge.callback(CallbackOptions())) == ( [ Measurement( - 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 1, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 2, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 3, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), - ], + ] ) def test_callable_multiple_callable_callback(self): @@ -159,22 +176,46 @@ def test_callable_multiple_callable_callback(self): list(observable_gauge.callback(CallbackOptions())), [ Measurement( - 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 1, + 
TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 2, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 3, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 4, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 4, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 5, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 5, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 6, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 6, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), ], ) @@ -188,13 +229,25 @@ def test_generator_callback_0(self): list(observable_gauge.callback(CallbackOptions())), [ Measurement( - 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 1, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 2, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 3, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), ], ) @@ -211,22 +264,46 @@ def test_generator_multiple_generator_callback(self): list(observable_gauge.callback(CallbackOptions())), [ Measurement( - 1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 1, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + 
attributes=TEST_ATTRIBUTES, ), Measurement( - 2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 2, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 3, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 4, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 4, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 5, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 5, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), Measurement( - 6, instrument=observable_gauge, attributes=TEST_ATTRIBUTES + 6, + TEST_TIMESTAMP, + instrument=observable_gauge, + context=TEST_CONTEXT, + attributes=TEST_ATTRIBUTES, ), ], ) @@ -237,6 +314,7 @@ def test_disallow_direct_observable_gauge_creation(self): ObservableGauge("name", Mock(), Mock()) +@patch("opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP)) class TestObservableCounter(TestCase): def test_callable_callback_0(self): observable_counter = _ObservableCounter( @@ -248,17 +326,23 @@ def test_callable_callback_0(self): [ Measurement( 1, + TEST_TIMESTAMP, instrument=observable_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, + TEST_TIMESTAMP, instrument=observable_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, + TEST_TIMESTAMP, instrument=observable_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], @@ -274,17 +358,23 @@ def test_generator_callback_0(self): [ Measurement( 1, + TEST_TIMESTAMP, instrument=observable_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, + TEST_TIMESTAMP, instrument=observable_counter, + context=TEST_CONTEXT, 
attributes=TEST_ATTRIBUTES, ), Measurement( 3, + TEST_TIMESTAMP, instrument=observable_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], @@ -313,6 +403,7 @@ def test_disallow_direct_counter_creation(self): _SDKGauge("name", Mock(), Mock()) +@patch("opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP)) class TestObservableUpDownCounter(TestCase): def test_callable_callback_0(self): observable_up_down_counter = _ObservableUpDownCounter( @@ -324,17 +415,23 @@ def test_callable_callback_0(self): [ Measurement( 1, + TEST_TIMESTAMP, instrument=observable_up_down_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, + TEST_TIMESTAMP, instrument=observable_up_down_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, + TEST_TIMESTAMP, instrument=observable_up_down_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], @@ -350,17 +447,23 @@ def test_generator_callback_0(self): [ Measurement( 1, + TEST_TIMESTAMP, instrument=observable_up_down_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, + TEST_TIMESTAMP, instrument=observable_up_down_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, + TEST_TIMESTAMP, instrument=observable_up_down_counter, + context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], diff --git a/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py b/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py index 91a49955b7..bedffaaeff 100644 --- a/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py +++ b/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py @@ -43,6 +43,7 @@ def test_creates_metric_reader_storages(self, MockMetricReaderStorage): reader_mocks = [Mock() for _ in range(5)] SynchronousMeasurementConsumer( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=reader_mocks, views=Mock(), @@ -59,6 +60,7 @@ def 
test_measurements_passed_to_each_reader_storage( consumer = SynchronousMeasurementConsumer( SdkConfiguration( + exemplar_filter=Mock(should_sample=Mock(return_value=False)), resource=Mock(), metric_readers=reader_mocks, views=Mock(), @@ -69,7 +71,7 @@ def test_measurements_passed_to_each_reader_storage( for rs_mock in reader_storage_mocks: rs_mock.consume_measurement.assert_called_once_with( - measurement_mock + measurement_mock, False ) def test_collect_passed_to_reader_stage(self, MockMetricReaderStorage): @@ -80,6 +82,7 @@ def test_collect_passed_to_reader_stage(self, MockMetricReaderStorage): consumer = SynchronousMeasurementConsumer( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=reader_mocks, views=Mock(), @@ -98,6 +101,7 @@ def test_collect_calls_async_instruments(self, MockMetricReaderStorage): MockMetricReaderStorage.return_value = reader_storage_mock consumer = SynchronousMeasurementConsumer( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=[reader_mock], views=Mock(), @@ -125,6 +129,7 @@ def test_collect_timeout(self, MockMetricReaderStorage): MockMetricReaderStorage.return_value = reader_storage_mock consumer = SynchronousMeasurementConsumer( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=[reader_mock], views=Mock(), @@ -157,6 +162,7 @@ def test_collect_deadline( MockMetricReaderStorage.return_value = reader_storage_mock consumer = SynchronousMeasurementConsumer( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=[reader_mock], views=Mock(), diff --git a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py index 2aac987465..ecf46c4c44 100644 --- a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py +++ b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py @@ -15,8 +15,10 @@ # pylint: disable=protected-access,invalid-name from logging import WARNING +from 
time import time_ns from unittest.mock import MagicMock, Mock, patch +from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( _LastValueAggregation, ) @@ -75,6 +77,7 @@ def test_creates_view_instrument_matches( view2 = mock_view_matching("view_2", instrument1, instrument2) storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(view1, view2), @@ -89,21 +92,21 @@ def test_creates_view_instrument_matches( # instrument1 matches view1 and view2, so should create two # ViewInstrumentMatch objects - storage.consume_measurement(Measurement(1, instrument1)) + storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) self.assertEqual( len(MockViewInstrumentMatch.call_args_list), 2, MockViewInstrumentMatch.mock_calls, ) # they should only be created the first time the instrument is seen - storage.consume_measurement(Measurement(1, instrument1)) + storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 2) # instrument2 matches view2, so should create a single # ViewInstrumentMatch MockViewInstrumentMatch.call_args_list.clear() with self.assertLogs(level=WARNING): - storage.consume_measurement(Measurement(1, instrument2)) + storage.consume_measurement(Measurement(1, time_ns(), instrument2, Context())) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) @patch( @@ -113,9 +116,9 @@ def test_creates_view_instrument_matches( def test_forwards_calls_to_view_instrument_match( self, MockViewInstrumentMatch: Mock ): - view_instrument_match1 = Mock(_aggregation=_LastValueAggregation({})) - view_instrument_match2 = Mock(_aggregation=_LastValueAggregation({})) - view_instrument_match3 = Mock(_aggregation=_LastValueAggregation({})) + view_instrument_match1 = Mock(_aggregation=_LastValueAggregation({}, Mock())) + view_instrument_match2 = 
Mock(_aggregation=_LastValueAggregation({}, Mock())) + view_instrument_match3 = Mock(_aggregation=_LastValueAggregation({}, Mock())) MockViewInstrumentMatch.side_effect = [ view_instrument_match1, view_instrument_match2, @@ -129,6 +132,7 @@ def test_forwards_calls_to_view_instrument_match( storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(view1, view2), @@ -143,21 +147,21 @@ def test_forwards_calls_to_view_instrument_match( # Measurements from an instrument should be passed on to each # ViewInstrumentMatch objects created for that instrument - measurement = Measurement(1, instrument1) + measurement = Measurement(1, time_ns(), instrument1, Context()) storage.consume_measurement(measurement) view_instrument_match1.consume_measurement.assert_called_once_with( - measurement + measurement, True ) view_instrument_match2.consume_measurement.assert_called_once_with( - measurement + measurement, True ) view_instrument_match3.consume_measurement.assert_not_called() - measurement = Measurement(1, instrument2) + measurement = Measurement(1, time_ns(), instrument2, Context()) with self.assertLogs(level=WARNING): storage.consume_measurement(measurement) view_instrument_match3.consume_measurement.assert_called_once_with( - measurement + measurement, True ) # collect() should call collect on all of its _ViewInstrumentMatch @@ -238,6 +242,7 @@ def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock): view1 = mock_view_matching(instrument1) storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(view1,), @@ -251,7 +256,7 @@ def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock): ) def send_measurement(): - storage.consume_measurement(Measurement(1, instrument1)) + storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) # race sending many measurements concurrently 
self.run_with_many_threads(send_measurement) @@ -270,6 +275,7 @@ def test_default_view_enabled(self, MockViewInstrumentMatch: Mock): storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(), @@ -282,17 +288,17 @@ def test_default_view_enabled(self, MockViewInstrumentMatch: Mock): MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) - storage.consume_measurement(Measurement(1, instrument1)) + storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) self.assertEqual( len(MockViewInstrumentMatch.call_args_list), 1, MockViewInstrumentMatch.mock_calls, ) - storage.consume_measurement(Measurement(1, instrument1)) + storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) MockViewInstrumentMatch.call_args_list.clear() - storage.consume_measurement(Measurement(1, instrument2)) + storage.consume_measurement(Measurement(1, time_ns(), instrument2, Context())) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) def test_drop_aggregation(self): @@ -300,6 +306,7 @@ def test_drop_aggregation(self): counter = _Counter("name", Mock(), Mock()) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -315,7 +322,7 @@ def test_drop_aggregation(self): ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) - metric_reader_storage.consume_measurement(Measurement(1, counter)) + metric_reader_storage.consume_measurement(Measurement(1, time_ns(), counter, Context())) self.assertIsNone(metric_reader_storage.collect()) @@ -326,6 +333,7 @@ def test_same_collection_start(self): metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(View(instrument_name="name"),), @@ -338,9 +346,9 @@ def test_same_collection_start(self): 
MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) - metric_reader_storage.consume_measurement(Measurement(1, counter)) + metric_reader_storage.consume_measurement(Measurement(1, time_ns(), counter, Context())) metric_reader_storage.consume_measurement( - Measurement(1, up_down_counter) + Measurement(1, time_ns(), up_down_counter, Context()) ) actual = metric_reader_storage.collect() @@ -371,6 +379,7 @@ def test_conflicting_view_configuration(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -390,7 +399,7 @@ def test_conflicting_view_configuration(self): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter) + Measurement(1, time_ns(), observable_counter, Context()) ) self.assertIs( @@ -419,6 +428,7 @@ def test_view_instrument_match_conflict_0(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -437,12 +447,12 @@ def test_view_instrument_match_conflict_0(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_0) + Measurement(1, time_ns(), observable_counter_0, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_1) + Measurement(1, time_ns(), observable_counter_1, Context()) ) self.assertIn( @@ -476,6 +486,7 @@ def test_view_instrument_match_conflict_1(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -494,12 +505,12 @@ def test_view_instrument_match_conflict_1(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, 
observable_counter_foo) + Measurement(1, time_ns(), observable_counter_foo, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_bar) + Measurement(1, time_ns(), observable_counter_bar, Context()) ) self.assertIn( @@ -509,7 +520,7 @@ def test_view_instrument_match_conflict_1(self): with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_baz) + Measurement(1, time_ns(), observable_counter_baz, Context()) ) self.assertIn( @@ -544,6 +555,7 @@ def test_view_instrument_match_conflict_2(self): metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -562,13 +574,13 @@ def test_view_instrument_match_conflict_2(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_foo) + Measurement(1, time_ns(), observable_counter_foo, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_bar) + Measurement(1, time_ns(), observable_counter_bar, Context()) ) def test_view_instrument_match_conflict_3(self): @@ -592,6 +604,7 @@ def test_view_instrument_match_conflict_3(self): metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -610,13 +623,13 @@ def test_view_instrument_match_conflict_3(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, counter_bar) + Measurement(1, time_ns(), counter_bar, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_baz) + 
Measurement(1, time_ns(), observable_counter_baz, Context()) ) def test_view_instrument_match_conflict_4(self): @@ -640,6 +653,7 @@ def test_view_instrument_match_conflict_4(self): metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -658,13 +672,13 @@ def test_view_instrument_match_conflict_4(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, counter_bar) + Measurement(1, time_ns(), counter_bar, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, up_down_counter_baz) + Measurement(1, time_ns(), up_down_counter_baz, Context()) ) def test_view_instrument_match_conflict_5(self): @@ -686,6 +700,7 @@ def test_view_instrument_match_conflict_5(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -704,13 +719,13 @@ def test_view_instrument_match_conflict_5(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_0) + Measurement(1, time_ns(), observable_counter_0, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_1) + Measurement(1, time_ns(), observable_counter_1, Context()) ) def test_view_instrument_match_conflict_6(self): @@ -740,6 +755,7 @@ def test_view_instrument_match_conflict_6(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -759,19 +775,19 @@ def test_view_instrument_match_conflict_6(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): 
metric_reader_storage.consume_measurement( - Measurement(1, observable_counter) + Measurement(1, time_ns(), observable_counter, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, histogram) + Measurement(1, time_ns(), histogram, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, gauge) + Measurement(1, time_ns(), gauge, Context()) ) def test_view_instrument_match_conflict_7(self): @@ -794,6 +810,7 @@ def test_view_instrument_match_conflict_7(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -812,12 +829,12 @@ def test_view_instrument_match_conflict_7(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_0) + Measurement(1, time_ns(), observable_counter_0, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( - Measurement(1, observable_counter_1) + Measurement(1, time_ns(), observable_counter_1, Context()) ) self.assertIn( @@ -848,6 +865,7 @@ def test_view_instrument_match_conflict_8(self): ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( + exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( @@ -870,12 +888,12 @@ def test_view_instrument_match_conflict_8(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, up_down_counter) + Measurement(1, time_ns(), up_down_counter, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( - Measurement(1, histogram) + Measurement(1, time_ns(), histogram, Context()) ) self.assertIn( diff --git 
a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index f4d2d02351..41526c6d3a 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -14,9 +14,11 @@ # pylint: disable=protected-access +from time import time_ns from unittest import TestCase from unittest.mock import MagicMock, Mock +from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal._view_instrument_match import ( _ViewInstrumentMatch, ) @@ -49,6 +51,7 @@ def setUpClass(cls): cls.mock_resource = Mock() cls.mock_instrumentation_scope = Mock() cls.sdk_configuration = SdkConfiguration( + exemplar_filter=Mock(), resource=cls.mock_resource, metric_readers=[], views=[], @@ -73,7 +76,9 @@ def test_consume_measurement(self): view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=instrument1, + context=Context(), attributes={"c": "d", "f": "g"}, ) ) @@ -85,7 +90,9 @@ def test_consume_measurement(self): view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=instrument1, + context=Context(), attributes={"w": "x", "y": "z"}, ) ) @@ -114,7 +121,9 @@ def test_consume_measurement(self): view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=instrument1, + context=Context(), attributes={"c": "d", "f": "g"}, ) ) @@ -142,7 +151,13 @@ def test_consume_measurement(self): ), ) view_instrument_match.consume_measurement( - Measurement(value=0, instrument=instrument1, attributes=None) + Measurement( + value=0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes=None + ) ) self.assertEqual( view_instrument_match._attributes_aggregation, @@ -166,7 +181,10 @@ def test_consume_measurement(self): ), ) view_instrument_match.consume_measurement( - Measurement(value=0, 
instrument=instrument1, attributes=None) + Measurement(value=0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), attributes=None) ) self.assertIsInstance( view_instrument_match._attributes_aggregation[frozenset({})], @@ -198,7 +216,9 @@ def test_collect(self): view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), + context=Context(), attributes={"c": "d", "f": "g"}, ) ) @@ -254,28 +274,36 @@ def test_data_point_check(self): view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), + context=Context(), attributes={"c": "d", "f": "g"}, ) ) view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), + context=Context(), attributes={"h": "i", "j": "k"}, ) ) view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), + context=Context(), attributes={"l": "m", "n": "o"}, ) ) view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), + context=Context(), attributes={"p": "q", "r": "s"}, ) ) @@ -309,7 +337,9 @@ def test_setting_aggregation(self): view_instrument_match.consume_measurement( Measurement( value=0, + time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), + context=Context(), attributes={"c": "d", "f": "g"}, ) ) From 80f040d269580459214a4338125b12770b6ed882 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 13 Aug 2024 18:30:13 +0200 Subject: [PATCH 08/48] Lint the code --- .../metrics/_internal/observation.py | 7 +- .../src/opentelemetry/sdk/metrics/__init__.py | 5 +- .../sdk/metrics/_internal/__init__.py | 11 +- .../_internal/_view_instrument_match.py | 18 ++- .../sdk/metrics/_internal/aggregation.py | 112 ++++++++++++++---- 
.../metrics/_internal/exemplar/__init__.py | 2 +- .../metrics/_internal/exemplar/exemplar.py | 2 +- .../_internal/exemplar/exemplar_filter.py | 14 ++- .../_internal/exemplar/exemplar_reservoir.py | 24 ++-- .../sdk/metrics/_internal/instrument.py | 60 ++++++++-- .../metrics/_internal/measurement_consumer.py | 10 +- .../_internal/metric_reader_storage.py | 8 +- .../sdk/metrics/_internal/view.py | 14 ++- .../tests/metrics/test_aggregation.py | 98 +++++++++------ .../tests/metrics/test_instrument.py | 17 ++- .../metrics/test_metric_reader_storage.py | 64 +++++++--- .../metrics/test_view_instrument_match.py | 11 +- 17 files changed, 353 insertions(+), 124 deletions(-) diff --git a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py index 16b5d66832..fdfef68fb8 100644 --- a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py +++ b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py @@ -30,7 +30,10 @@ class Observation: """ def __init__( - self, value: Union[int, float], attributes: Attributes = None, context: Optional[Context] = None + self, + value: Union[int, float], + attributes: Attributes = None, + context: Optional[Context] = None, ) -> None: self._value = value self._attributes = attributes @@ -43,7 +46,7 @@ def value(self) -> Union[float, int]: @property def attributes(self) -> Attributes: return self._attributes - + @property def context(self) -> Optional[Context]: return self._context diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index 5f66331305..204b015572 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -15,7 +15,10 @@ from opentelemetry.sdk.metrics._internal import Meter, MeterProvider from opentelemetry.sdk.metrics._internal.exceptions import 
MetricsTimeoutError -from opentelemetry.sdk.metrics._internal.exemplar import ExemplarFilter, ExemplarReservoir +from opentelemetry.sdk.metrics._internal.exemplar import ( + ExemplarFilter, + ExemplarReservoir, +) from opentelemetry.sdk.metrics._internal.instrument import Counter from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge from opentelemetry.sdk.metrics._internal.instrument import ( diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py index a0eb87e2fd..1e96f0b4b6 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py @@ -35,7 +35,10 @@ from opentelemetry.metrics import _Gauge as APIGauge from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError -from opentelemetry.sdk.metrics._internal.exemplar import ExemplarFilter, TraceBasedExemplarFilter +from opentelemetry.sdk.metrics._internal.exemplar import ( + ExemplarFilter, + TraceBasedExemplarFilter, +) from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, @@ -392,7 +395,11 @@ def __init__( if resource is None: resource = Resource.create({}) self._sdk_config = SdkConfiguration( - exemplar_filter = TraceBasedExemplarFilter() if exemplar_filter is None else exemplar_filter, + exemplar_filter=( + TraceBasedExemplarFilter() + if exemplar_filter is None + else exemplar_filter + ), resource=resource, metric_readers=metric_readers, views=views, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py index 700be3d5c3..4f8e0b1251 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py +++ 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py @@ -52,13 +52,19 @@ def __init__( ) if not isinstance(self._view._aggregation, DefaultAggregation): self._aggregation = self._view._aggregation._create_aggregation( - self._instrument, None, self._view._exemplar_reservoir_factory, 0 + self._instrument, + None, + self._view._exemplar_reservoir_factory, + 0, ) else: self._aggregation = self._instrument_class_aggregation[ self._instrument.__class__ ]._create_aggregation( - self._instrument, None, self._view._exemplar_reservoir_factory, 0 + self._instrument, + None, + self._view._exemplar_reservoir_factory, + 0, ) def conflicts(self, other: "_ViewInstrumentMatch") -> bool: @@ -83,7 +89,9 @@ def conflicts(self, other: "_ViewInstrumentMatch") -> bool: return result # pylint: disable=protected-access - def consume_measurement(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def consume_measurement( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: if self._view._attribute_keys is not None: @@ -124,7 +132,9 @@ def consume_measurement(self, measurement: Measurement, should_sample_exemplar: ) self._attributes_aggregation[aggr_key] = aggregation - self._attributes_aggregation[aggr_key].aggregate(measurement, should_sample_exemplar) + self._attributes_aggregation[aggr_key].aggregate( + measurement, should_sample_exemplar + ) def collect( self, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 07535029aa..c6f17be776 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -35,7 +35,10 @@ UpDownCounter, _Gauge, ) -from opentelemetry.sdk.metrics._internal.exemplar import Exemplar, ExemplarReservoirFactory +from opentelemetry.sdk.metrics._internal.exemplar 
import ( + Exemplar, + ExemplarReservoirFactory, +) from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import ( Buckets, ) @@ -82,13 +85,19 @@ class AggregationTemporality(IntEnum): class _Aggregation(ABC, Generic[_DataPointVarT]): - def __init__(self, attributes: Attributes, reservoir_factory: ExemplarReservoirFactory): + def __init__( + self, + attributes: Attributes, + reservoir_factory: ExemplarReservoirFactory, + ): self._lock = Lock() self._attributes = attributes self._reservoir = reservoir_factory() self._previous_point = None - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: if should_sample_exemplar: self._reservoir.offer( measurement.value, @@ -112,7 +121,9 @@ def _collect_exemplars(self) -> Sequence[Exemplar]: class _DropAggregation(_Aggregation): - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: pass def collect( @@ -135,7 +146,9 @@ def __init__( super().__init__(attributes, reservoir_factory) self._start_time_unix_nano = start_time_unix_nano - self._instrument_aggregation_temporality = instrument_aggregation_temporality + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) self._instrument_is_monotonic = instrument_is_monotonic self._value = None @@ -143,7 +156,9 @@ def __init__( self._previous_collection_start_nano = self._start_time_unix_nano self._previous_value = 0 - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: with self._lock: if self._value is None: self._value = 0 @@ -363,14 +378,20 @@ def collect( class _LastValueAggregation(_Aggregation[GaugePoint]): - def 
__init__(self, attributes: Attributes, reservoir_factory: ExemplarReservoirFactory): + def __init__( + self, + attributes: Attributes, + reservoir_factory: ExemplarReservoirFactory, + ): super().__init__(attributes, reservoir_factory) self._value = None - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True): + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ): with self._lock: self._value = measurement.value - + super().aggregate(measurement, should_sample_exemplar) def collect( @@ -424,7 +445,12 @@ def __init__( ), record_min_max: bool = True, ): - super().__init__(attributes, reservoir_factory=partial(reservoir_factory, boundaries=boundaries)) + super().__init__( + attributes, + reservoir_factory=partial( + reservoir_factory, boundaries=boundaries + ), + ) self._instrument_aggregation_temporality = ( instrument_aggregation_temporality @@ -448,7 +474,9 @@ def __init__( def _get_empty_bucket_counts(self) -> List[int]: return [0] * (len(self._boundaries) + 1) - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: with self._lock: if self._value is None: @@ -615,7 +643,12 @@ def __init__( # _ExplicitBucketHistogramAggregation both size and amount of buckets # remain constant once it is instantiated). 
- super().__init__(attributes, reservoir_factory=partial(reservoir_factory, size=min(20, max_size))) + super().__init__( + attributes, + reservoir_factory=partial( + reservoir_factory, size=min(20, max_size) + ), + ) self._instrument_aggregation_temporality = ( instrument_aggregation_temporality @@ -646,7 +679,9 @@ def __init__( self._mapping = self._new_mapping(self._max_scale) - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: # pylint: disable=too-many-branches,too-many-statements, too-many-locals with self._lock: @@ -1147,7 +1182,9 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: """Creates an aggregation""" @@ -1176,7 +1213,9 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1227,7 +1266,9 @@ def _create_aggregation( if isinstance(instrument, Histogram): return _ExplicitBucketHistogramAggregation( attributes, - reservoir_factory=reservoir_factory(_ExplicitBucketHistogramAggregation), + reservoir_factory=reservoir_factory( + _ExplicitBucketHistogramAggregation + ), instrument_aggregation_temporality=( AggregationTemporality.DELTA ), @@ -1235,10 +1276,16 @@ def _create_aggregation( ) if isinstance(instrument, ObservableGauge): - return _LastValueAggregation(attributes, reservoir_factory=reservoir_factory(_LastValueAggregation)) + return _LastValueAggregation( + attributes, + reservoir_factory=reservoir_factory(_LastValueAggregation), + ) if 
isinstance(instrument, _Gauge): - return _LastValueAggregation(attributes, reservoir_factory=reservoir_factory(_LastValueAggregation)) + return _LastValueAggregation( + attributes, + reservoir_factory=reservoir_factory(_LastValueAggregation), + ) # pylint: disable=broad-exception-raised raise Exception(f"Invalid instrument type {type(instrument)} found") @@ -1257,7 +1304,9 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1321,7 +1370,9 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1353,7 +1404,9 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1386,10 +1439,15 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: - return _LastValueAggregation(attributes, reservoir_factory=reservoir_factory(_LastValueAggregation)) + return _LastValueAggregation( + attributes, + reservoir_factory=reservoir_factory(_LastValueAggregation), + ) class DropAggregation(Aggregation): @@ -1399,7 +1457,11 @@ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, - reservoir_factory: 
Callable[[Type[_Aggregation]], ExemplarReservoirFactory], + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirFactory + ], start_time_unix_nano: int, ) -> _Aggregation: - return _DropAggregation(attributes, reservoir_factory(_DropAggregation)) + return _DropAggregation( + attributes, reservoir_factory(_DropAggregation) + ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py index c5ed4454e5..f3032c5d1e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py @@ -14,9 +14,9 @@ from .exemplar import Exemplar from .exemplar_filter import ( - ExemplarFilter, AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, + ExemplarFilter, TraceBasedExemplarFilter, ) from .exemplar_reservoir import ( diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py index a047a01fb8..e460f38a48 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py @@ -37,9 +37,9 @@ class Exemplar: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar """ + filtered_attributes: Attributes value: Union[int, float] time_unix_nano: int span_id: Optional[str] = None trace_id: Optional[str] = None - diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py index cc2b25d9ce..bbb26d3ed0 100644 --- 
a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py @@ -48,12 +48,14 @@ def should_sample( attributes: The complete set of measurement attributes ctx: The Context of the measurement """ - raise NotImplementedError("ExemplarFilter.should_sample is not implemented") + raise NotImplementedError( + "ExemplarFilter.should_sample is not implemented" + ) class AlwaysOnExemplarFilter(ExemplarFilter): """An ExemplarFilter which makes all measurements eligible for being an Exemplar. - + Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson """ @@ -78,9 +80,9 @@ def should_sample( class AlwaysOffExemplarFilter(ExemplarFilter): """An ExemplarFilter which makes no measurements eligible for being an Exemplar. - + Using this ExemplarFilter is as good as disabling Exemplar feature. - + Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff """ @@ -106,7 +108,7 @@ def should_sample( class TraceBasedExemplarFilter(ExemplarFilter): """An ExemplarFilter which makes those measurements eligible for being an Exemplar, which are recorded in the context of a sampled parent span. 
- + Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased """ @@ -125,7 +127,7 @@ def should_sample( timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes ctx: The Context of the measurement - """ + """ span = trace.get_current_span(ctx) if span == INVALID_SPAN: return False diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 81db862944..74449fc48e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -28,7 +28,7 @@ class ExemplarReservoir(ABC): """ExemplarReservoir provide a method to offer measurements to the reservoir and another to collect accumulated Exemplars. - Note: + Note: The constructor MUST accept ``**kwargs`` that may be set from aggregation parameters. @@ -60,7 +60,9 @@ def collect(self, point_attributes: Attributes) -> list[Exemplar]: exemplars contain the attributes that were filtered out by the aggregator, but recorded alongside the original measurement. 
""" - raise NotImplementedError("ExemplarReservoir.collect is not implemented") + raise NotImplementedError( + "ExemplarReservoir.collect is not implemented" + ) class ExemplarBucket: @@ -97,7 +99,9 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: return None current_attributes = { - k: v for k, v in self.__attributes.items() if k not in point_attributes + k: v + for k, v in self.__attributes.items() + if k not in point_attributes } exemplar = Exemplar( @@ -187,7 +191,9 @@ def offer( """Offers a measurement to be sampled.""" index = self._find_bucket_index(value, time_unix_nano, attributes, ctx) if index != -1: - self._reservoir_storage[index].offer(value, time_unix_nano, attributes, ctx) + self._reservoir_storage[index].offer( + value, time_unix_nano, attributes, ctx + ) def _find_bucket_index( self, @@ -226,7 +232,9 @@ def offer( ) -> None: """Offers a measurement to be sampled.""" index = self._find_bucket_index(value, time_unix_nano, attributes, ctx) - self._reservoir_storage[index].offer(value, time_unix_nano, attributes, ctx) + self._reservoir_storage[index].offer( + value, time_unix_nano, attributes, ctx + ) def _find_bucket_index( self, @@ -241,9 +249,11 @@ def _find_bucket_index( return len(self._boundaries) -ExemplarReservoirFactory: TypeAlias = Callable[[dict[str, Any]], ExemplarReservoir] +ExemplarReservoirFactory: TypeAlias = Callable[ + [dict[str, Any]], ExemplarReservoir +] ExemplarReservoirFactory.__doc__ = """ExemplarReservoir factory. It may receive the Aggregation parameters it is bounded to; e.g. the _ExplicitBucketHistogramAggregation will provide the boundaries. 
-""" \ No newline at end of file +""" diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py index 2867951740..ff20aa1414 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py @@ -38,7 +38,9 @@ _logger = getLogger(__name__) -_ERROR_MESSAGE = "Expected ASCII string of maximum length 63 characters but got {}" +_ERROR_MESSAGE = ( + "Expected ASCII string of maximum length 63 characters but got {}" +) class _Synchronous: @@ -140,7 +142,7 @@ def callback( time_unix_nano=time_ns(), instrument=self, attributes=api_measurement.attributes, - context=api_measurement.context or get_current() + context=api_measurement.context or get_current(), ) except Exception: # pylint: disable=broad-exception-caught _logger.exception( @@ -155,7 +157,10 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def add( - self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None + self, + amount: Union[int, float], + attributes: Dict[str, str] = None, + context: Optional[Context] = None, ): if amount < 0: _logger.warning( @@ -164,7 +169,13 @@ def add( return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes, context or get_current()) + Measurement( + amount, + time_unix_nano, + self, + attributes, + context or get_current(), + ) ) @@ -175,11 +186,20 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def add( - self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None + self, + amount: Union[int, float], + attributes: Dict[str, str] = None, + context: Optional[Context] = None, ): time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, 
attributes, context or get_current()) + Measurement( + amount, + time_unix_nano, + self, + attributes, + context or get_current(), + ) ) @@ -208,8 +228,11 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def record( - self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None - ): + self, + amount: Union[int, float], + attributes: Dict[str, str] = None, + context: Optional[Context] = None, + ): if amount < 0: _logger.warning( "Record amount must be non-negative on Histogram %s.", @@ -218,7 +241,13 @@ def record( return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes, context or get_current()) + Measurement( + amount, + time_unix_nano, + self, + attributes, + context or get_current(), + ) ) @@ -229,11 +258,20 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def set( - self, amount: Union[int, float], attributes: Dict[str, str] = None, context: Optional[Context] = None + self, + amount: Union[int, float], + attributes: Dict[str, str] = None, + context: Optional[Context] = None, ): time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( - Measurement(amount, time_unix_nano, self, attributes, context or get_current()) + Measurement( + amount, + time_unix_nano, + self, + attributes, + context or get_current(), + ) ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py index 2d755b6e5f..b516d6abb0 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py @@ -79,7 +79,15 @@ def __init__( def consume_measurement(self, measurement: Measurement) -> None: for reader_storage in self._reader_storages.values(): - 
reader_storage.consume_measurement(measurement, self._sdk_config.exemplar_filter.should_sample(measurement.value, measurement.time_unix_nano, measurement.attributes, measurement.context)) + reader_storage.consume_measurement( + measurement, + self._sdk_config.exemplar_filter.should_sample( + measurement.value, + measurement.time_unix_nano, + measurement.attributes, + measurement.context, + ), + ) def register_asynchronous_instrument( self, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py index e8d3bd802f..2564bbcd7f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py @@ -113,11 +113,15 @@ def _get_or_init_view_instrument_match( return view_instrument_matches - def consume_measurement(self, measurement: Measurement, should_sample_exemplar: bool = True) -> None: + def consume_measurement( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: for view_instrument_match in self._get_or_init_view_instrument_match( measurement.instrument ): - view_instrument_match.consume_measurement(measurement, should_sample_exemplar) + view_instrument_match.consume_measurement( + measurement, should_sample_exemplar + ) def collect(self) -> Optional[MetricsData]: # Use a list instead of yielding to prevent a slow reader from holding diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index 9cbf602c6f..c2859b1e5d 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -19,9 +19,9 @@ from opentelemetry.metrics import Instrument from opentelemetry.sdk.metrics._internal.aggregation import ( - _Aggregation, 
Aggregation, DefaultAggregation, + _Aggregation, _ExplicitBucketHistogramAggregation, _ExponentialBucketHistogramAggregation, ) @@ -34,7 +34,9 @@ _logger = getLogger(__name__) -def default_reservoir_factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: +def default_reservoir_factory( + aggregationType: Type[_Aggregation], +) -> ExemplarReservoirFactory: """Default reservoir factory per aggregation.""" if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): return AlignedHistogramBucketExemplarReservoir @@ -112,7 +114,9 @@ def __init__( description: Optional[str] = None, attribute_keys: Optional[Set[str]] = None, aggregation: Optional[Aggregation] = None, - exemplar_reservoir_factory: Optional[Callable[[Type[_Aggregation]], ExemplarReservoirFactory]] = None, + exemplar_reservoir_factory: Optional[ + Callable[[Type[_Aggregation]], ExemplarReservoirFactory] + ] = None, instrument_unit: Optional[str] = None, ): if ( @@ -154,7 +158,9 @@ def __init__( self._description = description self._attribute_keys = attribute_keys self._aggregation = aggregation or self._default_aggregation - self._exemplar_reservoir_factory = exemplar_reservoir_factory or default_reservoir_factory + self._exemplar_reservoir_factory = ( + exemplar_reservoir_factory or default_reservoir_factory + ) # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index af687d4905..58c67263aa 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -50,9 +50,15 @@ from opentelemetry.util.types import Attributes -def measurement(value: Union[int, float], attributes: Attributes = None) -> Measurement: +def measurement( + value: Union[int, float], attributes: Attributes = None +) -> Measurement: return Measurement( - value, time_ns(), instrument=Mock(), context=Context(), 
attributes=attributes + value, + time_ns(), + instrument=Mock(), + context=Context(), + attributes=attributes, ) @@ -169,7 +175,9 @@ def test_collect_delta(self): synchronous_sum_aggregation.aggregate(measurement(1)) # 1 is used here directly to simulate the instant the first # collection process starts. - first_sum = synchronous_sum_aggregation.collect(AggregationTemporality.DELTA, 1) + first_sum = synchronous_sum_aggregation.collect( + AggregationTemporality.DELTA, 1 + ) self.assertEqual(first_sum.value, 1) @@ -200,13 +208,17 @@ def test_collect_cumulative(self): ) sum_aggregation.aggregate(measurement(1)) - first_sum = sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) + first_sum = sum_aggregation.collect( + AggregationTemporality.CUMULATIVE, 1 + ) self.assertEqual(first_sum.value, 1) # should have been reset after first collect sum_aggregation.aggregate(measurement(1)) - second_sum = sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) + second_sum = sum_aggregation.collect( + AggregationTemporality.CUMULATIVE, 1 + ) self.assertEqual(second_sum.value, 1) @@ -215,7 +227,9 @@ def test_collect_cumulative(self): ) # if no point seen for a whole interval, should return None - third_sum = sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) + third_sum = sum_aggregation.collect( + AggregationTemporality.CUMULATIVE, 1 + ) self.assertIsNone(third_sum) @@ -249,7 +263,9 @@ def test_collect(self): ) self.assertIsNone( - last_value_aggregation.collect(AggregationTemporality.CUMULATIVE, 1) + last_value_aggregation.collect( + AggregationTemporality.CUMULATIVE, 1 + ) ) last_value_aggregation.aggregate(measurement(1)) @@ -298,12 +314,14 @@ def test_aggregate(self): Test `ExplicitBucketHistogramAggregation with custom boundaries """ - explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( - Mock(), - AggregationTemporality.DELTA, - 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), - boundaries=[0, 2, 4], + 
explicit_bucket_histogram_aggregation = ( + _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), + boundaries=[0, 2, 4], + ) ) explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) @@ -337,11 +355,13 @@ def test_min_max(self): maximum value in the population """ - explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( - Mock(), - AggregationTemporality.CUMULATIVE, - 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), + explicit_bucket_histogram_aggregation = ( + _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), + ) ) explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) @@ -353,12 +373,14 @@ def test_min_max(self): self.assertEqual(explicit_bucket_histogram_aggregation._min, -1) self.assertEqual(explicit_bucket_histogram_aggregation._max, 9999) - explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( - Mock(), - AggregationTemporality.CUMULATIVE, - 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), - record_min_max=False, + explicit_bucket_histogram_aggregation = ( + _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.CUMULATIVE, + 0, + default_reservoir_factory(_ExplicitBucketHistogramAggregation), + record_min_max=False, + ) ) explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) @@ -375,12 +397,14 @@ def test_collect(self): `_ExplicitBucketHistogramAggregation` collects sum metric points """ - explicit_bucket_histogram_aggregation = _ExplicitBucketHistogramAggregation( - Mock(), - AggregationTemporality.DELTA, - 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), - boundaries=[0, 1, 2], + explicit_bucket_histogram_aggregation = ( + _ExplicitBucketHistogramAggregation( + Mock(), + AggregationTemporality.DELTA, + 0, + 
default_reservoir_factory(_ExplicitBucketHistogramAggregation), + boundaries=[0, 1, 2], + ) ) explicit_bucket_histogram_aggregation.aggregate(measurement(1)) @@ -521,7 +545,10 @@ def setUpClass(cls): def test_counter(self): aggregation = self.default_aggregation._create_aggregation( - _Counter("name", Mock(), Mock()), Mock(), default_reservoir_factory, 0 + _Counter("name", Mock(), Mock()), + Mock(), + default_reservoir_factory, + 0, ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) @@ -532,7 +559,10 @@ def test_counter(self): def test_up_down_counter(self): aggregation = self.default_aggregation._create_aggregation( - _UpDownCounter("name", Mock(), Mock()), Mock(), default_reservoir_factory, 0 + _UpDownCounter("name", Mock(), Mock()), + Mock(), + default_reservoir_factory, + 0, ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) @@ -557,7 +587,9 @@ def test_observable_counter(self): def test_observable_up_down_counter(self): aggregation = self.default_aggregation._create_aggregation( - _ObservableUpDownCounter("name", Mock(), Mock(), callbacks=[Mock()]), + _ObservableUpDownCounter( + "name", Mock(), Mock(), callbacks=[Mock()] + ), Mock(), default_reservoir_factory, 0, diff --git a/opentelemetry-sdk/tests/metrics/test_instrument.py b/opentelemetry-sdk/tests/metrics/test_instrument.py index ba260e1071..4bd10e3fe7 100644 --- a/opentelemetry-sdk/tests/metrics/test_instrument.py +++ b/opentelemetry-sdk/tests/metrics/test_instrument.py @@ -15,6 +15,7 @@ # pylint: disable=no-self-use from logging import WARNING + # from time import time_ns from unittest import TestCase from unittest.mock import Mock, patch @@ -41,7 +42,6 @@ _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement -from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory class TestCounter(TestCase): @@ -130,7 +130,10 @@ def 
generator_callback_1(): assert isinstance(options, CallbackOptions) -@patch("opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP)) +@patch( + "opentelemetry.sdk.metrics._internal.instrument.time_ns", + Mock(return_value=TEST_TIMESTAMP), +) class TestObservableGauge(TestCase): def testname(self): self.assertEqual(_ObservableGauge("name", Mock(), Mock()).name, "name") @@ -314,7 +317,10 @@ def test_disallow_direct_observable_gauge_creation(self): ObservableGauge("name", Mock(), Mock()) -@patch("opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP)) +@patch( + "opentelemetry.sdk.metrics._internal.instrument.time_ns", + Mock(return_value=TEST_TIMESTAMP), +) class TestObservableCounter(TestCase): def test_callable_callback_0(self): observable_counter = _ObservableCounter( @@ -403,7 +409,10 @@ def test_disallow_direct_counter_creation(self): _SDKGauge("name", Mock(), Mock()) -@patch("opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP)) +@patch( + "opentelemetry.sdk.metrics._internal.instrument.time_ns", + Mock(return_value=TEST_TIMESTAMP), +) class TestObservableUpDownCounter(TestCase): def test_callable_callback_0(self): observable_up_down_counter = _ObservableUpDownCounter( diff --git a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py index ecf46c4c44..1785c8ec24 100644 --- a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py +++ b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py @@ -92,21 +92,27 @@ def test_creates_view_instrument_matches( # instrument1 matches view1 and view2, so should create two # ViewInstrumentMatch objects - storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument1, Context()) + ) self.assertEqual( len(MockViewInstrumentMatch.call_args_list), 2, 
MockViewInstrumentMatch.mock_calls, ) # they should only be created the first time the instrument is seen - storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument1, Context()) + ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 2) # instrument2 matches view2, so should create a single # ViewInstrumentMatch MockViewInstrumentMatch.call_args_list.clear() with self.assertLogs(level=WARNING): - storage.consume_measurement(Measurement(1, time_ns(), instrument2, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument2, Context()) + ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) @patch( @@ -116,9 +122,15 @@ def test_creates_view_instrument_matches( def test_forwards_calls_to_view_instrument_match( self, MockViewInstrumentMatch: Mock ): - view_instrument_match1 = Mock(_aggregation=_LastValueAggregation({}, Mock())) - view_instrument_match2 = Mock(_aggregation=_LastValueAggregation({}, Mock())) - view_instrument_match3 = Mock(_aggregation=_LastValueAggregation({}, Mock())) + view_instrument_match1 = Mock( + _aggregation=_LastValueAggregation({}, Mock()) + ) + view_instrument_match2 = Mock( + _aggregation=_LastValueAggregation({}, Mock()) + ) + view_instrument_match3 = Mock( + _aggregation=_LastValueAggregation({}, Mock()) + ) MockViewInstrumentMatch.side_effect = [ view_instrument_match1, view_instrument_match2, @@ -256,7 +268,9 @@ def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock): ) def send_measurement(): - storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument1, Context()) + ) # race sending many measurements concurrently self.run_with_many_threads(send_measurement) @@ -288,17 +302,23 @@ def test_default_view_enabled(self, MockViewInstrumentMatch: Mock): MagicMock(**{"__getitem__.return_value": 
DefaultAggregation()}), ) - storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument1, Context()) + ) self.assertEqual( len(MockViewInstrumentMatch.call_args_list), 1, MockViewInstrumentMatch.mock_calls, ) - storage.consume_measurement(Measurement(1, time_ns(), instrument1, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument1, Context()) + ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) MockViewInstrumentMatch.call_args_list.clear() - storage.consume_measurement(Measurement(1, time_ns(), instrument2, Context())) + storage.consume_measurement( + Measurement(1, time_ns(), instrument2, Context()) + ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) def test_drop_aggregation(self): @@ -322,7 +342,9 @@ def test_drop_aggregation(self): ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) - metric_reader_storage.consume_measurement(Measurement(1, time_ns(), counter, Context())) + metric_reader_storage.consume_measurement( + Measurement(1, time_ns(), counter, Context()) + ) self.assertIsNone(metric_reader_storage.collect()) @@ -346,7 +368,9 @@ def test_same_collection_start(self): MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) - metric_reader_storage.consume_measurement(Measurement(1, time_ns(), counter, Context())) + metric_reader_storage.consume_measurement( + Measurement(1, time_ns(), counter, Context()) + ) metric_reader_storage.consume_measurement( Measurement(1, time_ns(), up_down_counter, Context()) ) @@ -505,7 +529,9 @@ def test_view_instrument_match_conflict_1(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, time_ns(), observable_counter_foo, Context()) + Measurement( + 1, time_ns(), observable_counter_foo, Context() + ) ) with self.assertLogs(level=WARNING) as log: @@ -574,13 
+600,17 @@ def test_view_instrument_match_conflict_2(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, time_ns(), observable_counter_foo, Context()) + Measurement( + 1, time_ns(), observable_counter_foo, Context() + ) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, time_ns(), observable_counter_bar, Context()) + Measurement( + 1, time_ns(), observable_counter_bar, Context() + ) ) def test_view_instrument_match_conflict_3(self): @@ -629,7 +659,9 @@ def test_view_instrument_match_conflict_3(self): with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( - Measurement(1, time_ns(), observable_counter_baz, Context()) + Measurement( + 1, time_ns(), observable_counter_baz, Context() + ) ) def test_view_instrument_match_conflict_4(self): diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index 41526c6d3a..320e133ff9 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -152,11 +152,11 @@ def test_consume_measurement(self): ) view_instrument_match.consume_measurement( Measurement( - value=0, + value=0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), - attributes=None + attributes=None, ) ) self.assertEqual( @@ -181,10 +181,13 @@ def test_consume_measurement(self): ), ) view_instrument_match.consume_measurement( - Measurement(value=0, + Measurement( + value=0, time_unix_nano=time_ns(), instrument=instrument1, - context=Context(), attributes=None) + context=Context(), + attributes=None, + ) ) self.assertIsInstance( view_instrument_match._attributes_aggregation[frozenset({})], From 19b2db47ddf5296d846778d5cbd4295ce273c162 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 14 Aug 2024 10:08:58 +0200 Subject: [PATCH 09/48] Fix code and unit tests --- .../_internal/exemplar/exemplar_reservoir.py | 18 +- .../sdk/metrics/_internal/instrument.py | 10 +- ...xponential_bucket_histogram_aggregation.py | 353 +++++++++++++----- .../metrics/integration_test/test_cpu_time.py | 45 +++ opentelemetry-sdk/tests/metrics/test_point.py | 10 +- 5 files changed, 335 insertions(+), 101 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 74449fc48e..338b3c3a2c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -68,7 +68,7 @@ def collect(self, point_attributes: Attributes) -> list[Exemplar]: class ExemplarBucket: def __init__(self) -> None: self.__value: Union[int, float] = 0 - self.__attributes: Attributes = {} + self.__attributes: Attributes = None self.__time_unix_nano: int = 0 self.__span_id: Optional[str] = None self.__trace_id: Optional[str] = None @@ -98,11 +98,15 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: if not self.__offered: return None - current_attributes = { - k: v - for k, v in self.__attributes.items() - if k not in point_attributes - } + current_attributes = ( + { + k: v + for k, v in self.__attributes.items() + if k not in point_attributes + } + if self.__attributes + else None + ) exemplar = Exemplar( current_attributes, @@ -157,7 +161,7 @@ def collect(self, point_attributes: Attributes) -> list[Exemplar]: ), ) self._reset() - return [exemplars] + return [*exemplars] def _reset(self) -> None: """Reset the reservoir.""" diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py index ff20aa1414..ea373ccc6f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py @@ -141,8 +141,8 @@ def callback( api_measurement.value, time_unix_nano=time_ns(), instrument=self, - attributes=api_measurement.attributes, context=api_measurement.context or get_current(), + attributes=api_measurement.attributes, ) except Exception: # pylint: disable=broad-exception-caught _logger.exception( @@ -173,8 +173,8 @@ def add( amount, time_unix_nano, self, - attributes, context or get_current(), + attributes, ) ) @@ -197,8 +197,8 @@ def add( amount, time_unix_nano, self, - attributes, context or get_current(), + attributes, ) ) @@ -245,8 +245,8 @@ def record( amount, time_unix_nano, self, - attributes, context or get_current(), + attributes, ) ) @@ -269,8 +269,8 @@ def set( amount, time_unix_nano, self, - attributes, context or get_current(), + attributes, ) ) diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index 85c28070c1..f157486ca9 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -21,9 +21,11 @@ from math import ldexp from random import Random, randrange from sys import float_info, maxsize +from time import time_ns from types import MethodType from unittest.mock import Mock, patch +from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( AggregationTemporality, _ExponentialBucketHistogramAggregation, @@ -45,6 +47,7 @@ from opentelemetry.sdk.metrics._internal.point import ( 
ExponentialHistogramDataPoint, ) +from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory from opentelemetry.sdk.metrics.view import ( ExponentialBucketHistogramAggregation, ) @@ -52,7 +55,6 @@ def get_counts(buckets: Buckets) -> int: - counts = [] for index in range(len(buckets)): @@ -72,7 +74,6 @@ def swap( first: _ExponentialBucketHistogramAggregation, second: _ExponentialBucketHistogramAggregation, ): - for attribute in [ "_value_positive", "_value_negative", @@ -93,7 +94,7 @@ class TestExponentialBucketHistogramAggregation(TestCase): def test_create_aggregation(self, mock_logarithm_mapping): exponential_bucket_histogram_aggregation = ( ExponentialBucketHistogramAggregation() - )._create_aggregation(Mock(), Mock(), Mock()) + )._create_aggregation(Mock(), Mock(), Mock(), Mock()) self.assertEqual( exponential_bucket_histogram_aggregation._max_scale, 20 @@ -103,7 +104,7 @@ def test_create_aggregation(self, mock_logarithm_mapping): exponential_bucket_histogram_aggregation = ( ExponentialBucketHistogramAggregation(max_scale=10) - )._create_aggregation(Mock(), Mock(), Mock()) + )._create_aggregation(Mock(), Mock(), Mock(), Mock()) self.assertEqual( exponential_bucket_histogram_aggregation._max_scale, 10 @@ -114,7 +115,7 @@ def test_create_aggregation(self, mock_logarithm_mapping): with self.assertLogs(level=WARNING): exponential_bucket_histogram_aggregation = ( ExponentialBucketHistogramAggregation(max_scale=100) - )._create_aggregation(Mock(), Mock(), Mock()) + )._create_aggregation(Mock(), Mock(), Mock(), Mock()) self.assertEqual( exponential_bucket_histogram_aggregation._max_scale, 100 @@ -127,7 +128,6 @@ def assertInEpsilon(self, first, second, epsilon): self.assertGreaterEqual(first, (second * (1 - epsilon))) def require_equal(self, a, b): - if a._sum == 0 or b._sum == 0: self.assertAlmostEqual(a._sum, b._sum, 1e-6) else: @@ -167,13 +167,27 @@ def test_alternating_growth_0(self): exponential_histogram_aggregation = ( 
_ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=4, ) ) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(4, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) + now = time_ns() + ctx = Context() + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(4, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(1, now, Mock(), ctx) + ) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, -1 @@ -194,16 +208,36 @@ def test_alternating_growth_1(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=4, ) ) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(8, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock())) + now = time_ns() + ctx = Context() + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(1, now, Mock(), ctx) + ) + 
exponential_histogram_aggregation.aggregate( + Measurement(8, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(0.5, now, Mock(), ctx) + ) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, -1 @@ -217,9 +251,11 @@ def test_alternating_growth_1(self): def test_permutations(self): """ Tests that every permutation of certain sequences with maxSize=2 - results¶ in the same scale=-1 histogram. + results in the same scale=-1 histogram. """ + now = time_ns() + ctx = Context() for test_values, expected in [ [ [0.5, 1.0, 2.0], @@ -252,12 +288,13 @@ def test_permutations(self): }, ], ]: - for permutation in permutations(test_values): - exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), AggregationTemporality.DELTA, Mock(), max_size=2, @@ -265,9 +302,8 @@ def test_permutations(self): ) for value in permutation: - exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) self.assertEqual( @@ -292,7 +328,6 @@ def test_permutations(self): ) def test_ascending_sequence(self): - for max_size in [3, 4, 6, 9]: for offset in range(-5, 6): for init_scale in [0, 4]: @@ -301,12 +336,15 @@ def test_ascending_sequence(self): def ascending_sequence_test( self, max_size: int, offset: int, init_scale: int ): - + now = time_ns() + ctx = Context() for step in range(max_size, max_size * 4): - exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), AggregationTemporality.DELTA, Mock(), max_size=max_size, @@ -326,7 +364,7 @@ def ascending_sequence_test( for index in range(max_size): value = center_val(mapping, offset + index) exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) sum_ += value @@ -339,7 +377,7 @@ 
def ascending_sequence_test( ) exponential_histogram_aggregation.aggregate( - Measurement(max_val, Mock()) + Measurement(max_val, now, Mock(), ctx) ) sum_ += max_val @@ -403,7 +441,8 @@ def ascending_sequence_test( ) def test_reset(self): - + now = time_ns() + ctx = Context() for increment in [0x1, 0x100, 0x10000, 0x100000000, 0x200000000]: def mock_increment(self, bucket_index: int) -> None: @@ -415,7 +454,13 @@ def mock_increment(self, bucket_index: int) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=256, ) ) @@ -439,7 +484,7 @@ def mock_increment(self, bucket_index: int) -> None: ), ): exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) exponential_histogram_aggregation._count *= increment exponential_histogram_aggregation._sum *= increment @@ -470,15 +515,29 @@ def mock_increment(self, bucket_index: int) -> None: ) def test_move_into(self): + now = time_ns() + ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=256, ) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=256, ) ) @@ -487,10 +546,10 @@ def test_move_into(self): for index in range(2, 257): expect += index exponential_histogram_aggregation_0.aggregate( - Measurement(index, Mock()) + Measurement(index, now, Mock(), ctx) ) 
exponential_histogram_aggregation_0.aggregate( - Measurement(0, Mock()) + Measurement(0, now, Mock(), ctx) ) swap( @@ -524,10 +583,18 @@ def test_move_into(self): ) def test_very_large_numbers(self): + now = time_ns() + ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=2 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=2, ) ) @@ -546,10 +613,10 @@ def expect_balanced(count: int): ) exponential_histogram_aggregation.aggregate( - Measurement(2**-100, Mock()) + Measurement(2**-100, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( - Measurement(2**100, Mock()) + Measurement(2**100, now, Mock(), ctx) ) self.assertLessEqual( @@ -565,10 +632,10 @@ def expect_balanced(count: int): expect_balanced(1) exponential_histogram_aggregation.aggregate( - Measurement(2**-127, Mock()) + Measurement(2**-127, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( - Measurement(2**128, Mock()) + Measurement(2**128, now, Mock(), ctx) ) self.assertLessEqual( @@ -584,10 +651,10 @@ def expect_balanced(count: int): expect_balanced(2) exponential_histogram_aggregation.aggregate( - Measurement(2**-129, Mock()) + Measurement(2**-129, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( - Measurement(2**255, Mock()) + Measurement(2**255, now, Mock(), ctx) ) self.assertLessEqual( @@ -602,19 +669,29 @@ def expect_balanced(count: int): expect_balanced(3) def test_full_range(self): + now = time_ns() + ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=2 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=2, ) ) exponential_histogram_aggregation.aggregate( - Measurement(float_info.max, Mock()) 
+ Measurement(float_info.max, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(1, now, Mock(), ctx) ) - exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) exponential_histogram_aggregation.aggregate( - Measurement(2**-1074, Mock()) + Measurement(2**-1074, now, Mock(), ctx) ) self.assertEqual( @@ -641,16 +718,22 @@ def test_full_range(self): ) def test_aggregator_min_max(self): - + now = time_ns() + ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) for value in [1, 3, 5, 7, 9]: exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) self.assertEqual(1, exponential_histogram_aggregation._min) @@ -658,41 +741,62 @@ def test_aggregator_min_max(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) for value in [-1, -3, -5, -7, -9]: exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) self.assertEqual(-9, exponential_histogram_aggregation._min) self.assertEqual(-1, exponential_histogram_aggregation._max) def test_aggregator_copy_swap(self): - + now = time_ns() + ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) for value in [1, 3, 5, 7, 9, -1, -3, -5]: exponential_histogram_aggregation_0.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), 
ctx) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) for value in [5, 4, 3, 2]: exponential_histogram_aggregation_1.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) exponential_histogram_aggregation_2 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) @@ -735,10 +839,17 @@ def test_aggregator_copy_swap(self): ) def test_zero_count_by_increment(self): + now = time_ns() + ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) @@ -746,11 +857,16 @@ def test_zero_count_by_increment(self): for _ in range(increment): exponential_histogram_aggregation_0.aggregate( - Measurement(0, Mock()) + Measurement(0, now, Mock(), ctx) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) @@ -772,7 +888,7 @@ def mock_increment(self, bucket_index: int) -> None: ), ): exponential_histogram_aggregation_1.aggregate( - Measurement(0, Mock()) + Measurement(0, now, Mock(), ctx) ) exponential_histogram_aggregation_1._count *= increment exponential_histogram_aggregation_1._zero_count *= increment @@ -783,10 +899,17 @@ def mock_increment(self, bucket_index: int) -> None: ) def test_one_count_by_increment(self): + now = time_ns() + ctx = Context() 
exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) @@ -794,11 +917,16 @@ def test_one_count_by_increment(self): for _ in range(increment): exponential_histogram_aggregation_0.aggregate( - Measurement(1, Mock()) + Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock() + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), ) ) @@ -820,7 +948,7 @@ def mock_increment(self, bucket_index: int) -> None: ), ): exponential_histogram_aggregation_1.aggregate( - Measurement(1, Mock()) + Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation_1._count *= increment exponential_histogram_aggregation_1._sum *= increment @@ -831,13 +959,11 @@ def mock_increment(self, bucket_index: int) -> None: ) def test_boundary_statistics(self): - total = MAX_NORMAL_EXPONENT - MIN_NORMAL_EXPONENT + 1 for scale in range( LogarithmMapping._min_scale, LogarithmMapping._max_scale + 1 ): - above = 0 below = 0 @@ -870,6 +996,9 @@ def test_min_max_size(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), AggregationTemporality.DELTA, Mock(), max_size=_ExponentialBucketHistogramAggregation._min_max_size, @@ -892,31 +1021,49 @@ def test_aggregate_collect(self): """ Tests a repeated cycle of aggregation and collection. 
""" + now = time_ns() + ctx = Context() + exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), AggregationTemporality.DELTA, Mock(), ) ) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) def test_collect_results_cumulative(self) -> None: + now = time_ns() + ctx = Context() + exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), AggregationTemporality.DELTA, Mock(), ) @@ -925,13 +1072,19 @@ def test_collect_results_cumulative(self) -> None: self.assertEqual(exponential_histogram_aggregation._mapping._scale, 20) - exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(2, now, Mock(), ctx) + ) self.assertEqual(exponential_histogram_aggregation._mapping._scale, 20) - exponential_histogram_aggregation.aggregate(Measurement(4, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(4, now, Mock(), ctx) + ) self.assertEqual(exponential_histogram_aggregation._mapping._scale, 7) - exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(1, now, Mock(), 
ctx) + ) self.assertEqual(exponential_histogram_aggregation._mapping._scale, 6) collection_0 = exponential_histogram_aggregation.collect( @@ -952,11 +1105,21 @@ def test_collect_results_cumulative(self) -> None: self.assertEqual(collection_0.min, 1) self.assertEqual(collection_0.max, 4) - exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(8, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(0.1, Mock())) - exponential_histogram_aggregation.aggregate(Measurement(0.045, Mock())) + exponential_histogram_aggregation.aggregate( + Measurement(1, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(8, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(0.5, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(0.1, now, Mock(), ctx) + ) + exponential_histogram_aggregation.aggregate( + Measurement(0.045, now, Mock(), ctx) + ) collection_1 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, Mock() @@ -1002,8 +1165,12 @@ def test_collect_results_cumulative(self) -> None: self.assertEqual(collection_1.max, 8) def test_cumulative_aggregation_with_random_data(self) -> None: + now = time_ns() + ctx = Context() + histogram = _ExponentialBucketHistogramAggregation( Mock(), + default_reservoir_factory(_ExponentialBucketHistogramAggregation), AggregationTemporality.DELTA, Mock(), ) @@ -1052,22 +1219,31 @@ def collect_and_validate(values, histogram) -> None: for i in range(2000): value = random_generator.randint(0, 1000) values.append(value) - histogram.aggregate(Measurement(value, Mock())) + histogram.aggregate(Measurement(value, now, Mock(), ctx)) if i % 20 == 0: collect_and_validate(values, histogram) collect_and_validate(values, histogram) def test_merge_collect_cumulative(self): + now = time_ns() + ctx 
= Context() + exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=4, ) ) for value in [2, 4, 8, 16]: exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) @@ -1088,7 +1264,7 @@ def test_merge_collect_cumulative(self): for value in [1, 2, 4, 8]: exponential_histogram_aggregation.aggregate( - Measurement(1 / value, Mock()) + Measurement(1 / value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) @@ -1108,15 +1284,24 @@ def test_merge_collect_cumulative(self): self.assertEqual(result_1.scale, -1) def test_merge_collect_delta(self): + now = time_ns() + ctx = Context() + exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + Mock(), + default_reservoir_factory( + _ExponentialBucketHistogramAggregation + ), + AggregationTemporality.DELTA, + Mock(), + max_size=4, ) ) for value in [2, 4, 8, 16]: exponential_histogram_aggregation.aggregate( - Measurement(value, Mock()) + Measurement(value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) @@ -1135,7 +1320,7 @@ def test_merge_collect_delta(self): for value in [1, 2, 4, 8]: exponential_histogram_aggregation.aggregate( - Measurement(1 / value, Mock()) + Measurement(1 / value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py b/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py index 18b8cbdcea..22f20002de 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py +++ 
b/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py @@ -16,14 +16,23 @@ import io from typing import Generator, Iterable, List from unittest import TestCase +from unittest.mock import Mock, patch +from opentelemetry.context import Context from opentelemetry.metrics import CallbackOptions, Instrument, Observation from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.measurement import Measurement # FIXME Test that the instrument methods can be called concurrently safely. +TEST_TIMESTAMP = 1_234_567_890 +TEST_CONTEXT = Context() + +@patch( + "opentelemetry.sdk.metrics._internal.instrument.time_ns", + Mock(return_value=TEST_TIMESTAMP), +) class TestCpuTimeIntegration(TestCase): """Integration test of scraping CPU time from proc stat with an observable counter""" @@ -47,92 +56,128 @@ def create_measurements_expected( return [ Measurement( 6150.29, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "user"}, ), Measurement( 3177.46, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "nice"}, ), Measurement( 5946.01, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "system"}, ), Measurement( 891264.59, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "idle"}, ), Measurement( 1296.29, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "iowait"}, ), Measurement( 0.0, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "irq"}, ), Measurement( 8343.46, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "softirq"}, ), Measurement( 421.37, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "guest"}, ), Measurement( 0, + 
TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "guest_nice"}, ), Measurement( 5882.32, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "user"}, ), Measurement( 3491.85, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "nice"}, ), Measurement( 6404.92, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "system"}, ), Measurement( 891564.11, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "idle"}, ), Measurement( 1244.85, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "iowait"}, ), Measurement( 0, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "irq"}, ), Measurement( 2410.04, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "softirq"}, ), Measurement( 418.62, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "guest"}, ), Measurement( 0, + TEST_TIMESTAMP, instrument=instrument, + context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "guest_nice"}, ), ] diff --git a/opentelemetry-sdk/tests/metrics/test_point.py b/opentelemetry-sdk/tests/metrics/test_point.py index 846f2c2fc9..e773f3187f 100644 --- a/opentelemetry-sdk/tests/metrics/test_point.py +++ b/opentelemetry-sdk/tests/metrics/test_point.py @@ -67,7 +67,7 @@ def setUpClass(cls): time_unix_nano=2, value=3.3, ) - cls.number_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "value": 3.3}}' + cls.number_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "value": 3.3, "exemplars": []}}' cls.number_data_point_1 = NumberDataPoint( 
attributes=cls.attributes_1, @@ -75,7 +75,7 @@ def setUpClass(cls): time_unix_nano=3, value=4.4, ) - cls.number_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "value": 4.4}}' + cls.number_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "value": 4.4, "exemplars": []}}' cls.histogram_data_point_0 = HistogramDataPoint( attributes=cls.attributes_0, @@ -88,7 +88,7 @@ def setUpClass(cls): min=0.2, max=3.3, ) - cls.histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 3, "sum": 3.3, "bucket_counts": [1, 1, 1], "explicit_bounds": [0.1, 1.2, 2.3, 3.4], "min": 0.2, "max": 3.3}}' + cls.histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 3, "sum": 3.3, "bucket_counts": [1, 1, 1], "explicit_bounds": [0.1, 1.2, 2.3, 3.4], "min": 0.2, "max": 3.3, "exemplars": []}}' cls.histogram_data_point_1 = HistogramDataPoint( attributes=cls.attributes_1, @@ -101,7 +101,7 @@ def setUpClass(cls): min=0.3, max=4.4, ) - cls.histogram_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "count": 4, "sum": 4.4, "bucket_counts": [2, 1, 1], "explicit_bounds": [1.2, 2.3, 3.4, 4.5], "min": 0.3, "max": 4.4}}' + cls.histogram_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "count": 4, "sum": 4.4, "bucket_counts": [2, 1, 1], "explicit_bounds": [1.2, 2.3, 3.4, 4.5], "min": 0.3, "max": 4.4, "exemplars": []}}' cls.exp_histogram_data_point_0 = ExponentialHistogramDataPoint( attributes=cls.attributes_0, @@ -117,7 +117,7 @@ def setUpClass(cls): min=10, max=10, ) - cls.exp_histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 1, "sum": 10, "scale": 1, 
"zero_count": 0, "positive": {{"offset": 0, "bucket_counts": [1]}}, "negative": {{"offset": 0, "bucket_counts": [0]}}, "flags": 0, "min": 10, "max": 10}}' + cls.exp_histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 1, "sum": 10, "scale": 1, "zero_count": 0, "positive": {{"offset": 0, "bucket_counts": [1]}}, "negative": {{"offset": 0, "bucket_counts": [0]}}, "flags": 0, "min": 10, "max": 10, "exemplars": []}}' cls.sum_0 = Sum( data_points=[cls.number_data_point_0, cls.number_data_point_1], From 2b7793a61142ca897758ef728fabc021855b8097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 14 Aug 2024 10:29:25 +0200 Subject: [PATCH 10/48] Add optional context args in Instrument.record/add/set --- .../metrics/_internal/instrument.py | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py b/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py index 1115bb1f19..2250c8b6fd 100644 --- a/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py +++ b/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py @@ -33,6 +33,7 @@ # pylint: disable=unused-import; needed for typing and sphinx from opentelemetry import metrics +from opentelemetry.context import Context from opentelemetry.metrics._internal.observation import Observation from opentelemetry.util.types import Attributes @@ -173,6 +174,7 @@ def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: pass @@ -192,8 +194,9 @@ def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: - return super().add(amount, attributes=attributes) + return super().add(amount, attributes=attributes, context=context) class 
_ProxyCounter(_ProxyInstrument[Counter], Counter): @@ -201,9 +204,10 @@ def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: if self._real_instrument: - self._real_instrument.add(amount, attributes) + self._real_instrument.add(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> Counter: return meter.create_counter(self._name, self._unit, self._description) @@ -217,6 +221,7 @@ def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: pass @@ -236,8 +241,9 @@ def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: - return super().add(amount, attributes=attributes) + return super().add(amount, attributes=attributes, context=context) class _ProxyUpDownCounter(_ProxyInstrument[UpDownCounter], UpDownCounter): @@ -245,9 +251,10 @@ def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: if self._real_instrument: - self._real_instrument.add(amount, attributes) + self._real_instrument.add(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> UpDownCounter: return meter.create_up_down_counter( @@ -328,6 +335,7 @@ def record( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: pass @@ -347,8 +355,9 @@ def record( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: - return super().record(amount, attributes=attributes) + return super().record(amount, attributes=attributes, context=context) class _ProxyHistogram(_ProxyInstrument[Histogram], Histogram): @@ -356,9 +365,10 @@ def record( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: 
Optional[Context] = None, ) -> None: if self._real_instrument: - self._real_instrument.record(amount, attributes) + self._real_instrument.record(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> Histogram: return meter.create_histogram( @@ -406,6 +416,7 @@ def set( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: pass @@ -425,8 +436,9 @@ def set( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: - return super().set(amount, attributes=attributes) + return super().set(amount, attributes=attributes, context=context) class _ProxyGauge( @@ -437,9 +449,10 @@ def set( self, amount: Union[int, float], attributes: Optional[Attributes] = None, + context: Optional[Context] = None, ) -> None: if self._real_instrument: - self._real_instrument.set(amount, attributes) + self._real_instrument.set(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> Gauge: return meter.create_gauge(self._name, self._unit, self._description) From 6a25608ebef6bf3214ad8b0df64541a52d8b83cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 14 Aug 2024 10:29:46 +0200 Subject: [PATCH 11/48] Add first test focusing on exemplar --- .../src/opentelemetry/sdk/metrics/__init__.py | 10 +++++ .../sdk/metrics/_internal/__init__.py | 2 +- .../integration_test/test_console_exporter.py | 43 ++++++++++++++++++- 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index 204b015572..a92d60cfb9 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -16,8 +16,13 @@ from opentelemetry.sdk.metrics._internal import Meter, MeterProvider from 
opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + AlwaysOnExemplarFilter, + AlwaysOffExemplarFilter, ExemplarFilter, ExemplarReservoir, + SimpleFixedSizeExemplarReservoir, + TraceBasedExemplarFilter, ) from opentelemetry.sdk.metrics._internal.instrument import Counter from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge @@ -30,6 +35,9 @@ ) __all__ = [ + "AlignedHistogramBucketExemplarReservoir", + "AlwaysOnExemplarFilter", + "AlwaysOffExemplarFilter", "ExemplarFilter", "ExemplarReservoir", "Meter", @@ -41,5 +49,7 @@ "ObservableCounter", "ObservableGauge", "ObservableUpDownCounter", + "SimpleFixedSizeExemplarReservoir", "UpDownCounter", + "TraceBasedExemplarFilter", ] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py index 1e96f0b4b6..5a2e428965 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py @@ -384,7 +384,7 @@ def __init__( metric_readers: Sequence[ "opentelemetry.sdk.metrics.export.MetricReader" ] = (), - resource: Resource = None, + resource: Optional[Resource] = None, exemplar_filter: Optional[ExemplarFilter] = None, shutdown_on_exit: bool = True, views: Sequence["opentelemetry.sdk.metrics.view.View"] = (), diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py index 1b3283717a..2d3f37736a 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py @@ -15,9 +15,11 @@ from io import StringIO from json import loads from unittest import TestCase +from unittest.mock import Mock, patch +from 
opentelemetry.context import Context from opentelemetry.metrics import get_meter, set_meter_provider -from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics import AlwaysOnExemplarFilter, MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, @@ -25,6 +27,9 @@ from opentelemetry.test.globals_test import reset_metrics_globals +TEST_TIMESTAMP = 1_234_567_890 + + class TestConsoleExporter(TestCase): def setUp(self): reset_metrics_globals() @@ -88,3 +93,39 @@ def test_console_exporter_no_export(self): expected = "" self.assertEqual(actual, expected) + + @patch( + "opentelemetry.sdk.metrics._internal.instrument.time_ns", + Mock(return_value=TEST_TIMESTAMP), + ) + def test_console_exporter_with_exemplars(self): + ctx = Context() + + output = StringIO() + exporter = ConsoleMetricExporter(out=output) + reader = PeriodicExportingMetricReader( + exporter, export_interval_millis=100 + ) + provider = MeterProvider(metric_readers=[reader], exemplar_filter=AlwaysOnExemplarFilter()) + set_meter_provider(provider) + meter = get_meter(__name__) + counter = meter.create_counter( + "name", description="description", unit="unit" + ) + counter.add(1, attributes={"a": "b"}, context=ctx) + provider.shutdown() + + output.seek(0) + result_0 = loads("".join(output.readlines())) + + self.assertGreater(len(result_0), 0) + + metrics = result_0["resource_metrics"][0]["scope_metrics"][0] + + self.assertEqual(metrics["scope"]["name"], "test_console_exporter") + + point = metrics["metrics"][0]["data"]["data_points"][0] + + self.assertEqual(point["attributes"], {"a": "b"}) + self.assertEqual(point["value"], 1) + self.assertEqual(point["exemplars"], [{"filtered_attributes": {}, "value": 1, "time_unix_nano": TEST_TIMESTAMP, "span_id": None, "trace_id": None}]) From 22cebeb50bf819b8874d7ea065acdcf05f9ebcdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 14 Aug 2024 10:31:23 
+0200 Subject: [PATCH 12/48] Add trivial test for exemplar filters --- .../tests/metrics/test_exemplarfilter.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 opentelemetry-sdk/tests/metrics/test_exemplarfilter.py diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py new file mode 100644 index 0000000000..0eeda84d10 --- /dev/null +++ b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py @@ -0,0 +1,28 @@ +from unittest import TestCase + +from opentelemetry.context import Context +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + TraceBasedExemplarFilter, +) + + +class TestAlwaysOnExemplarFilter(TestCase): + def test_should_sample(self): + filter = AlwaysOnExemplarFilter() + self.assertTrue(filter.should_sample(10, 0, {}, Context())) + + +class TestAlwaysOffExemplarFilter(TestCase): + def test_should_sample(self): + filter = AlwaysOffExemplarFilter() + self.assertFalse(filter.should_sample(10, 0, {}, Context())) + + +class TestTraceBasedExemplarFilter(TestCase): + def test_should_not_sample_without_trace(self): + filter = TraceBasedExemplarFilter() + self.assertFalse(filter.should_sample(10, 0, {}, Context())) + + # FIXME add test with trace that should sample From f0ecace0407a2ea62d25196880cf8340271021c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 14 Aug 2024 11:51:14 +0200 Subject: [PATCH 13/48] Lint the code --- .../metrics/_internal/observation.py | 2 +- .../src/opentelemetry/sdk/metrics/__init__.py | 2 +- .../integration_test/test_console_exporter.py | 18 +++++++++++++++--- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py index fdfef68fb8..ffc254b20a 100644 --- 
a/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py +++ b/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py @@ -60,4 +60,4 @@ def __eq__(self, other: object) -> bool: ) def __repr__(self) -> str: - return f"Observation(value={self.value}, attributes={self.attributes})" + return f"Observation(value={self.value}, attributes={self.attributes}, context={self.context})" diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index a92d60cfb9..80fc953da4 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -17,8 +17,8 @@ from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - AlwaysOnExemplarFilter, AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, ExemplarFilter, ExemplarReservoir, SimpleFixedSizeExemplarReservoir, diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py index 2d3f37736a..18dcd0da5b 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py @@ -26,7 +26,6 @@ ) from opentelemetry.test.globals_test import reset_metrics_globals - TEST_TIMESTAMP = 1_234_567_890 @@ -106,7 +105,9 @@ def test_console_exporter_with_exemplars(self): reader = PeriodicExportingMetricReader( exporter, export_interval_millis=100 ) - provider = MeterProvider(metric_readers=[reader], exemplar_filter=AlwaysOnExemplarFilter()) + provider = MeterProvider( + metric_readers=[reader], exemplar_filter=AlwaysOnExemplarFilter() + ) set_meter_provider(provider) meter = get_meter(__name__) counter = meter.create_counter( @@ -128,4 +129,15 @@ def 
test_console_exporter_with_exemplars(self): self.assertEqual(point["attributes"], {"a": "b"}) self.assertEqual(point["value"], 1) - self.assertEqual(point["exemplars"], [{"filtered_attributes": {}, "value": 1, "time_unix_nano": TEST_TIMESTAMP, "span_id": None, "trace_id": None}]) + self.assertEqual( + point["exemplars"], + [ + { + "filtered_attributes": {}, + "value": 1, + "time_unix_nano": TEST_TIMESTAMP, + "span_id": None, + "trace_id": None, + } + ], + ) From fadcefcf1f5b08ecb0b365b72e6dff6a987d90ae Mon Sep 17 00:00:00 2001 From: czhang771 Date: Fri, 16 Aug 2024 12:10:16 -0700 Subject: [PATCH 14/48] add unit tests for exemplarfilter, exemplarreservoir, and reservoirfactory --- .../tests/metrics/test_exemplarfilter.py | 34 ++++++- .../tests/metrics/test_exemplarreservoir.py | 98 +++++++++++++++++++ 2 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py index 0eeda84d10..df7ccc369b 100644 --- a/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py +++ b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py @@ -1,13 +1,15 @@ from unittest import TestCase +from opentelemetry import trace from opentelemetry.context import Context +from opentelemetry.trace.span import SpanContext +from opentelemetry.trace import TraceFlags from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) - class TestAlwaysOnExemplarFilter(TestCase): def test_should_sample(self): filter = AlwaysOnExemplarFilter() @@ -21,8 +23,34 @@ def test_should_sample(self): class TestTraceBasedExemplarFilter(TestCase): + TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) + SPAN_ID = int("6e0c63257de34c92", 16) def test_should_not_sample_without_trace(self): + filter = TraceBasedExemplarFilter() + span_context = SpanContext( + 
trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags= TraceFlags(TraceFlags.DEFAULT), + trace_state={} + ) + span = trace.NonRecordingSpan(span_context) + ctx = trace.set_span_in_context(span) + self.assertFalse(filter.should_sample(10, 0, {}, ctx)) + + def test_should_not_sample_with_invalid_span(self): filter = TraceBasedExemplarFilter() self.assertFalse(filter.should_sample(10, 0, {}, Context())) - - # FIXME add test with trace that should sample + + def test_should_sample_when_trace_is_sampled(self): + filter = TraceBasedExemplarFilter() + span_context = SpanContext( + trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags= TraceFlags(TraceFlags.SAMPLED), + trace_state={} + ) + span = trace.NonRecordingSpan(span_context) + ctx = trace.set_span_in_context(span) + self.assertTrue(filter.should_sample(10, 0, {}, ctx)) \ No newline at end of file diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py new file mode 100644 index 0000000000..91bbe30adb --- /dev/null +++ b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py @@ -0,0 +1,98 @@ +from unittest import TestCase + +#from opentelemetry.context import Context +from opentelemetry.trace import INVALID_SPAN, SpanContext, TraceFlags +from opentelemetry import trace +from time import time_ns +from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirFactory, + SimpleFixedSizeExemplarReservoir +) +from opentelemetry.sdk.metrics._internal.aggregation import ( + _ExplicitBucketHistogramAggregation, + _LastValueAggregation, + _SumAggregation, +) + +class TestSimpleFixedSizeExemplarReservoir(TestCase): + + TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) + SPAN_ID = int("6e0c63257de34c92", 16) + + def 
test_no_measurements(self): + reservoir = SimpleFixedSizeExemplarReservoir(10) + self.assertEqual(len(reservoir.collect({})), 0) + + def test_has_context(self): + reservoir = SimpleFixedSizeExemplarReservoir(1) + span_context = SpanContext( + trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags= TraceFlags(TraceFlags.SAMPLED), + trace_state={} + ) + span = trace.NonRecordingSpan(span_context) + ctx = trace.set_span_in_context(span) + reservoir.offer(1, time_ns(), {}, ctx) + exemplars = reservoir.collect({}) + self.assertEqual(len(exemplars), 1) + self.assertEqual(exemplars[0].trace_id, self.TRACE_ID) + self.assertEqual(exemplars[0].span_id, self.SPAN_ID) + + def test_filter_attributes(self): + reservoir = SimpleFixedSizeExemplarReservoir(1) + span_context = SpanContext( + trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags= TraceFlags(TraceFlags.SAMPLED), + trace_state={} + ) + span = trace.NonRecordingSpan(span_context) + ctx = trace.set_span_in_context(span) + reservoir.offer(1, time_ns(), {"key1": "value1", "key2": "value2"}, ctx) + exemplars = reservoir.collect({"key2": "value2", "key3": "value3"}) + self.assertEqual(len(exemplars), 1) + self.assertNotEqual("key1", exemplars[0].filtered_attributes) + +class TestAlignedHistogramBucketExemplarReservoir(TestCase): + + TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) + SPAN_ID = int("6e0c63257de34c92", 16) + + def test_measurement_in_buckets(self): + reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25, 50, 75]) + span_context = SpanContext( + trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags= TraceFlags(TraceFlags.SAMPLED), + trace_state={} + ) + span = trace.NonRecordingSpan(span_context) + ctx = trace.set_span_in_context(span) + reservoir.offer(52, time_ns(), {"bucket": "5"}, ctx) + reservoir.offer(7, time_ns(), {"bucket": "3"}, ctx) + reservoir.offer(6, time_ns(), {"bucket": "3"}, ctx) + exemplars = 
reservoir.collect({"bucket": "3"}) + self.assertEqual(len(exemplars), 2) + self.assertEqual(exemplars[0].value, 6) + self.assertEqual(exemplars[1].value, 52) + self.assertEqual(len(exemplars[0].filtered_attributes), 0) + self.assertNotEqual(exemplars[1].filtered_attributes, {"bucket": "5"}) + + +class TestExemplarReservoirFactory(TestCase): + def test_sum_aggregation(self): + exemplar_reservoir = default_reservoir_factory(_SumAggregation) + self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) + def test_last_value_aggregation(self): + exemplar_reservoir = default_reservoir_factory(_LastValueAggregation) + self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) + def test_explicit_histogram_aggregation(self): + exemplar_reservoir = default_reservoir_factory(_ExplicitBucketHistogramAggregation) + self.assertEqual(exemplar_reservoir, AlignedHistogramBucketExemplarReservoir) \ No newline at end of file From 70f8bef09310b44d2ce0f2d9ea38d288b483da17 Mon Sep 17 00:00:00 2001 From: czhang771 Date: Fri, 23 Aug 2024 16:00:43 -0700 Subject: [PATCH 15/48] add unit and integration tests --- .../integration_test/test_histogram_export.py | 65 +++++ .../integration_test/test_sum_aggregation.py | 38 ++- .../tests/metrics/test_aggregation.py | 79 +++++- .../tests/metrics/test_exemplarreservoir.py | 50 +++- .../metrics/test_view_instrument_match.py | 228 +++++++++++++++++- 5 files changed, 455 insertions(+), 5 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py index eaf590219b..2d4b6d4f43 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py @@ -17,6 +17,11 @@ from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry.sdk.resources import 
SERVICE_NAME, Resource +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + TraceBasedExemplarFilter, +) class TestHistogramExport(TestCase): @@ -88,3 +93,63 @@ def test_histogram_counter_collection(self): ), 1, ) + + def test_histogram_with_exemplars(self): + + in_memory_metric_reader = InMemoryMetricReader() + + provider = MeterProvider( + resource=Resource.create({SERVICE_NAME: "otel-test"}), + metric_readers=[in_memory_metric_reader], + exemplar_filter=AlwaysOnExemplarFilter() + ) + meter = provider.get_meter("my-meter") + histogram = meter.create_histogram("my_histogram") + + histogram.record(2, {"attribute": "value1"}) # Should go in the first bucket + histogram.record(7, {"attribute": "value2"}) # Should go in the second bucket + histogram.record(9, {"attribute": "value2"}) # Should also go in the second bucket + histogram.record(15, {"attribute": "value3"}) # Should go in the third bucket + + metric_data = in_memory_metric_reader.get_metrics_data() + + self.assertEqual(len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1) + histogram_metric = metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + + self.assertEqual(len(histogram_metric.data.data_points), 3) + + self.assertEqual(len(histogram_metric.data.data_points[0].exemplars), 1) + self.assertEqual(len(histogram_metric.data.data_points[1].exemplars), 1) + self.assertEqual(len(histogram_metric.data.data_points[2].exemplars), 1) + + self.assertEqual(histogram_metric.data.data_points[0].sum, 2) + self.assertEqual(histogram_metric.data.data_points[1].sum, 16) + self.assertEqual(histogram_metric.data.data_points[2].sum, 15) + + self.assertEqual(histogram_metric.data.data_points[0].exemplars[0].value, 2.0) + self.assertEqual(histogram_metric.data.data_points[1].exemplars[0].value, 9.0) + self.assertEqual(histogram_metric.data.data_points[2].exemplars[0].value, 15.0) + + def test_filter_with_exemplars(self): + 
in_memory_metric_reader = InMemoryMetricReader() + + provider = MeterProvider( + resource=Resource.create({SERVICE_NAME: "otel-test"}), + metric_readers=[in_memory_metric_reader], + exemplar_filter=AlwaysOffExemplarFilter() + ) + meter = provider.get_meter("my-meter") + histogram = meter.create_histogram("my_histogram") + + histogram.record(2, {"attribute": "value1"}) # Should go in the first bucket + histogram.record(7, {"attribute": "value2"}) # Should go in the second bucket + + metric_data = in_memory_metric_reader.get_metrics_data() + + self.assertEqual(len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1) + histogram_metric = metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + + self.assertEqual(len(histogram_metric.data.data_points), 2) + + self.assertEqual(len(histogram_metric.data.data_points[0].exemplars), 0) + self.assertEqual(len(histogram_metric.data.data_points[1].exemplars), 0) \ No newline at end of file diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py index 74a77eb534..38be37bfea 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py @@ -27,7 +27,12 @@ InMemoryMetricReader, ) from opentelemetry.sdk.metrics.view import SumAggregation - +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + TraceBasedExemplarFilter, +) +from opentelemetry.context import Context class TestSumAggregation(TestCase): @mark.skipif( @@ -474,3 +479,34 @@ def test_synchronous_cumulative_temporality(self): start_time_unix_nano, metric_data.start_time_unix_nano ) self.assertEqual(metric_data.value, 80) + + def test_sum_aggregation_with_exemplars(self): + + in_memory_metric_reader = InMemoryMetricReader() + + provider = MeterProvider( + metric_readers=[in_memory_metric_reader], + 
exemplar_filter=AlwaysOnExemplarFilter(), + ) + + meter = provider.get_meter("my-meter") + counter = meter.create_counter("my_counter") + + counter.add(2, {"attribute": "value1"}, context=Context()) + counter.add(5, {"attribute": "value2"}, context=Context()) + counter.add(3, {"attribute": "value3"}, context=Context()) + + metric_data = in_memory_metric_reader.get_metrics_data() + + self.assertEqual(len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1) + + sum_metric = metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + + data_points = sum_metric.data.data_points + self.assertEqual(len(data_points), 3) + + self.assertEqual(data_points[0].exemplars[0].value, 2.0) + self.assertEqual(data_points[1].exemplars[0].value, 5.0) + self.assertEqual(data_points[2].exemplars[0].value, 3.0) + + provider.shutdown() \ No newline at end of file diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 58c67263aa..17fa59227f 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -48,7 +48,11 @@ SumAggregation, ) from opentelemetry.util.types import Attributes - +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoirFactory, + SimpleFixedSizeExemplarReservoir, +) def measurement( value: Union[int, float], attributes: Attributes = None @@ -640,3 +644,76 @@ def test_observable_gauge(self): 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) + +class TestExemplarsFromAggregations(TestCase): + + def test_collection_simple_fixed_size_reservoir(self): + exemplar_reservoir_factory = lambda: SimpleFixedSizeExemplarReservoir(size=5) + synchronous_sum_aggregation = _SumAggregation( + Mock(), + True, + AggregationTemporality.DELTA, + 0, + exemplar_reservoir_factory, + ) + + synchronous_sum_aggregation.aggregate(measurement(1)) + 
synchronous_sum_aggregation.aggregate(measurement(2)) + synchronous_sum_aggregation.aggregate(measurement(3)) + + self.assertEqual(synchronous_sum_aggregation._value, 6) + datapoint = synchronous_sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 0) + self.assertEqual(len(datapoint.exemplars), 3) + + def test_collection_simple_fixed_size_reservoir_with_default_reservoir(self): + + synchronous_sum_aggregation = _SumAggregation( + Mock(), + True, + AggregationTemporality.DELTA, + 0, + default_reservoir_factory(_SumAggregation), + ) + + synchronous_sum_aggregation.aggregate(measurement(1)) + synchronous_sum_aggregation.aggregate(measurement(2)) + synchronous_sum_aggregation.aggregate(measurement(3)) + + self.assertEqual(synchronous_sum_aggregation._value, 6) + datapoint = synchronous_sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 0) + self.assertEqual(len(datapoint.exemplars), 1) + + def test_collection_aligned_histogram_bucket_reservoir(self): + boundaries = [5.0, 10.0, 20.0] + exemplar_reservoir_factory = lambda: AlignedHistogramBucketExemplarReservoir(boundaries) + synchronous_sum_aggregation = _SumAggregation( + Mock(), + True, + AggregationTemporality.DELTA, + 0, + exemplar_reservoir_factory, + ) + + synchronous_sum_aggregation.aggregate(measurement(2.0)) + synchronous_sum_aggregation.aggregate(measurement(4.0)) + synchronous_sum_aggregation.aggregate(measurement(6.0)) + synchronous_sum_aggregation.aggregate(measurement(15.0)) + synchronous_sum_aggregation.aggregate(measurement(25.0)) + + datapoint = synchronous_sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 0) + self.assertEqual(len(datapoint.exemplars), 4) + + # Verify that exemplars are associated with the correct boundaries + expected_buckets = [ + (4.0, boundaries[0]), # First bucket, should hold the last value <= 5.0 + (6.0, boundaries[1]), # Second bucket, should hold the last value <= 10.0 + (15.0, boundaries[2]), # Third bucket, should hold the last value <= 20.0 + (25.0, 
None), # Last bucket, should hold the value > 20.0 + ] + + for exemplar, (value, boundary) in zip(datapoint.exemplars, expected_buckets): + self.assertEqual(exemplar.value, value) + if boundary is not None: + self.assertLessEqual(exemplar.value, boundary) + else: + self.assertGreater(exemplar.value, boundaries[-1]) diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py index 91bbe30adb..7d8ba026ed 100644 --- a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py +++ b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py @@ -1,6 +1,6 @@ from unittest import TestCase -#from opentelemetry.context import Context +from opentelemetry.context import Context from opentelemetry.trace import INVALID_SPAN, SpanContext, TraceFlags from opentelemetry import trace from time import time_ns @@ -58,6 +58,26 @@ def test_filter_attributes(self): exemplars = reservoir.collect({"key2": "value2", "key3": "value3"}) self.assertEqual(len(exemplars), 1) self.assertNotEqual("key1", exemplars[0].filtered_attributes) + + def test_reset_after_collection(self): + reservoir = SimpleFixedSizeExemplarReservoir(4) + + reservoir.offer(1.0, time_ns(), {"attribute": "value1"}, Context()) + reservoir.offer(2.0, time_ns(), {"attribute": "value2"}, Context()) + reservoir.offer(3.0, time_ns(), {"attribute": "value3"}, Context()) + + exemplars = reservoir.collect({}) + self.assertEqual(len(exemplars), 3) + + # Offer new measurements after reset + reservoir.offer(4.0, time_ns(), {"attribute": "value4"}, Context()) + reservoir.offer(5.0, time_ns(), {"attribute": "value5"}, Context()) + + # Collect again and check the number of exemplars + new_exemplars = reservoir.collect({}) + self.assertEqual(len(new_exemplars), 2) + self.assertEqual(new_exemplars[0].value, 4.0) + self.assertEqual(new_exemplars[1].value, 5.0) class TestAlignedHistogramBucketExemplarReservoir(TestCase): @@ -78,14 +98,40 @@ def 
test_measurement_in_buckets(self): reservoir.offer(52, time_ns(), {"bucket": "5"}, ctx) reservoir.offer(7, time_ns(), {"bucket": "3"}, ctx) reservoir.offer(6, time_ns(), {"bucket": "3"}, ctx) + exemplars = reservoir.collect({"bucket": "3"}) + self.assertEqual(len(exemplars), 2) self.assertEqual(exemplars[0].value, 6) self.assertEqual(exemplars[1].value, 52) self.assertEqual(len(exemplars[0].filtered_attributes), 0) self.assertNotEqual(exemplars[1].filtered_attributes, {"bucket": "5"}) + def test_last_measurement_in_bucket(self): + reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25]) + span_context = SpanContext( + trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags=TraceFlags(TraceFlags.SAMPLED), + trace_state={} + ) + span = trace.NonRecordingSpan(span_context) + ctx = trace.set_span_in_context(span) + + # Offer values to the reservoir + reservoir.offer(2, time_ns(), {"bucket": "1"}, ctx) # Bucket 1 + reservoir.offer(7, time_ns(), {"bucket": "2"}, ctx) # Bucket 2 + reservoir.offer(8, time_ns(), {"bucket": "2"}, ctx) # Bucket 2 - should replace the 7 + reservoir.offer(15, time_ns(), {"bucket": "3"}, ctx) # Bucket 3 - + exemplars = reservoir.collect({}) + + # Check that each bucket has the correct value + self.assertEqual(len(exemplars), 3) + self.assertEqual(exemplars[0].value, 2) + self.assertEqual(exemplars[1].value, 8) + self.assertEqual(exemplars[2].value, 15) + class TestExemplarReservoirFactory(TestCase): def test_sum_aggregation(self): exemplar_reservoir = default_reservoir_factory(_SumAggregation) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index 320e133ff9..6d80a752bf 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -26,7 +26,7 @@ _DropAggregation, _LastValueAggregation, ) -from opentelemetry.sdk.metrics._internal.instrument 
import _Counter +from opentelemetry.sdk.metrics._internal.instrument import _Counter, _Histogram from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, @@ -38,7 +38,29 @@ LastValueAggregation, View, ) +from opentelemetry.sdk.metrics._internal.aggregation import ( + Aggregation, + DefaultAggregation, + _Aggregation, + _ExplicitBucketHistogramAggregation, + _ExponentialBucketHistogramAggregation, +) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirFactory, + SimpleFixedSizeExemplarReservoir +) +from typing import Callable, Optional, Set, Type, Any, Sequence +def generalized_reservoir_factory(size: int = 1, boundaries: Sequence[float] = None) -> Callable[[Type[_Aggregation]], ExemplarReservoirFactory]: + def factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: + if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + return lambda **kwargs: AlignedHistogramBucketExemplarReservoir(boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != 'boundaries'}) + else: + return lambda **kwargs: SimpleFixedSizeExemplarReservoir(size=size, **kwargs) + + return factory class Test_ViewInstrumentMatch(TestCase): # pylint: disable=invalid-name @classmethod @@ -353,3 +375,207 @@ def test_setting_aggregation(self): ], _LastValueAggregation, ) +class TestSimpleFixedSizeExemplarReservoir(TestCase): + + def test_consume_measurement_with_custom_reservoir_factory(self): + simple_fixed_size_factory = generalized_reservoir_factory(size=10) + + # Create an instance of _Counter + instrument1 = _Counter( + name="instrument1", + instrumentation_scope=None, + measurement_consumer=None, + description="description", + unit="unit", + ) + + view_instrument_match = _ViewInstrumentMatch( + view=View( + instrument_name="instrument1", + name="name", + 
aggregation=DefaultAggregation(), + exemplar_reservoir_factory=simple_fixed_size_factory, + ), + instrument=instrument1, + instrument_class_aggregation={_Counter: DefaultAggregation()}, + ) + + # Consume measurements with the same attributes to ensure aggregation + view_instrument_match.consume_measurement( + Measurement( + value=2.0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute1": "value1"}, + ) + ) + + view_instrument_match.consume_measurement( + Measurement( + value=4.0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute2": "value2"}, + ) + ) + + view_instrument_match.consume_measurement( + Measurement( + value=5.0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute2": "value2"}, + ) + ) + + data_points = view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) + + # Ensure only one data point is collected + self.assertEqual(len(data_points), 2) + + # Verify that exemplars have been correctly stored and collected + self.assertEqual(len(data_points[0].exemplars), 1) + self.assertEqual(len(data_points[1].exemplars), 2) + + self.assertEqual(data_points[0].exemplars[0].value, 2.0) + self.assertEqual(data_points[1].exemplars[0].value, 4.0) + self.assertEqual(data_points[1].exemplars[1].value, 5.0) + + + def test_consume_measurement_with_exemplars(self): + # Create an instance of _Counter + instrument1 = _Counter( + name="instrument1", + instrumentation_scope=None, # No mock, set to None or actual scope if available + measurement_consumer=None, # No mock, set to None or actual consumer if available + description="description", + unit="unit", + ) + + view_instrument_match = _ViewInstrumentMatch( + view=View( + instrument_name="instrument1", + name="name", + aggregation=DefaultAggregation(), + ), + instrument=instrument1, + instrument_class_aggregation={_Counter: DefaultAggregation()}, + ) + + # Consume 
measurements with the same attributes to ensure aggregation + view_instrument_match.consume_measurement( + Measurement( + value=4.0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute2": "value2"}, + ) + ) + + view_instrument_match.consume_measurement( + Measurement( + value=5.0, + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute2": "value2"}, + ) + ) + + # Collect the data points + data_points = view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) + + # Ensure only one data point is collected + self.assertEqual(len(data_points), 1) + + # Verify that exemplars have been correctly stored and collected + self.assertEqual(len(data_points[0].exemplars), 2) + + self.assertEqual(data_points[0].exemplars[0].value, 4.0) + self.assertEqual(data_points[0].exemplars[1].value, 5.0) + +class TestAlignedHistogramBucketExemplarReservoir(TestCase): + + def test_consume_measurement_with_custom_reservoir_factory(self): + # Custom factory for AlignedHistogramBucketExemplarReservoir with specific boundaries + histogram_reservoir_factory = generalized_reservoir_factory(boundaries=[0, 5, 10, 25]) + + # Create an instance of _Histogram + instrument1 = _Histogram( + name="instrument1", + instrumentation_scope=None, + measurement_consumer=None, + description="description", + unit="unit", + ) + + view_instrument_match = _ViewInstrumentMatch( + view=View( + instrument_name="instrument1", + name="name", + aggregation=DefaultAggregation(), + exemplar_reservoir_factory=histogram_reservoir_factory, + ), + instrument=instrument1, + instrument_class_aggregation={_Histogram: DefaultAggregation()}, + ) + + # Consume measurements with different values to ensure they are placed in the correct buckets + view_instrument_match.consume_measurement( + Measurement( + value=2.0, # Should go into the first bucket (0 to 5) + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + 
attributes={"attribute1": "value1"}, + ) + ) + + view_instrument_match.consume_measurement( + Measurement( + value=7.0, # Should go into the second bucket (5 to 10) + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute2": "value2"}, + ) + ) + + view_instrument_match.consume_measurement( + Measurement( + value=8.0, # Should go into the second bucket (5 to 10) + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute2": "value2"}, + ) + ) + + view_instrument_match.consume_measurement( + Measurement( + value=15.0, # Should go into the third bucket (10 to 25) + time_unix_nano=time_ns(), + instrument=instrument1, + context=Context(), + attributes={"attribute3": "value3"}, + ) + ) + + # Collect the data points + data_points = view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) + + # Ensure three data points are collected, one for each bucket + self.assertEqual(len(data_points), 3) + + # Verify that exemplars have been correctly stored and collected in their respective buckets + self.assertEqual(len(data_points[0].exemplars), 1) + self.assertEqual(len(data_points[1].exemplars), 1) + self.assertEqual(len(data_points[2].exemplars), 1) + + self.assertEqual(data_points[0].exemplars[0].value, 2.0) # First bucket + self.assertEqual(data_points[1].exemplars[0].value, 8.0) # Second bucket + self.assertEqual(data_points[2].exemplars[0].value, 15.0) # Third bucket \ No newline at end of file From 351730c80cb19426023211a8c8f50025653abe5e Mon Sep 17 00:00:00 2001 From: czhang771 Date: Tue, 27 Aug 2024 16:22:10 -0700 Subject: [PATCH 16/48] update otlp exporter to export exemplars --- .../_internal/metrics_encoder/__init__.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py 
b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py index 0d66fd28b7..f3fd673712 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py @@ -28,6 +28,8 @@ ) from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, + _encode_span_id, + _encode_trace_id, ) from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, @@ -216,6 +218,7 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: data_point.attributes ), time_unix_nano=data_point.time_unix_nano, + exemplars=encode_exemplars(data_point.exemplars), ) if isinstance(data_point.value, int): pt.as_int = data_point.value @@ -233,6 +236,7 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: start_time_unix_nano=( data_point.start_time_unix_nano ), + exemplars=encode_exemplars(data_point.exemplars), count=data_point.count, sum=data_point.sum, bucket_counts=data_point.bucket_counts, @@ -255,6 +259,7 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: data_point.start_time_unix_nano ), time_unix_nano=data_point.time_unix_nano, + exemplars=encode_exemplars(data_point.exemplars), ) if isinstance(data_point.value, int): pt.as_int = data_point.value @@ -296,6 +301,7 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: start_time_unix_nano=( data_point.start_time_unix_nano ), + exemplars=encode_exemplars(data_point.exemplars), count=data_point.count, sum=data_point.sum, scale=data_point.scale, @@ -336,3 +342,32 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: ) resource_metrics = resource_data return 
ExportMetricsServiceRequest(resource_metrics=resource_metrics) + +def encode_exemplars(sdk_exemplars: list) -> list: + """ + Converts a list of SDK Exemplars into a list of protobuf Exemplars. + + Args: + sdk_exemplars (list): The list of exemplars from the OpenTelemetry SDK. + + Returns: + list: A list of protobuf exemplars. + """ + pb_exemplars = [] + for sdk_exemplar in sdk_exemplars: + pb_exemplar = pb2.Exemplar( + time_unix_nano=sdk_exemplar.time_unix_nano, + span_id=_encode_span_id(sdk_exemplar.span_id), + trace_id=_encode_trace_id(sdk_exemplar.trace_id), + filtered_attributes=_encode_attributes(sdk_exemplar.filtered_attributes), + ) + # Assign the value based on its type in the SDK exemplar + if isinstance(sdk_exemplar.value, float): + pb_exemplar.as_double = sdk_exemplar.value + elif isinstance(sdk_exemplar.value, int): + pb_exemplar.as_int = sdk_exemplar.value + else: + raise ValueError("Exemplar value must be an int or float") + pb_exemplars.append(pb_exemplar) + + return pb_exemplars \ No newline at end of file From afd4e2c28292e1375a2c1fc9266a67e26ef01ac8 Mon Sep 17 00:00:00 2001 From: czhang771 Date: Thu, 29 Aug 2024 12:58:48 -0700 Subject: [PATCH 17/48] address basic PR comments --- .../sdk/metrics/_internal/aggregation.py | 13 ++-- .../_internal/exemplar/exemplar_reservoir.py | 6 +- .../sdk/metrics/_internal/measurement.py | 2 +- .../sdk/metrics/_internal/view.py | 4 +- ...xponential_bucket_histogram_aggregation.py | 50 +++++++-------- .../tests/metrics/test_aggregation.py | 62 +++++++++---------- .../tests/metrics/test_exemplarreservoir.py | 8 +-- 7 files changed, 72 insertions(+), 73 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index c6f17be776..8ab284c16c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ 
-117,7 +117,7 @@ def collect( def _collect_exemplars(self) -> Sequence[Exemplar]: return self._reservoir.collect( self._attributes - ) # FIXME provide filtered data point attributes + ) class _DropAggregation(_Aggregation): @@ -165,7 +165,7 @@ def aggregate( self._value = self._value + measurement.value - super().aggregate(measurement, should_sample_exemplar) + super().aggregate(measurement, should_sample_exemplar) def collect( self, @@ -295,7 +295,6 @@ def collect( with self._lock: value = self._value self._value = None - exemplars = self._collect_exemplars() if ( self._instrument_aggregation_temporality @@ -320,7 +319,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, value=value, @@ -333,7 +332,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, value=self._previous_value, @@ -362,7 +361,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, value=result_value, @@ -370,7 +369,7 @@ def collect( return NumberDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, value=value, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 338b3c3a2c..310d61a14d 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -98,7 +98,7 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: if not self.__offered: return None - current_attributes = ( + filtered_attributes = ( { k: v for k, v in self.__attributes.items() @@ -109,7 +109,7 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: ) exemplar = Exemplar( - current_attributes, + filtered_attributes, self.__value, self.__time_unix_nano, self.__span_id, @@ -206,11 +206,11 @@ def _find_bucket_index( attributes: Attributes, ctx: Context, ) -> int: - self._measurements_seen += 1 if self._measurements_seen < self._size: return self._measurements_seen index = randrange(0, self._measurements_seen) + self._measurements_seen += 1 return index if index < self._size else -1 diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py index 01c0a93e51..a73d6001a1 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py @@ -28,7 +28,7 @@ class Measurement: Attributes: value: Measured value time_unix_nano: The time the API call was made to record the Measurement - instrument: Measurement instrument + instrument: The instrument that produced this `Measurement`. context: The active Context of the Measurement at API call time. 
attributes: Measurement attributes """ diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index c2859b1e5d..8d913cf8d0 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -34,7 +34,7 @@ _logger = getLogger(__name__) -def default_reservoir_factory( +def _default_reservoir_factory( aggregationType: Type[_Aggregation], ) -> ExemplarReservoirFactory: """Default reservoir factory per aggregation.""" @@ -159,7 +159,7 @@ def __init__( self._attribute_keys = attribute_keys self._aggregation = aggregation or self._default_aggregation self._exemplar_reservoir_factory = ( - exemplar_reservoir_factory or default_reservoir_factory + exemplar_reservoir_factory or _default_reservoir_factory ) # pylint: disable=too-many-return-statements diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index f157486ca9..1e2d3e734a 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -47,7 +47,7 @@ from opentelemetry.sdk.metrics._internal.point import ( ExponentialHistogramDataPoint, ) -from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory +from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.sdk.metrics.view import ( ExponentialBucketHistogramAggregation, ) @@ -168,7 +168,7 @@ def test_alternating_growth_0(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation 
), AggregationTemporality.DELTA, @@ -209,7 +209,7 @@ def test_alternating_growth_1(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -292,7 +292,7 @@ def test_permutations(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -342,7 +342,7 @@ def ascending_sequence_test( exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -455,7 +455,7 @@ def mock_increment(self, bucket_index: int) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -521,7 +521,7 @@ def test_move_into(self): exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -532,7 +532,7 @@ def test_move_into(self): exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -589,7 +589,7 @@ def test_very_large_numbers(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -675,7 +675,7 @@ def test_full_range(self): 
exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -723,7 +723,7 @@ def test_aggregator_min_max(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -742,7 +742,7 @@ def test_aggregator_min_max(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -764,7 +764,7 @@ def test_aggregator_copy_swap(self): exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -778,7 +778,7 @@ def test_aggregator_copy_swap(self): exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -792,7 +792,7 @@ def test_aggregator_copy_swap(self): exponential_histogram_aggregation_2 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -845,7 +845,7 @@ def test_zero_count_by_increment(self): exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -862,7 +862,7 @@ def test_zero_count_by_increment(self): exponential_histogram_aggregation_1 = ( 
_ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -905,7 +905,7 @@ def test_one_count_by_increment(self): exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -922,7 +922,7 @@ def test_one_count_by_increment(self): exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -996,7 +996,7 @@ def test_min_max_size(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -1027,7 +1027,7 @@ def test_aggregate_collect(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -1061,7 +1061,7 @@ def test_collect_results_cumulative(self) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -1170,7 +1170,7 @@ def test_cumulative_aggregation_with_random_data(self) -> None: histogram = _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory(_ExponentialBucketHistogramAggregation), + _default_reservoir_factory(_ExponentialBucketHistogramAggregation), AggregationTemporality.DELTA, Mock(), ) @@ -1232,7 +1232,7 @@ def test_merge_collect_cumulative(self): exponential_histogram_aggregation = ( 
_ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, @@ -1290,7 +1290,7 @@ def test_merge_collect_delta(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), - default_reservoir_factory( + _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 17fa59227f..dcc374c1f1 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -36,7 +36,7 @@ _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement -from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory +from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.sdk.metrics.export import ( AggregationTemporality, NumberDataPoint, @@ -77,7 +77,7 @@ def test_aggregate_delta(self): True, AggregationTemporality.DELTA, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -91,7 +91,7 @@ def test_aggregate_delta(self): True, AggregationTemporality.DELTA, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -110,7 +110,7 @@ def test_aggregate_cumulative(self): True, AggregationTemporality.CUMULATIVE, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -124,7 +124,7 @@ def test_aggregate_cumulative(self): True, AggregationTemporality.CUMULATIVE, 0, - default_reservoir_factory(_SumAggregation), + 
_default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -143,7 +143,7 @@ def test_collect_delta(self): True, AggregationTemporality.DELTA, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -173,7 +173,7 @@ def test_collect_delta(self): True, AggregationTemporality.DELTA, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -208,7 +208,7 @@ def test_collect_cumulative(self): True, AggregationTemporality.CUMULATIVE, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) sum_aggregation.aggregate(measurement(1)) @@ -245,7 +245,7 @@ def test_aggregate(self): """ last_value_aggregation = _LastValueAggregation( - Mock(), default_reservoir_factory(_LastValueAggregation) + Mock(), _default_reservoir_factory(_LastValueAggregation) ) last_value_aggregation.aggregate(measurement(1)) @@ -263,7 +263,7 @@ def test_collect(self): """ last_value_aggregation = _LastValueAggregation( - Mock(), default_reservoir_factory(_LastValueAggregation) + Mock(), _default_reservoir_factory(_LastValueAggregation) ) self.assertIsNone( @@ -323,7 +323,7 @@ def test_aggregate(self): Mock(), AggregationTemporality.DELTA, 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory(_ExplicitBucketHistogramAggregation), boundaries=[0, 2, 4], ) ) @@ -364,7 +364,7 @@ def test_min_max(self): Mock(), AggregationTemporality.CUMULATIVE, 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory(_ExplicitBucketHistogramAggregation), ) ) @@ -382,7 +382,7 @@ def test_min_max(self): Mock(), AggregationTemporality.CUMULATIVE, 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), + 
_default_reservoir_factory(_ExplicitBucketHistogramAggregation), record_min_max=False, ) ) @@ -406,7 +406,7 @@ def test_collect(self): Mock(), AggregationTemporality.DELTA, 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory(_ExplicitBucketHistogramAggregation), boundaries=[0, 1, 2], ) ) @@ -445,7 +445,7 @@ def test_boundaries(self): Mock(), AggregationTemporality.CUMULATIVE, 0, - default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory(_ExplicitBucketHistogramAggregation), )._boundaries, ( 0.0, @@ -472,7 +472,7 @@ def test_sum_factory(self): counter = _Counter("name", Mock(), Mock()) factory = SumAggregation() aggregation = factory._create_aggregation( - counter, Mock(), default_reservoir_factory, 0 + counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) @@ -481,14 +481,14 @@ def test_sum_factory(self): AggregationTemporality.DELTA, ) aggregation2 = factory._create_aggregation( - counter, Mock(), default_reservoir_factory, 0 + counter, Mock(), _default_reservoir_factory, 0 ) self.assertNotEqual(aggregation, aggregation2) counter = _UpDownCounter("name", Mock(), Mock()) factory = SumAggregation() aggregation = factory._create_aggregation( - counter, Mock(), default_reservoir_factory, 0 + counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) @@ -500,7 +500,7 @@ def test_sum_factory(self): counter = _ObservableCounter("name", Mock(), Mock(), None) factory = SumAggregation() aggregation = factory._create_aggregation( - counter, Mock(), default_reservoir_factory, 0 + counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) @@ -519,13 +519,13 @@ def test_explicit_bucket_histogram_factory(self): 
record_min_max=False, ) aggregation = factory._create_aggregation( - histo, Mock(), default_reservoir_factory, 0 + histo, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) self.assertFalse(aggregation._record_min_max) self.assertEqual(aggregation._boundaries, (0.0, 5.0)) aggregation2 = factory._create_aggregation( - histo, Mock(), default_reservoir_factory, 0 + histo, Mock(), _default_reservoir_factory, 0 ) self.assertNotEqual(aggregation, aggregation2) @@ -533,11 +533,11 @@ def test_last_value_factory(self): counter = _Counter("name", Mock(), Mock()) factory = LastValueAggregation() aggregation = factory._create_aggregation( - counter, Mock(), default_reservoir_factory, 0 + counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _LastValueAggregation) aggregation2 = factory._create_aggregation( - counter, Mock(), default_reservoir_factory, 0 + counter, Mock(), _default_reservoir_factory, 0 ) self.assertNotEqual(aggregation, aggregation2) @@ -551,7 +551,7 @@ def test_counter(self): aggregation = self.default_aggregation._create_aggregation( _Counter("name", Mock(), Mock()), Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) @@ -565,7 +565,7 @@ def test_up_down_counter(self): aggregation = self.default_aggregation._create_aggregation( _UpDownCounter("name", Mock(), Mock()), Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) @@ -579,7 +579,7 @@ def test_observable_counter(self): aggregation = self.default_aggregation._create_aggregation( _ObservableCounter("name", Mock(), Mock(), callbacks=[Mock()]), Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) @@ -595,7 +595,7 @@ def test_observable_up_down_counter(self): "name", Mock(), Mock(), callbacks=[Mock()] ), 
Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) @@ -613,7 +613,7 @@ def test_histogram(self): Mock(), ), Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) @@ -626,7 +626,7 @@ def test_gauge(self): Mock(), ), Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) @@ -640,7 +640,7 @@ def test_observable_gauge(self): callbacks=[Mock()], ), Mock(), - default_reservoir_factory, + _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) @@ -672,7 +672,7 @@ def test_collection_simple_fixed_size_reservoir_with_default_reservoir(self): True, AggregationTemporality.DELTA, 0, - default_reservoir_factory(_SumAggregation), + _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py index 7d8ba026ed..4708ffc5d0 100644 --- a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py +++ b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py @@ -4,7 +4,7 @@ from opentelemetry.trace import INVALID_SPAN, SpanContext, TraceFlags from opentelemetry import trace from time import time_ns -from opentelemetry.sdk.metrics._internal.view import default_reservoir_factory +from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoir, @@ -134,11 +134,11 @@ def test_last_measurement_in_bucket(self): class TestExemplarReservoirFactory(TestCase): def test_sum_aggregation(self): - exemplar_reservoir = default_reservoir_factory(_SumAggregation) + exemplar_reservoir = _default_reservoir_factory(_SumAggregation) 
self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) def test_last_value_aggregation(self): - exemplar_reservoir = default_reservoir_factory(_LastValueAggregation) + exemplar_reservoir = _default_reservoir_factory(_LastValueAggregation) self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) def test_explicit_histogram_aggregation(self): - exemplar_reservoir = default_reservoir_factory(_ExplicitBucketHistogramAggregation) + exemplar_reservoir = _default_reservoir_factory(_ExplicitBucketHistogramAggregation) self.assertEqual(exemplar_reservoir, AlignedHistogramBucketExemplarReservoir) \ No newline at end of file From eece48d6e7b8a61d2ec89e53299562e5ff262f5f Mon Sep 17 00:00:00 2001 From: czhang771 Date: Thu, 29 Aug 2024 14:20:11 -0700 Subject: [PATCH 18/48] add samples for exemplar filter and custom reservoir factory --- .../reader/preferred_exemplarfilter.py | 50 +++++++++++ .../metrics/views/change_reservoir_factory.py | 82 +++++++++++++++++++ 2 files changed, 132 insertions(+) create mode 100644 docs/examples/metrics/reader/preferred_exemplarfilter.py create mode 100644 docs/examples/metrics/views/change_reservoir_factory.py diff --git a/docs/examples/metrics/reader/preferred_exemplarfilter.py b/docs/examples/metrics/reader/preferred_exemplarfilter.py new file mode 100644 index 0000000000..31712d3f39 --- /dev/null +++ b/docs/examples/metrics/reader/preferred_exemplarfilter.py @@ -0,0 +1,50 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import time + +from opentelemetry.metrics import get_meter_provider, set_meter_provider +from opentelemetry.sdk.metrics import Counter, MeterProvider +from opentelemetry.sdk.metrics.export import ( + ConsoleMetricExporter, + PeriodicExportingMetricReader, +) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + TraceBasedExemplarFilter, +) + +# Create an ExemplarFilter instance (e.g., TraceBasedExemplarFilter) +exemplar_filter = TraceBasedExemplarFilter() + +exporter = ConsoleMetricExporter() + +reader = PeriodicExportingMetricReader( + exporter, + export_interval_millis=5_000, +) + +# Set up the MeterProvider with the ExemplarFilter +provider = MeterProvider( + metric_readers=[reader], + exemplar_filter=exemplar_filter, # Pass the ExemplarFilter to the MeterProvider +) +set_meter_provider(provider) + +meter = get_meter_provider().get_meter("exemplar-filter-example", "0.1.2") +counter = meter.create_counter("my-counter") + +for value in range(10): + counter.add(value) + time.sleep(2.0) \ No newline at end of file diff --git a/docs/examples/metrics/views/change_reservoir_factory.py b/docs/examples/metrics/views/change_reservoir_factory.py new file mode 100644 index 0000000000..e6360e41ce --- /dev/null +++ b/docs/examples/metrics/views/change_reservoir_factory.py @@ -0,0 +1,82 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import time + +from opentelemetry.metrics import get_meter_provider, set_meter_provider +from opentelemetry.sdk.metrics import Counter, MeterProvider +from opentelemetry.sdk.metrics.export import ( + ConsoleMetricExporter, + PeriodicExportingMetricReader, +) +from opentelemetry.sdk.metrics.view import View +from typing import Callable, Optional, Set, Type, Any, Sequence +from opentelemetry.sdk.metrics._internal.aggregation import ( + Aggregation, + DefaultAggregation, + _Aggregation, + _ExplicitBucketHistogramAggregation, + _ExponentialBucketHistogramAggregation, +) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirFactory, + SimpleFixedSizeExemplarReservoir +) + +# Returns a factory for creating an exemplar reservoir based on the aggregation type and specified parameters +def generalized_reservoir_factory(size: int = 1, boundaries: Sequence[float] = None) -> Callable[[Type[_Aggregation]], ExemplarReservoirFactory]: + def factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: + if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + return lambda **kwargs: AlignedHistogramBucketExemplarReservoir(boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != 'boundaries'}) + else: + return lambda **kwargs: SimpleFixedSizeExemplarReservoir(size=size, **kwargs) + + return factory + +# Create a custom reservoir factory with specified parameters +custom_reservoir_factory = generalized_reservoir_factory(size=10) + +# Create a view with the custom reservoir factory +change_reservoir_factory_view= View( + instrument_name="my.counter", + name="name", + aggregation=DefaultAggregation(), + exemplar_reservoir_factory=custom_reservoir_factory, + ) + +# Use console exporter for the example +exporter = ConsoleMetricExporter() 
+ +# Create a metric reader with stdout exporter +reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) +provider = MeterProvider( + metric_readers=[ + reader, + ], + views=[ + change_reservoir_factory_view, + ], +) +set_meter_provider(provider) + +meter = get_meter_provider().get_meter("reservoir-factory-change", "0.1.2") + +my_counter = meter.create_counter("my.counter") + +while 1: + my_counter.add(random.randint(1, 10)) + time.sleep(random.random()) From bfaec2da28042ac681649fcab53b1ccb384afdb4 Mon Sep 17 00:00:00 2001 From: czhang771 Date: Fri, 30 Aug 2024 10:50:18 -0700 Subject: [PATCH 19/48] clean up documentation on exemplar reservoir --- .../sdk/metrics/_internal/exemplar/exemplar_reservoir.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 310d61a14d..5dcced5069 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -97,7 +97,8 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: """May return an Exemplar and resets the bucket for the next sampling period.""" if not self.__offered: return None - + + # filters out attributes from the measurement that are already included in the metric data point filtered_attributes = ( { k: v @@ -137,10 +138,6 @@ def __init__(self, size: int, **kwargs) -> None: ExemplarBucket() for _ in range(self._size) ] - def maxSize(self) -> int: - """Reservoir maximal size""" - return self._size - def collect(self, point_attributes: Attributes) -> list[Exemplar]: """Returns accumulated Exemplars and also resets the reservoir for the next sampling period @@ -164,7 +161,7 @@ def collect(self, point_attributes: Attributes) -> list[Exemplar]: 
return [*exemplars] def _reset(self) -> None: - """Reset the reservoir.""" + """Reset the reservoir by resetting any stateful logic after a collection cycle.""" pass From 68e8824a88de0bdd756df2db6929c2f242b37092 Mon Sep 17 00:00:00 2001 From: czhang771 Date: Fri, 30 Aug 2024 11:32:40 -0700 Subject: [PATCH 20/48] refactor aggregate method and fix bucket index --- .../sdk/metrics/_internal/aggregation.py | 39 +++++++++++-------- .../_internal/exemplar/exemplar_reservoir.py | 3 +- .../tests/metrics/test_aggregation.py | 2 +- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 8ab284c16c..21fc500fac 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -95,17 +95,11 @@ def __init__( self._reservoir = reservoir_factory() self._previous_point = None - def aggregate( - self, measurement: Measurement, should_sample_exemplar: bool = True + @abstractmethod + def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: - if should_sample_exemplar: - self._reservoir.offer( - measurement.value, - measurement.time_unix_nano, - measurement.attributes, - measurement.context, - ) - + pass + @abstractmethod def collect( self, @@ -119,7 +113,14 @@ def _collect_exemplars(self) -> Sequence[Exemplar]: self._attributes ) - + def sample_exemplar(self, measurement: Measurement) -> None: + self._reservoir.offer( + measurement.value, + measurement.time_unix_nano, + measurement.attributes, + measurement.context, + ) + class _DropAggregation(_Aggregation): def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True @@ -164,8 +165,10 @@ def aggregate( self._value = 0 self._value = self._value + measurement.value - - super().aggregate(measurement, 
should_sample_exemplar) + + if should_sample_exemplar: + self.sample_exemplar(measurement) + def collect( self, @@ -390,8 +393,8 @@ def aggregate( ): with self._lock: self._value = measurement.value - - super().aggregate(measurement, should_sample_exemplar) + if should_sample_exemplar: + self.sample_exemplar(measurement) def collect( self, @@ -491,7 +494,8 @@ def aggregate( self._value[bisect_left(self._boundaries, measurement_value)] += 1 - super().aggregate(measurement, should_sample_exemplar) + if should_sample_exemplar: + self.sample_exemplar(measurement) def collect( self, @@ -790,7 +794,8 @@ def aggregate( # in _ExplicitBucketHistogramAggregation.aggregate value.increment_bucket(bucket_index) - super().aggregate(measurement, should_sample_exemplar) + if should_sample_exemplar: + self.sample_exemplar(measurement) def collect( self, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 5dcced5069..4248badcc0 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -204,7 +204,8 @@ def _find_bucket_index( ctx: Context, ) -> int: if self._measurements_seen < self._size: - return self._measurements_seen + self._measurements_seen += 1 + return self._measurements_seen - 1 index = randrange(0, self._measurements_seen) self._measurements_seen += 1 diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index dcc374c1f1..7b5809af4b 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -648,7 +648,7 @@ def test_observable_gauge(self): class TestExemplarsFromAggregations(TestCase): def test_collection_simple_fixed_size_reservoir(self): - 
exemplar_reservoir_factory = lambda: SimpleFixedSizeExemplarReservoir(size=5) + exemplar_reservoir_factory = lambda: SimpleFixedSizeExemplarReservoir(size=3) synchronous_sum_aggregation = _SumAggregation( Mock(), True, From ed02f8bff921a35466b599d1b019bd8856d10d4d Mon Sep 17 00:00:00 2001 From: czhang771 Date: Fri, 30 Aug 2024 11:42:37 -0700 Subject: [PATCH 21/48] refactored FixedSizeExemplarReservoirABC --- .../_internal/exemplar/exemplar_reservoir.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 4248badcc0..9d356f8738 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -164,6 +164,19 @@ def _reset(self) -> None: """Reset the reservoir by resetting any stateful logic after a collection cycle.""" pass + @abstractmethod + def _find_bucket_index( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + ctx: Context, + ) -> int: + """ + Determines the bucket index for the given measurement. + Should be implemented by subclasses based on specific strategies. 
+ """ + pass class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC): """This reservoir uses an uniformly-weighted sampling algorithm based on the number From 4f5efa7758499b335fc5a6bbfadf61a779e90554 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Mon, 2 Sep 2024 12:04:40 +0200 Subject: [PATCH 22/48] Apply suggestions from review --- .../sdk/metrics/_internal/aggregation.py | 124 +++++++++-------- .../metrics/_internal/exemplar/__init__.py | 4 +- .../_internal/exemplar/exemplar_filter.py | 18 +-- .../_internal/exemplar/exemplar_reservoir.py | 125 ++++++++++++------ .../sdk/metrics/_internal/view.py | 6 +- .../tests/metrics/test_aggregation.py | 2 +- .../metrics/test_view_instrument_match.py | 6 +- 7 files changed, 175 insertions(+), 110 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 21fc500fac..271e256449 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -37,7 +37,7 @@ ) from opentelemetry.sdk.metrics._internal.exemplar import ( Exemplar, - ExemplarReservoirFactory, + ExemplarReservoirBuilder, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import ( Buckets, @@ -88,18 +88,25 @@ class _Aggregation(ABC, Generic[_DataPointVarT]): def __init__( self, attributes: Attributes, - reservoir_factory: ExemplarReservoirFactory, + reservoir_builder: ExemplarReservoirBuilder, ): self._lock = Lock() self._attributes = attributes - self._reservoir = reservoir_factory() + self._reservoir = reservoir_builder() self._previous_point = None @abstractmethod - def aggregate(self, measurement: Measurement, should_sample_exemplar: bool = True + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: + """Aggregate a measurement. 
+ + Args: + measurement: Measurement to aggregate + should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. + """ pass - + @abstractmethod def collect( self, @@ -109,18 +116,33 @@ def collect( pass def _collect_exemplars(self) -> Sequence[Exemplar]: - return self._reservoir.collect( - self._attributes - ) + """Returns the collected exemplars. - def sample_exemplar(self, measurement: Measurement) -> None: - self._reservoir.offer( + Returns: + The exemplars collected by the reservoir + """ + return self._reservoir.collect(self._attributes) + + def _sample_exemplar( + self, measurement: Measurement, should_sample_exemplar: bool + ) -> None: + """Offer the measurement to the exemplar reservoir for sampling. + + It should be called within the each :ref:`aggregate` call. + + Args: + measurement: The new measurement + should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. + """ + if should_sample_exemplar: + self._reservoir.offer( measurement.value, measurement.time_unix_nano, measurement.attributes, measurement.context, ) - + + class _DropAggregation(_Aggregation): def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True @@ -142,9 +164,9 @@ def __init__( instrument_is_monotonic: bool, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, - reservoir_factory: ExemplarReservoirFactory, + reservoir_builder: ExemplarReservoirBuilder, ): - super().__init__(attributes, reservoir_factory) + super().__init__(attributes, reservoir_builder) self._start_time_unix_nano = start_time_unix_nano self._instrument_aggregation_temporality = ( @@ -165,10 +187,8 @@ def aggregate( self._value = 0 self._value = self._value + measurement.value - - if should_sample_exemplar: - self.sample_exemplar(measurement) - + + self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, @@ -383,9 +403,9 @@ class 
_LastValueAggregation(_Aggregation[GaugePoint]): def __init__( self, attributes: Attributes, - reservoir_factory: ExemplarReservoirFactory, + reservoir_builder: ExemplarReservoirBuilder, ): - super().__init__(attributes, reservoir_factory) + super().__init__(attributes, reservoir_builder) self._value = None def aggregate( @@ -393,8 +413,8 @@ def aggregate( ): with self._lock: self._value = measurement.value - if should_sample_exemplar: - self.sample_exemplar(measurement) + + self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, @@ -410,7 +430,7 @@ def collect( value = self._value self._value = None - exemplars = self._collect_exemplars() + exemplars = self._collect_exemplars() return NumberDataPoint( attributes=self._attributes, @@ -427,7 +447,7 @@ def __init__( attributes: Attributes, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, - reservoir_factory: ExemplarReservoirFactory, + reservoir_builder: ExemplarReservoirBuilder, boundaries: Sequence[float] = ( 0.0, 5.0, @@ -449,8 +469,8 @@ def __init__( ): super().__init__( attributes, - reservoir_factory=partial( - reservoir_factory, boundaries=boundaries + reservoir_builder=partial( + reservoir_builder, boundaries=boundaries ), ) @@ -494,8 +514,7 @@ def aggregate( self._value[bisect_left(self._boundaries, measurement_value)] += 1 - if should_sample_exemplar: - self.sample_exemplar(measurement) + self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, @@ -517,8 +536,6 @@ def collect( self._min = inf self._max = -inf - exemplars = self._collect_exemplars() - if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA @@ -542,7 +559,7 @@ def collect( return HistogramDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, count=sum(value), @@ -572,7 +589,7 @@ def collect( return 
HistogramDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, count=sum(self._previous_value), @@ -602,7 +619,7 @@ class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): def __init__( self, attributes: Attributes, - reservoir_factory: ExemplarReservoirFactory, + reservoir_builder: ExemplarReservoirBuilder, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, # This is the default maximum number of buckets per positive or @@ -648,8 +665,8 @@ def __init__( super().__init__( attributes, - reservoir_factory=partial( - reservoir_factory, size=min(20, max_size) + reservoir_builder=partial( + reservoir_builder, size=min(20, max_size) ), ) @@ -794,8 +811,7 @@ def aggregate( # in _ExplicitBucketHistogramAggregation.aggregate value.increment_bucket(bucket_index) - if should_sample_exemplar: - self.sample_exemplar(measurement) + self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, @@ -826,8 +842,6 @@ def collect( self._zero_count = 0 self._scale = None - exemplars = self._collect_exemplars() - if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA @@ -851,7 +865,7 @@ def collect( return ExponentialHistogramDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, count=count, @@ -1015,7 +1029,7 @@ def collect( return ExponentialHistogramDataPoint( attributes=self._attributes, - exemplars=exemplars, + exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, count=self._previous_count, @@ -1187,7 +1201,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], 
ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1218,7 +1232,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1227,7 +1241,7 @@ def _create_aggregation( if isinstance(instrument, Counter): return _SumAggregation( attributes, - reservoir_factory=reservoir_factory(_SumAggregation), + reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=True, instrument_aggregation_temporality=( AggregationTemporality.DELTA @@ -1237,7 +1251,7 @@ def _create_aggregation( if isinstance(instrument, UpDownCounter): return _SumAggregation( attributes, - reservoir_factory=reservoir_factory(_SumAggregation), + reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=False, instrument_aggregation_temporality=( AggregationTemporality.DELTA @@ -1248,7 +1262,7 @@ def _create_aggregation( if isinstance(instrument, ObservableCounter): return _SumAggregation( attributes, - reservoir_factory=reservoir_factory(_SumAggregation), + reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=True, instrument_aggregation_temporality=( AggregationTemporality.CUMULATIVE @@ -1259,7 +1273,7 @@ def _create_aggregation( if isinstance(instrument, ObservableUpDownCounter): return _SumAggregation( attributes, - reservoir_factory=reservoir_factory(_SumAggregation), + reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=False, instrument_aggregation_temporality=( AggregationTemporality.CUMULATIVE @@ -1270,7 +1284,7 @@ def _create_aggregation( if isinstance(instrument, Histogram): return _ExplicitBucketHistogramAggregation( attributes, - reservoir_factory=reservoir_factory( + reservoir_builder=reservoir_factory( 
_ExplicitBucketHistogramAggregation ), instrument_aggregation_temporality=( @@ -1282,13 +1296,13 @@ def _create_aggregation( if isinstance(instrument, ObservableGauge): return _LastValueAggregation( attributes, - reservoir_factory=reservoir_factory(_LastValueAggregation), + reservoir_builder=reservoir_factory(_LastValueAggregation), ) if isinstance(instrument, _Gauge): return _LastValueAggregation( attributes, - reservoir_factory=reservoir_factory(_LastValueAggregation), + reservoir_builder=reservoir_factory(_LastValueAggregation), ) # pylint: disable=broad-exception-raised @@ -1309,7 +1323,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1375,7 +1389,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1409,7 +1423,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: @@ -1444,13 +1458,13 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: return _LastValueAggregation( attributes, - reservoir_factory=reservoir_factory(_LastValueAggregation), + reservoir_builder=reservoir_factory(_LastValueAggregation), ) @@ -1462,7 +1476,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ - [Type[_Aggregation]], 
ExemplarReservoirFactory + [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py index f3032c5d1e..ee93dd1827 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py @@ -22,7 +22,7 @@ from .exemplar_reservoir import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoir, - ExemplarReservoirFactory, + ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) @@ -34,6 +34,6 @@ "TraceBasedExemplarFilter", "AlignedHistogramBucketExemplarReservoir", "ExemplarReservoir", - "ExemplarReservoirFactory", + "ExemplarReservoirBuilder", "SimpleFixedSizeExemplarReservoir", ] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py index bbb26d3ed0..0e090f9e35 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py @@ -38,7 +38,7 @@ def should_sample( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. 
@@ -46,7 +46,7 @@ def should_sample( value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes - ctx: The Context of the measurement + context: The Context of the measurement """ raise NotImplementedError( "ExemplarFilter.should_sample is not implemented" @@ -65,7 +65,7 @@ def should_sample( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. @@ -73,7 +73,7 @@ def should_sample( value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes - ctx: The Context of the measurement + context: The Context of the measurement """ return True @@ -92,7 +92,7 @@ def should_sample( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. @@ -100,7 +100,7 @@ def should_sample( value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes - ctx: The Context of the measurement + context: The Context of the measurement """ return False @@ -118,7 +118,7 @@ def should_sample( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. 
@@ -126,9 +126,9 @@ def should_sample( value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes - ctx: The Context of the measurement + context: The Context of the measurement """ - span = trace.get_current_span(ctx) + span = trace.get_current_span(context) if span == INVALID_SPAN: return False return span.get_span_context().trace_flags.sampled diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 9d356f8738..2ce5d2461a 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -42,9 +42,16 @@ def offer( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> None: - """Offers a measurement to be sampled.""" + """Offers a measurement to be sampled. + + Args: + value: Measured value + time_unix_nano: Measurement instant + attributes: Measurement attributes + context: Measurement context + """ raise NotImplementedError("ExemplarReservoir.offer is not implemented") @abstractmethod @@ -79,13 +86,20 @@ def offer( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> None: - """Offers a measurement to be sampled.""" + """Offers a measurement to be sampled. 
+ + Args: + value: Measured value + time_unix_nano: Measurement instant + attributes: Measurement attributes + context: Measurement context + """ self.__value = value self.__time_unix_nano = time_unix_nano self.__attributes = attributes - span = trace.get_current_span(ctx) + span = trace.get_current_span(context) if span != INVALID_SPAN: span_context = span.get_span_context() self.__span_id = span_context.span_id @@ -97,8 +111,10 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: """May return an Exemplar and resets the bucket for the next sampling period.""" if not self.__offered: return None - + # filters out attributes from the measurement that are already included in the metric data point + # See the specification for more details: + # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar filtered_attributes = ( { k: v @@ -120,6 +136,7 @@ def collect(self, point_attributes: Attributes) -> Exemplar | None: return exemplar def __reset(self) -> None: + """Reset the bucket state after a collection cycle.""" self.__value = 0 self.__attributes = {} self.__time_unix_nano = 0 @@ -128,6 +145,10 @@ def __reset(self) -> None: self.__offered = False +class BucketIndexError(ValueError): + """An exception raised when the bucket index cannot be found.""" + + class FixedSizeExemplarReservoirABC(ExemplarReservoir): """Abstract class for a reservoir with fixed size.""" @@ -160,9 +181,32 @@ def collect(self, point_attributes: Attributes) -> list[Exemplar]: self._reset() return [*exemplars] - def _reset(self) -> None: - """Reset the reservoir by resetting any stateful logic after a collection cycle.""" - pass + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + context: Context, + ) -> None: + """Offers a measurement to be sampled. 
+ + Args: + value: Measured value + time_unix_nano: Measurement instant + attributes: Measurement attributes + context: Measurement context + """ + try: + index = self._find_bucket_index( + value, time_unix_nano, attributes, context + ) + + self._reservoir_storage[index].offer( + value, time_unix_nano, attributes, context + ) + except BucketIndexError: + # Ignore invalid bucket index + pass @abstractmethod def _find_bucket_index( @@ -170,14 +214,31 @@ def _find_bucket_index( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> int: - """ - Determines the bucket index for the given measurement. - Should be implemented by subclasses based on specific strategies. + """Determines the bucket index for the given measurement. + + It should be implemented by subclasses based on specific strategies. + + Args: + value: Measured value + time_unix_nano: Measurement instant + attributes: Measurement attributes + context: Measurement context + + Returns: + The bucket index + + Raises: + BucketIndexError: If no bucket index can be found. 
""" pass + def _reset(self) -> None: + """Reset the reservoir by resetting any stateful logic after a collection cycle.""" + pass + + class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC): """This reservoir uses an uniformly-weighted sampling algorithm based on the number of samples the reservoir has seen so far to determine if the offered measurements @@ -195,34 +256,22 @@ def _reset(self) -> None: super()._reset() self._measurements_seen = 0 - def offer( - self, - value: Union[int, float], - time_unix_nano: int, - attributes: Attributes, - ctx: Context, - ) -> None: - """Offers a measurement to be sampled.""" - index = self._find_bucket_index(value, time_unix_nano, attributes, ctx) - if index != -1: - self._reservoir_storage[index].offer( - value, time_unix_nano, attributes, ctx - ) - def _find_bucket_index( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> int: + self._measurements_seen += 1 if self._measurements_seen < self._size: - self._measurements_seen += 1 return self._measurements_seen - 1 index = randrange(0, self._measurements_seen) - self._measurements_seen += 1 - return index if index < self._size else -1 + if index < self._size: + return index + + raise BucketIndexError("Unable to find the bucket index.") class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC): @@ -243,12 +292,14 @@ def offer( value: Union[int, float], time_unix_nano: int, attributes: Attributes, - ctx: Context, + context: Context, ) -> None: """Offers a measurement to be sampled.""" - index = self._find_bucket_index(value, time_unix_nano, attributes, ctx) + index = self._find_bucket_index( + value, time_unix_nano, attributes, context + ) self._reservoir_storage[index].offer( - value, time_unix_nano, attributes, ctx + value, time_unix_nano, attributes, context ) def _find_bucket_index( @@ -256,7 +307,7 @@ def _find_bucket_index( value: Union[int, float], time_unix_nano: int, 
attributes: Attributes, - ctx: Context, + context: Context, ) -> int: for i, boundary in enumerate(self._boundaries): if value <= boundary: @@ -264,10 +315,10 @@ def _find_bucket_index( return len(self._boundaries) -ExemplarReservoirFactory: TypeAlias = Callable[ +ExemplarReservoirBuilder: TypeAlias = Callable[ [dict[str, Any]], ExemplarReservoir ] -ExemplarReservoirFactory.__doc__ = """ExemplarReservoir factory. +ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder. It may receive the Aggregation parameters it is bounded to; e.g. the _ExplicitBucketHistogramAggregation will provide the boundaries. diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index 8d913cf8d0..e9176af33e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -27,7 +27,7 @@ ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoirFactory, + ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) @@ -36,7 +36,7 @@ def _default_reservoir_factory( aggregationType: Type[_Aggregation], -) -> ExemplarReservoirFactory: +) -> ExemplarReservoirBuilder: """Default reservoir factory per aggregation.""" if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): return AlignedHistogramBucketExemplarReservoir @@ -115,7 +115,7 @@ def __init__( attribute_keys: Optional[Set[str]] = None, aggregation: Optional[Aggregation] = None, exemplar_reservoir_factory: Optional[ - Callable[[Type[_Aggregation]], ExemplarReservoirFactory] + Callable[[Type[_Aggregation]], ExemplarReservoirBuilder] ] = None, instrument_unit: Optional[str] = None, ): diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 7b5809af4b..4c3f1518d9 100644 --- 
a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -50,7 +50,7 @@ from opentelemetry.util.types import Attributes from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoirFactory, + ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index 6d80a752bf..c2ad05e21c 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -48,13 +48,13 @@ from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoir, - ExemplarReservoirFactory, + ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir ) from typing import Callable, Optional, Set, Type, Any, Sequence -def generalized_reservoir_factory(size: int = 1, boundaries: Sequence[float] = None) -> Callable[[Type[_Aggregation]], ExemplarReservoirFactory]: - def factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: +def generalized_reservoir_factory(size: int = 1, boundaries: Sequence[float] = None) -> Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]: + def factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirBuilder: if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): return lambda **kwargs: AlignedHistogramBucketExemplarReservoir(boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != 'boundaries'}) else: From 682a1762b35594d18604456c5d6f7ce5246b6049 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Mon, 2 Sep 2024 12:04:47 +0200 Subject: [PATCH 23/48] Lint the code --- .../reader/preferred_exemplarfilter.py | 10 +- .../metrics/views/change_reservoir_factory.py | 53 +++++---- 
.../_internal/metrics_encoder/__init__.py | 11 +- .../integration_test/test_histogram_export.py | 88 ++++++++++----- .../integration_test/test_sum_aggregation.py | 27 +++-- .../tests/metrics/test_aggregation.py | 101 ++++++++++++------ .../tests/metrics/test_exemplarfilter.py | 22 ++-- .../tests/metrics/test_exemplarreservoir.py | 96 ++++++++++------- .../metrics/test_view_instrument_match.py | 101 +++++++++++------- 9 files changed, 321 insertions(+), 188 deletions(-) diff --git a/docs/examples/metrics/reader/preferred_exemplarfilter.py b/docs/examples/metrics/reader/preferred_exemplarfilter.py index 31712d3f39..1840cebfcc 100644 --- a/docs/examples/metrics/reader/preferred_exemplarfilter.py +++ b/docs/examples/metrics/reader/preferred_exemplarfilter.py @@ -15,15 +15,15 @@ from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import Counter, MeterProvider -from opentelemetry.sdk.metrics.export import ( - ConsoleMetricExporter, - PeriodicExportingMetricReader, -) from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) +from opentelemetry.sdk.metrics.export import ( + ConsoleMetricExporter, + PeriodicExportingMetricReader, +) # Create an ExemplarFilter instance (e.g., TraceBasedExemplarFilter) exemplar_filter = TraceBasedExemplarFilter() @@ -47,4 +47,4 @@ for value in range(10): counter.add(value) - time.sleep(2.0) \ No newline at end of file + time.sleep(2.0) diff --git a/docs/examples/metrics/views/change_reservoir_factory.py b/docs/examples/metrics/views/change_reservoir_factory.py index e6360e41ce..843c1bf3a9 100644 --- a/docs/examples/metrics/views/change_reservoir_factory.py +++ b/docs/examples/metrics/views/change_reservoir_factory.py @@ -14,15 +14,10 @@ import random import time +from typing import Any, Callable, Optional, Sequence, Set, Type from opentelemetry.metrics import get_meter_provider, set_meter_provider from 
opentelemetry.sdk.metrics import Counter, MeterProvider -from opentelemetry.sdk.metrics.export import ( - ConsoleMetricExporter, - PeriodicExportingMetricReader, -) -from opentelemetry.sdk.metrics.view import View -from typing import Callable, Optional, Set, Type, Any, Sequence from opentelemetry.sdk.metrics._internal.aggregation import ( Aggregation, DefaultAggregation, @@ -34,29 +29,45 @@ AlignedHistogramBucketExemplarReservoir, ExemplarReservoir, ExemplarReservoirFactory, - SimpleFixedSizeExemplarReservoir + SimpleFixedSizeExemplarReservoir, +) +from opentelemetry.sdk.metrics.export import ( + ConsoleMetricExporter, + PeriodicExportingMetricReader, ) +from opentelemetry.sdk.metrics.view import View + # Returns a factory for creating an exemplar reservoir based on the aggregation type and specified parameters -def generalized_reservoir_factory(size: int = 1, boundaries: Sequence[float] = None) -> Callable[[Type[_Aggregation]], ExemplarReservoirFactory]: - def factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirFactory: - if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): - return lambda **kwargs: AlignedHistogramBucketExemplarReservoir(boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != 'boundaries'}) - else: - return lambda **kwargs: SimpleFixedSizeExemplarReservoir(size=size, **kwargs) - - return factory +def generalized_reservoir_factory( + size: int = 1, boundaries: Sequence[float] = None +) -> Callable[[Type[_Aggregation]], ExemplarReservoirFactory]: + def factory( + aggregationType: Type[_Aggregation], + ) -> ExemplarReservoirFactory: + if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + return lambda **kwargs: AlignedHistogramBucketExemplarReservoir( + boundaries=boundaries or [], + **{k: v for k, v in kwargs.items() if k != "boundaries"}, + ) + else: + return lambda **kwargs: SimpleFixedSizeExemplarReservoir( + size=size, **kwargs + ) + + return factory + # Create a custom reservoir 
factory with specified parameters custom_reservoir_factory = generalized_reservoir_factory(size=10) # Create a view with the custom reservoir factory -change_reservoir_factory_view= View( - instrument_name="my.counter", - name="name", - aggregation=DefaultAggregation(), - exemplar_reservoir_factory=custom_reservoir_factory, - ) +change_reservoir_factory_view = View( + instrument_name="my.counter", + name="name", + aggregation=DefaultAggregation(), + exemplar_reservoir_factory=custom_reservoir_factory, +) # Use console exporter for the example exporter = ConsoleMetricExporter() diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py index f3fd673712..f757b2476f 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py @@ -28,7 +28,7 @@ ) from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, - _encode_span_id, + _encode_span_id, _encode_trace_id, ) from opentelemetry.sdk.environment_variables import ( @@ -343,6 +343,7 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: resource_metrics = resource_data return ExportMetricsServiceRequest(resource_metrics=resource_metrics) + def encode_exemplars(sdk_exemplars: list) -> list: """ Converts a list of SDK Exemplars into a list of protobuf Exemplars. 
@@ -359,7 +360,9 @@ def encode_exemplars(sdk_exemplars: list) -> list: time_unix_nano=sdk_exemplar.time_unix_nano, span_id=_encode_span_id(sdk_exemplar.span_id), trace_id=_encode_trace_id(sdk_exemplar.trace_id), - filtered_attributes=_encode_attributes(sdk_exemplar.filtered_attributes), + filtered_attributes=_encode_attributes( + sdk_exemplar.filtered_attributes + ), ) # Assign the value based on its type in the SDK exemplar if isinstance(sdk_exemplar.value, float): @@ -369,5 +372,5 @@ def encode_exemplars(sdk_exemplars: list) -> list: else: raise ValueError("Exemplar value must be an int or float") pb_exemplars.append(pb_exemplar) - - return pb_exemplars \ No newline at end of file + + return pb_exemplars diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py index 2d4b6d4f43..09f7bb9db7 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py @@ -15,13 +15,13 @@ from unittest import TestCase from opentelemetry.sdk.metrics import MeterProvider -from opentelemetry.sdk.metrics.export import InMemoryMetricReader -from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) +from opentelemetry.sdk.metrics.export import InMemoryMetricReader +from opentelemetry.sdk.resources import SERVICE_NAME, Resource class TestHistogramExport(TestCase): @@ -93,7 +93,7 @@ def test_histogram_counter_collection(self): ), 1, ) - + def test_histogram_with_exemplars(self): in_memory_metric_reader = InMemoryMetricReader() @@ -101,34 +101,58 @@ def test_histogram_with_exemplars(self): provider = MeterProvider( resource=Resource.create({SERVICE_NAME: "otel-test"}), metric_readers=[in_memory_metric_reader], - 
exemplar_filter=AlwaysOnExemplarFilter() + exemplar_filter=AlwaysOnExemplarFilter(), ) meter = provider.get_meter("my-meter") histogram = meter.create_histogram("my_histogram") - - histogram.record(2, {"attribute": "value1"}) # Should go in the first bucket - histogram.record(7, {"attribute": "value2"}) # Should go in the second bucket - histogram.record(9, {"attribute": "value2"}) # Should also go in the second bucket - histogram.record(15, {"attribute": "value3"}) # Should go in the third bucket + + histogram.record( + 2, {"attribute": "value1"} + ) # Should go in the first bucket + histogram.record( + 7, {"attribute": "value2"} + ) # Should go in the second bucket + histogram.record( + 9, {"attribute": "value2"} + ) # Should also go in the second bucket + histogram.record( + 15, {"attribute": "value3"} + ) # Should go in the third bucket metric_data = in_memory_metric_reader.get_metrics_data() - self.assertEqual(len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1) - histogram_metric = metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + self.assertEqual( + len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1 + ) + histogram_metric = ( + metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + ) self.assertEqual(len(histogram_metric.data.data_points), 3) - self.assertEqual(len(histogram_metric.data.data_points[0].exemplars), 1) - self.assertEqual(len(histogram_metric.data.data_points[1].exemplars), 1) - self.assertEqual(len(histogram_metric.data.data_points[2].exemplars), 1) + self.assertEqual( + len(histogram_metric.data.data_points[0].exemplars), 1 + ) + self.assertEqual( + len(histogram_metric.data.data_points[1].exemplars), 1 + ) + self.assertEqual( + len(histogram_metric.data.data_points[2].exemplars), 1 + ) self.assertEqual(histogram_metric.data.data_points[0].sum, 2) self.assertEqual(histogram_metric.data.data_points[1].sum, 16) self.assertEqual(histogram_metric.data.data_points[2].sum, 15) - 
self.assertEqual(histogram_metric.data.data_points[0].exemplars[0].value, 2.0) - self.assertEqual(histogram_metric.data.data_points[1].exemplars[0].value, 9.0) - self.assertEqual(histogram_metric.data.data_points[2].exemplars[0].value, 15.0) + self.assertEqual( + histogram_metric.data.data_points[0].exemplars[0].value, 2.0 + ) + self.assertEqual( + histogram_metric.data.data_points[1].exemplars[0].value, 9.0 + ) + self.assertEqual( + histogram_metric.data.data_points[2].exemplars[0].value, 15.0 + ) def test_filter_with_exemplars(self): in_memory_metric_reader = InMemoryMetricReader() @@ -136,20 +160,32 @@ def test_filter_with_exemplars(self): provider = MeterProvider( resource=Resource.create({SERVICE_NAME: "otel-test"}), metric_readers=[in_memory_metric_reader], - exemplar_filter=AlwaysOffExemplarFilter() + exemplar_filter=AlwaysOffExemplarFilter(), ) meter = provider.get_meter("my-meter") histogram = meter.create_histogram("my_histogram") - - histogram.record(2, {"attribute": "value1"}) # Should go in the first bucket - histogram.record(7, {"attribute": "value2"}) # Should go in the second bucket - + + histogram.record( + 2, {"attribute": "value1"} + ) # Should go in the first bucket + histogram.record( + 7, {"attribute": "value2"} + ) # Should go in the second bucket + metric_data = in_memory_metric_reader.get_metrics_data() - self.assertEqual(len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1) - histogram_metric = metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + self.assertEqual( + len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1 + ) + histogram_metric = ( + metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + ) self.assertEqual(len(histogram_metric.data.data_points), 2) - self.assertEqual(len(histogram_metric.data.data_points[0].exemplars), 0) - self.assertEqual(len(histogram_metric.data.data_points[1].exemplars), 0) \ No newline at end of file + self.assertEqual( + 
len(histogram_metric.data.data_points[0].exemplars), 0 + ) + self.assertEqual( + len(histogram_metric.data.data_points[1].exemplars), 0 + ) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py index 38be37bfea..36de73be04 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py @@ -20,19 +20,20 @@ from pytest import mark +from opentelemetry.context import Context from opentelemetry.metrics import Observation from opentelemetry.sdk.metrics import Counter, MeterProvider, ObservableCounter -from opentelemetry.sdk.metrics.export import ( - AggregationTemporality, - InMemoryMetricReader, -) -from opentelemetry.sdk.metrics.view import SumAggregation from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) -from opentelemetry.context import Context +from opentelemetry.sdk.metrics.export import ( + AggregationTemporality, + InMemoryMetricReader, +) +from opentelemetry.sdk.metrics.view import SumAggregation + class TestSumAggregation(TestCase): @mark.skipif( @@ -479,7 +480,7 @@ def test_synchronous_cumulative_temporality(self): start_time_unix_nano, metric_data.start_time_unix_nano ) self.assertEqual(metric_data.value, 80) - + def test_sum_aggregation_with_exemplars(self): in_memory_metric_reader = InMemoryMetricReader() @@ -498,15 +499,19 @@ def test_sum_aggregation_with_exemplars(self): metric_data = in_memory_metric_reader.get_metrics_data() - self.assertEqual(len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1) + self.assertEqual( + len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1 + ) - sum_metric = metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + sum_metric = ( + metric_data.resource_metrics[0].scope_metrics[0].metrics[0] + ) data_points 
= sum_metric.data.data_points self.assertEqual(len(data_points), 3) - + self.assertEqual(data_points[0].exemplars[0].value, 2.0) self.assertEqual(data_points[1].exemplars[0].value, 5.0) self.assertEqual(data_points[2].exemplars[0].value, 3.0) - provider.shutdown() \ No newline at end of file + provider.shutdown() diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 4c3f1518d9..e76c0fcc80 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -26,6 +26,11 @@ _LastValueAggregation, _SumAggregation, ) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoirBuilder, + SimpleFixedSizeExemplarReservoir, +) from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, @@ -48,11 +53,7 @@ SumAggregation, ) from opentelemetry.util.types import Attributes -from opentelemetry.sdk.metrics._internal.exemplar import ( - AlignedHistogramBucketExemplarReservoir, - ExemplarReservoirBuilder, - SimpleFixedSizeExemplarReservoir, -) + def measurement( value: Union[int, float], attributes: Attributes = None @@ -323,7 +324,9 @@ def test_aggregate(self): Mock(), AggregationTemporality.DELTA, 0, - _default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory( + _ExplicitBucketHistogramAggregation + ), boundaries=[0, 2, 4], ) ) @@ -364,7 +367,9 @@ def test_min_max(self): Mock(), AggregationTemporality.CUMULATIVE, 0, - _default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory( + _ExplicitBucketHistogramAggregation + ), ) ) @@ -382,7 +387,9 @@ def test_min_max(self): Mock(), AggregationTemporality.CUMULATIVE, 0, - _default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory( + _ExplicitBucketHistogramAggregation + ), record_min_max=False, ) ) @@ -406,7 +413,9 @@ def 
test_collect(self): Mock(), AggregationTemporality.DELTA, 0, - _default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory( + _ExplicitBucketHistogramAggregation + ), boundaries=[0, 1, 2], ) ) @@ -445,7 +454,9 @@ def test_boundaries(self): Mock(), AggregationTemporality.CUMULATIVE, 0, - _default_reservoir_factory(_ExplicitBucketHistogramAggregation), + _default_reservoir_factory( + _ExplicitBucketHistogramAggregation + ), )._boundaries, ( 0.0, @@ -645,10 +656,13 @@ def test_observable_gauge(self): ) self.assertIsInstance(aggregation, _LastValueAggregation) + class TestExemplarsFromAggregations(TestCase): - + def test_collection_simple_fixed_size_reservoir(self): - exemplar_reservoir_factory = lambda: SimpleFixedSizeExemplarReservoir(size=3) + exemplar_reservoir_factory = lambda: SimpleFixedSizeExemplarReservoir( + size=3 + ) synchronous_sum_aggregation = _SumAggregation( Mock(), True, @@ -662,11 +676,15 @@ def test_collection_simple_fixed_size_reservoir(self): synchronous_sum_aggregation.aggregate(measurement(3)) self.assertEqual(synchronous_sum_aggregation._value, 6) - datapoint = synchronous_sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 0) - self.assertEqual(len(datapoint.exemplars), 3) - - def test_collection_simple_fixed_size_reservoir_with_default_reservoir(self): - + datapoint = synchronous_sum_aggregation.collect( + AggregationTemporality.CUMULATIVE, 0 + ) + self.assertEqual(len(datapoint.exemplars), 3) + + def test_collection_simple_fixed_size_reservoir_with_default_reservoir( + self, + ): + synchronous_sum_aggregation = _SumAggregation( Mock(), True, @@ -674,18 +692,22 @@ def test_collection_simple_fixed_size_reservoir_with_default_reservoir(self): 0, _default_reservoir_factory(_SumAggregation), ) - + synchronous_sum_aggregation.aggregate(measurement(1)) synchronous_sum_aggregation.aggregate(measurement(2)) synchronous_sum_aggregation.aggregate(measurement(3)) 
self.assertEqual(synchronous_sum_aggregation._value, 6) - datapoint = synchronous_sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 0) - self.assertEqual(len(datapoint.exemplars), 1) - + datapoint = synchronous_sum_aggregation.collect( + AggregationTemporality.CUMULATIVE, 0 + ) + self.assertEqual(len(datapoint.exemplars), 1) + def test_collection_aligned_histogram_bucket_reservoir(self): boundaries = [5.0, 10.0, 20.0] - exemplar_reservoir_factory = lambda: AlignedHistogramBucketExemplarReservoir(boundaries) + exemplar_reservoir_factory = ( + lambda: AlignedHistogramBucketExemplarReservoir(boundaries) + ) synchronous_sum_aggregation = _SumAggregation( Mock(), True, @@ -694,24 +716,37 @@ def test_collection_aligned_histogram_bucket_reservoir(self): exemplar_reservoir_factory, ) - synchronous_sum_aggregation.aggregate(measurement(2.0)) - synchronous_sum_aggregation.aggregate(measurement(4.0)) - synchronous_sum_aggregation.aggregate(measurement(6.0)) + synchronous_sum_aggregation.aggregate(measurement(2.0)) + synchronous_sum_aggregation.aggregate(measurement(4.0)) + synchronous_sum_aggregation.aggregate(measurement(6.0)) synchronous_sum_aggregation.aggregate(measurement(15.0)) - synchronous_sum_aggregation.aggregate(measurement(25.0)) + synchronous_sum_aggregation.aggregate(measurement(25.0)) - datapoint = synchronous_sum_aggregation.collect(AggregationTemporality.CUMULATIVE, 0) - self.assertEqual(len(datapoint.exemplars), 4) + datapoint = synchronous_sum_aggregation.collect( + AggregationTemporality.CUMULATIVE, 0 + ) + self.assertEqual(len(datapoint.exemplars), 4) # Verify that exemplars are associated with the correct boundaries expected_buckets = [ - (4.0, boundaries[0]), # First bucket, should hold the last value <= 5.0 - (6.0, boundaries[1]), # Second bucket, should hold the last value <= 10.0 - (15.0, boundaries[2]), # Third bucket, should hold the last value <= 20.0 - (25.0, None), # Last bucket, should hold the value > 20.0 + ( + 4.0, + boundaries[0], 
+ ), # First bucket, should hold the last value <= 5.0 + ( + 6.0, + boundaries[1], + ), # Second bucket, should hold the last value <= 10.0 + ( + 15.0, + boundaries[2], + ), # Third bucket, should hold the last value <= 20.0 + (25.0, None), # Last bucket, should hold the value > 20.0 ] - for exemplar, (value, boundary) in zip(datapoint.exemplars, expected_buckets): + for exemplar, (value, boundary) in zip( + datapoint.exemplars, expected_buckets + ): self.assertEqual(exemplar.value, value) if boundary is not None: self.assertLessEqual(exemplar.value, boundary) diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py index df7ccc369b..daca0e6061 100644 --- a/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py +++ b/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py @@ -2,13 +2,14 @@ from opentelemetry import trace from opentelemetry.context import Context -from opentelemetry.trace.span import SpanContext -from opentelemetry.trace import TraceFlags from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) +from opentelemetry.trace import TraceFlags +from opentelemetry.trace.span import SpanContext + class TestAlwaysOnExemplarFilter(TestCase): def test_should_sample(self): @@ -25,14 +26,15 @@ def test_should_sample(self): class TestTraceBasedExemplarFilter(TestCase): TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) + def test_should_not_sample_without_trace(self): filter = TraceBasedExemplarFilter() span_context = SpanContext( - trace_id=self.TRACE_ID, + trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, - trace_flags= TraceFlags(TraceFlags.DEFAULT), - trace_state={} + trace_flags=TraceFlags(TraceFlags.DEFAULT), + trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) @@ -41,16 +43,16 @@ def 
test_should_not_sample_without_trace(self): def test_should_not_sample_with_invalid_span(self): filter = TraceBasedExemplarFilter() self.assertFalse(filter.should_sample(10, 0, {}, Context())) - + def test_should_sample_when_trace_is_sampled(self): filter = TraceBasedExemplarFilter() span_context = SpanContext( - trace_id=self.TRACE_ID, + trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, - trace_flags= TraceFlags(TraceFlags.SAMPLED), - trace_state={} + trace_flags=TraceFlags(TraceFlags.SAMPLED), + trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) - self.assertTrue(filter.should_sample(10, 0, {}, ctx)) \ No newline at end of file + self.assertTrue(filter.should_sample(10, 0, {}, ctx)) diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py index 4708ffc5d0..f98a2c943b 100644 --- a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py +++ b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py @@ -1,39 +1,40 @@ +from time import time_ns from unittest import TestCase -from opentelemetry.context import Context -from opentelemetry.trace import INVALID_SPAN, SpanContext, TraceFlags from opentelemetry import trace -from time import time_ns -from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory -from opentelemetry.sdk.metrics._internal.exemplar import ( - AlignedHistogramBucketExemplarReservoir, - ExemplarReservoir, - ExemplarReservoirFactory, - SimpleFixedSizeExemplarReservoir -) +from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( _ExplicitBucketHistogramAggregation, _LastValueAggregation, _SumAggregation, ) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirFactory, + SimpleFixedSizeExemplarReservoir, +) +from opentelemetry.sdk.metrics._internal.view import 
_default_reservoir_factory +from opentelemetry.trace import INVALID_SPAN, SpanContext, TraceFlags + class TestSimpleFixedSizeExemplarReservoir(TestCase): - + TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) def test_no_measurements(self): reservoir = SimpleFixedSizeExemplarReservoir(10) self.assertEqual(len(reservoir.collect({})), 0) - + def test_has_context(self): reservoir = SimpleFixedSizeExemplarReservoir(1) span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, - trace_flags= TraceFlags(TraceFlags.SAMPLED), - trace_state={} + trace_flags=TraceFlags(TraceFlags.SAMPLED), + trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) @@ -42,23 +43,25 @@ def test_has_context(self): self.assertEqual(len(exemplars), 1) self.assertEqual(exemplars[0].trace_id, self.TRACE_ID) self.assertEqual(exemplars[0].span_id, self.SPAN_ID) - + def test_filter_attributes(self): reservoir = SimpleFixedSizeExemplarReservoir(1) span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, - trace_flags= TraceFlags(TraceFlags.SAMPLED), - trace_state={} + trace_flags=TraceFlags(TraceFlags.SAMPLED), + trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) - reservoir.offer(1, time_ns(), {"key1": "value1", "key2": "value2"}, ctx) + reservoir.offer( + 1, time_ns(), {"key1": "value1", "key2": "value2"}, ctx + ) exemplars = reservoir.collect({"key2": "value2", "key3": "value3"}) self.assertEqual(len(exemplars), 1) self.assertNotEqual("key1", exemplars[0].filtered_attributes) - + def test_reset_after_collection(self): reservoir = SimpleFixedSizeExemplarReservoir(4) @@ -79,33 +82,37 @@ def test_reset_after_collection(self): self.assertEqual(new_exemplars[0].value, 4.0) self.assertEqual(new_exemplars[1].value, 5.0) + class TestAlignedHistogramBucketExemplarReservoir(TestCase): - + TRACE_ID = 
int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) - + def test_measurement_in_buckets(self): - reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25, 50, 75]) + reservoir = AlignedHistogramBucketExemplarReservoir( + [0, 5, 10, 25, 50, 75] + ) span_context = SpanContext( - trace_id=self.TRACE_ID, - span_id=self.SPAN_ID, - is_remote=False, - trace_flags= TraceFlags(TraceFlags.SAMPLED), - trace_state={} + trace_id=self.TRACE_ID, + span_id=self.SPAN_ID, + is_remote=False, + trace_flags=TraceFlags(TraceFlags.SAMPLED), + trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) reservoir.offer(52, time_ns(), {"bucket": "5"}, ctx) reservoir.offer(7, time_ns(), {"bucket": "3"}, ctx) reservoir.offer(6, time_ns(), {"bucket": "3"}, ctx) - + exemplars = reservoir.collect({"bucket": "3"}) - + self.assertEqual(len(exemplars), 2) - self.assertEqual(exemplars[0].value, 6) + self.assertEqual(exemplars[0].value, 6) self.assertEqual(exemplars[1].value, 52) self.assertEqual(len(exemplars[0].filtered_attributes), 0) self.assertNotEqual(exemplars[1].filtered_attributes, {"bucket": "5"}) + def test_last_measurement_in_bucket(self): reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25]) span_context = SpanContext( @@ -113,32 +120,41 @@ def test_last_measurement_in_bucket(self): span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), - trace_state={} + trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) - + # Offer values to the reservoir - reservoir.offer(2, time_ns(), {"bucket": "1"}, ctx) # Bucket 1 - reservoir.offer(7, time_ns(), {"bucket": "2"}, ctx) # Bucket 2 - reservoir.offer(8, time_ns(), {"bucket": "2"}, ctx) # Bucket 2 - should replace the 7 + reservoir.offer(2, time_ns(), {"bucket": "1"}, ctx) # Bucket 1 + reservoir.offer(7, time_ns(), {"bucket": "2"}, ctx) # Bucket 2 + reservoir.offer( + 8, time_ns(), 
{"bucket": "2"}, ctx + ) # Bucket 2 - should replace the 7 reservoir.offer(15, time_ns(), {"bucket": "3"}, ctx) # Bucket 3 exemplars = reservoir.collect({}) - + # Check that each bucket has the correct value - self.assertEqual(len(exemplars), 3) + self.assertEqual(len(exemplars), 3) self.assertEqual(exemplars[0].value, 2) self.assertEqual(exemplars[1].value, 8) self.assertEqual(exemplars[2].value, 15) - + + class TestExemplarReservoirFactory(TestCase): def test_sum_aggregation(self): exemplar_reservoir = _default_reservoir_factory(_SumAggregation) self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) + def test_last_value_aggregation(self): exemplar_reservoir = _default_reservoir_factory(_LastValueAggregation) self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) + def test_explicit_histogram_aggregation(self): - exemplar_reservoir = _default_reservoir_factory(_ExplicitBucketHistogramAggregation) - self.assertEqual(exemplar_reservoir, AlignedHistogramBucketExemplarReservoir) \ No newline at end of file + exemplar_reservoir = _default_reservoir_factory( + _ExplicitBucketHistogramAggregation + ) + self.assertEqual( + exemplar_reservoir, AlignedHistogramBucketExemplarReservoir + ) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index c2ad05e21c..dd9e778144 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -15,6 +15,7 @@ # pylint: disable=protected-access from time import time_ns +from typing import Any, Callable, Optional, Sequence, Set, Type from unittest import TestCase from unittest.mock import MagicMock, Mock @@ -23,9 +24,20 @@ _ViewInstrumentMatch, ) from opentelemetry.sdk.metrics._internal.aggregation import ( + Aggregation, + DefaultAggregation, + _Aggregation, _DropAggregation, + _ExplicitBucketHistogramAggregation, + 
_ExponentialBucketHistogramAggregation, _LastValueAggregation, ) +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirBuilder, + SimpleFixedSizeExemplarReservoir, +) from opentelemetry.sdk.metrics._internal.instrument import _Counter, _Histogram from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.sdk_configuration import ( @@ -38,29 +50,26 @@ LastValueAggregation, View, ) -from opentelemetry.sdk.metrics._internal.aggregation import ( - Aggregation, - DefaultAggregation, - _Aggregation, - _ExplicitBucketHistogramAggregation, - _ExponentialBucketHistogramAggregation, -) -from opentelemetry.sdk.metrics._internal.exemplar import ( - AlignedHistogramBucketExemplarReservoir, - ExemplarReservoir, - ExemplarReservoirBuilder, - SimpleFixedSizeExemplarReservoir -) -from typing import Callable, Optional, Set, Type, Any, Sequence -def generalized_reservoir_factory(size: int = 1, boundaries: Sequence[float] = None) -> Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]: - def factory(aggregationType: Type[_Aggregation]) -> ExemplarReservoirBuilder: - if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): - return lambda **kwargs: AlignedHistogramBucketExemplarReservoir(boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != 'boundaries'}) - else: - return lambda **kwargs: SimpleFixedSizeExemplarReservoir(size=size, **kwargs) - - return factory + +def generalized_reservoir_factory( + size: int = 1, boundaries: Sequence[float] = None +) -> Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]: + def factory( + aggregationType: Type[_Aggregation], + ) -> ExemplarReservoirBuilder: + if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + return lambda **kwargs: AlignedHistogramBucketExemplarReservoir( + boundaries=boundaries or [], + **{k: v for k, v in kwargs.items() if k != 
"boundaries"}, + ) + else: + return lambda **kwargs: SimpleFixedSizeExemplarReservoir( + size=size, **kwargs + ) + + return factory + class Test_ViewInstrumentMatch(TestCase): # pylint: disable=invalid-name @classmethod @@ -375,16 +384,18 @@ def test_setting_aggregation(self): ], _LastValueAggregation, ) + + class TestSimpleFixedSizeExemplarReservoir(TestCase): - + def test_consume_measurement_with_custom_reservoir_factory(self): simple_fixed_size_factory = generalized_reservoir_factory(size=10) # Create an instance of _Counter instrument1 = _Counter( name="instrument1", - instrumentation_scope=None, - measurement_consumer=None, + instrumentation_scope=None, + measurement_consumer=None, description="description", unit="unit", ) @@ -431,8 +442,10 @@ def test_consume_measurement_with_custom_reservoir_factory(self): ) ) - data_points = view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) - + data_points = view_instrument_match.collect( + AggregationTemporality.CUMULATIVE, 0 + ) + # Ensure only one data point is collected self.assertEqual(len(data_points), 2) @@ -444,7 +457,6 @@ def test_consume_measurement_with_custom_reservoir_factory(self): self.assertEqual(data_points[1].exemplars[0].value, 4.0) self.assertEqual(data_points[1].exemplars[1].value, 5.0) - def test_consume_measurement_with_exemplars(self): # Create an instance of _Counter instrument1 = _Counter( @@ -487,8 +499,10 @@ def test_consume_measurement_with_exemplars(self): ) # Collect the data points - data_points = view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) - + data_points = view_instrument_match.collect( + AggregationTemporality.CUMULATIVE, 0 + ) + # Ensure only one data point is collected self.assertEqual(len(data_points), 1) @@ -498,11 +512,14 @@ def test_consume_measurement_with_exemplars(self): self.assertEqual(data_points[0].exemplars[0].value, 4.0) self.assertEqual(data_points[0].exemplars[1].value, 5.0) + class 
TestAlignedHistogramBucketExemplarReservoir(TestCase): - + def test_consume_measurement_with_custom_reservoir_factory(self): # Custom factory for AlignedHistogramBucketExemplarReservoir with specific boundaries - histogram_reservoir_factory = generalized_reservoir_factory(boundaries=[0, 5, 10, 25]) + histogram_reservoir_factory = generalized_reservoir_factory( + boundaries=[0, 5, 10, 25] + ) # Create an instance of _Histogram instrument1 = _Histogram( @@ -544,7 +561,7 @@ def test_consume_measurement_with_custom_reservoir_factory(self): attributes={"attribute2": "value2"}, ) ) - + view_instrument_match.consume_measurement( Measurement( value=8.0, # Should go into the second bucket (5 to 10) @@ -566,8 +583,10 @@ def test_consume_measurement_with_custom_reservoir_factory(self): ) # Collect the data points - data_points = view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) - + data_points = view_instrument_match.collect( + AggregationTemporality.CUMULATIVE, 0 + ) + # Ensure three data points are collected, one for each bucket self.assertEqual(len(data_points), 3) @@ -576,6 +595,12 @@ def test_consume_measurement_with_custom_reservoir_factory(self): self.assertEqual(len(data_points[1].exemplars), 1) self.assertEqual(len(data_points[2].exemplars), 1) - self.assertEqual(data_points[0].exemplars[0].value, 2.0) # First bucket - self.assertEqual(data_points[1].exemplars[0].value, 8.0) # Second bucket - self.assertEqual(data_points[2].exemplars[0].value, 15.0) # Third bucket \ No newline at end of file + self.assertEqual( + data_points[0].exemplars[0].value, 2.0 + ) # First bucket + self.assertEqual( + data_points[1].exemplars[0].value, 8.0 + ) # Second bucket + self.assertEqual( + data_points[2].exemplars[0].value, 15.0 + ) # Third bucket From 0a13b62112464023682766afc5d5b1c2abc4020f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Mon, 2 Sep 2024 14:43:17 +0200 Subject: [PATCH 24/48] Fix unit tests --- 
.../integration_test/test_histogram_export.py | 1 - .../integration_test/test_sum_aggregation.py | 6 +--- .../tests/metrics/test_aggregation.py | 11 ++++--- .../tests/metrics/test_exemplarreservoir.py | 22 +++++++------- .../metrics/test_view_instrument_match.py | 30 +++++++++++-------- 5 files changed, 35 insertions(+), 35 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py index 09f7bb9db7..6095781cb2 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_histogram_export.py @@ -18,7 +18,6 @@ from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, - TraceBasedExemplarFilter, ) from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry.sdk.resources import SERVICE_NAME, Resource diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py index 36de73be04..0d56ca92bc 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py @@ -23,11 +23,7 @@ from opentelemetry.context import Context from opentelemetry.metrics import Observation from opentelemetry.sdk.metrics import Counter, MeterProvider, ObservableCounter -from opentelemetry.sdk.metrics._internal.exemplar import ( - AlwaysOffExemplarFilter, - AlwaysOnExemplarFilter, - TraceBasedExemplarFilter, -) +from opentelemetry.sdk.metrics._internal.exemplar import AlwaysOnExemplarFilter from opentelemetry.sdk.metrics.export import ( AggregationTemporality, InMemoryMetricReader, diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index e76c0fcc80..0d3b084788 100644 
--- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -28,7 +28,6 @@ ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) from opentelemetry.sdk.metrics._internal.instrument import ( @@ -660,15 +659,12 @@ def test_observable_gauge(self): class TestExemplarsFromAggregations(TestCase): def test_collection_simple_fixed_size_reservoir(self): - exemplar_reservoir_factory = lambda: SimpleFixedSizeExemplarReservoir( - size=3 - ) synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.DELTA, 0, - exemplar_reservoir_factory, + lambda: SimpleFixedSizeExemplarReservoir(size=3), ) synchronous_sum_aggregation.aggregate(measurement(1)) @@ -679,7 +675,10 @@ def test_collection_simple_fixed_size_reservoir(self): datapoint = synchronous_sum_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) - self.assertEqual(len(datapoint.exemplars), 3) + # As the reservoir has multiple buckets, it may store up to + # 3 exemplars + self.assertGreater(len(datapoint.exemplars), 0) + self.assertLessEqual(len(datapoint.exemplars), 3) def test_collection_simple_fixed_size_reservoir_with_default_reservoir( self, diff --git a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py index f98a2c943b..2c205a6ee0 100644 --- a/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py +++ b/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py @@ -10,12 +10,10 @@ ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoir, - ExemplarReservoirFactory, SimpleFixedSizeExemplarReservoir, ) from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory -from opentelemetry.trace import INVALID_SPAN, SpanContext, TraceFlags +from opentelemetry.trace import 
SpanContext, TraceFlags class TestSimpleFixedSizeExemplarReservoir(TestCase): @@ -58,9 +56,10 @@ def test_filter_attributes(self): reservoir.offer( 1, time_ns(), {"key1": "value1", "key2": "value2"}, ctx ) - exemplars = reservoir.collect({"key2": "value2", "key3": "value3"}) + exemplars = reservoir.collect({"key2": "value2"}) self.assertEqual(len(exemplars), 1) - self.assertNotEqual("key1", exemplars[0].filtered_attributes) + self.assertIn("key1", exemplars[0].filtered_attributes) + self.assertNotIn("key2", exemplars[0].filtered_attributes) def test_reset_after_collection(self): reservoir = SimpleFixedSizeExemplarReservoir(4) @@ -101,17 +100,18 @@ def test_measurement_in_buckets(self): ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) - reservoir.offer(52, time_ns(), {"bucket": "5"}, ctx) - reservoir.offer(7, time_ns(), {"bucket": "3"}, ctx) - reservoir.offer(6, time_ns(), {"bucket": "3"}, ctx) + reservoir.offer(80, time_ns(), {"bucket": "5"}, ctx) # outlier + reservoir.offer(52, time_ns(), {"bucket": "4"}, ctx) + reservoir.offer(7, time_ns(), {"bucket": "1"}, ctx) + reservoir.offer(6, time_ns(), {"bucket": "1"}, ctx) - exemplars = reservoir.collect({"bucket": "3"}) + exemplars = reservoir.collect({"bucket": "1"}) - self.assertEqual(len(exemplars), 2) + self.assertEqual(len(exemplars), 3) self.assertEqual(exemplars[0].value, 6) self.assertEqual(exemplars[1].value, 52) + self.assertEqual(exemplars[2].value, 80) # outlier self.assertEqual(len(exemplars[0].filtered_attributes), 0) - self.assertNotEqual(exemplars[1].filtered_attributes, {"bucket": "5"}) def test_last_measurement_in_bucket(self): reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25]) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index dd9e778144..4017ca84e4 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ 
b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -15,7 +15,7 @@ # pylint: disable=protected-access from time import time_ns -from typing import Any, Callable, Optional, Sequence, Set, Type +from typing import Callable, Sequence, Type from unittest import TestCase from unittest.mock import MagicMock, Mock @@ -24,17 +24,13 @@ _ViewInstrumentMatch, ) from opentelemetry.sdk.metrics._internal.aggregation import ( - Aggregation, - DefaultAggregation, _Aggregation, _DropAggregation, _ExplicitBucketHistogramAggregation, - _ExponentialBucketHistogramAggregation, _LastValueAggregation, ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoir, ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) @@ -65,7 +61,8 @@ def factory( ) else: return lambda **kwargs: SimpleFixedSizeExemplarReservoir( - size=size, **kwargs + size=size, + **{k: v for k, v in kwargs.items() if k != "size"}, ) return factory @@ -74,7 +71,6 @@ def factory( class Test_ViewInstrumentMatch(TestCase): # pylint: disable=invalid-name @classmethod def setUpClass(cls): - cls.mock_aggregation_factory = Mock() cls.mock_created_aggregation = ( cls.mock_aggregation_factory._create_aggregation() @@ -387,7 +383,6 @@ def test_setting_aggregation(self): class TestSimpleFixedSizeExemplarReservoir(TestCase): - def test_consume_measurement_with_custom_reservoir_factory(self): simple_fixed_size_factory = generalized_reservoir_factory(size=10) @@ -507,14 +502,14 @@ def test_consume_measurement_with_exemplars(self): self.assertEqual(len(data_points), 1) # Verify that exemplars have been correctly stored and collected - self.assertEqual(len(data_points[0].exemplars), 2) + # As the default reservoir has only one bucket, it will retain + # the last measurement as exemplar + self.assertEqual(len(data_points[0].exemplars), 1) - self.assertEqual(data_points[0].exemplars[0].value, 4.0) - self.assertEqual(data_points[0].exemplars[1].value, 5.0) + 
self.assertEqual(data_points[0].exemplars[0].value, 5.0) class TestAlignedHistogramBucketExemplarReservoir(TestCase): - def test_consume_measurement_with_custom_reservoir_factory(self): # Custom factory for AlignedHistogramBucketExemplarReservoir with specific boundaries histogram_reservoir_factory = generalized_reservoir_factory( @@ -582,6 +577,16 @@ def test_consume_measurement_with_custom_reservoir_factory(self): ) ) + # view_instrument_match.consume_measurement( + # Measurement( + # value=30.0, # Should go into the outliers bucket + # time_unix_nano=time_ns(), + # instrument=instrument1, + # context=Context(), + # attributes={"attribute3": "value3"}, + # ) + # ) + # Collect the data points data_points = view_instrument_match.collect( AggregationTemporality.CUMULATIVE, 0 @@ -604,3 +609,4 @@ def test_consume_measurement_with_custom_reservoir_factory(self): self.assertEqual( data_points[2].exemplars[0].value, 15.0 ) # Third bucket + # self.assertEqual(data_points[2].exemplars[0].value, 30.0) # Outlier bucket From 612404e4f5f0f912c8fe71956ca01ae3d285aff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Mon, 2 Sep 2024 14:57:07 +0200 Subject: [PATCH 25/48] Improve the example --- .../reader/preferred_exemplarfilter.py | 19 ++++--- .../metrics/views/change_reservoir_factory.py | 54 +++++++++---------- 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/docs/examples/metrics/reader/preferred_exemplarfilter.py b/docs/examples/metrics/reader/preferred_exemplarfilter.py index 1840cebfcc..4bb98896be 100644 --- a/docs/examples/metrics/reader/preferred_exemplarfilter.py +++ b/docs/examples/metrics/reader/preferred_exemplarfilter.py @@ -13,19 +13,21 @@ # limitations under the License. 
import time +from opentelemetry import trace from opentelemetry.metrics import get_meter_provider, set_meter_provider -from opentelemetry.sdk.metrics import Counter, MeterProvider +from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.exemplar import ( - AlwaysOffExemplarFilter, - AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) +from opentelemetry.sdk.trace import TracerProvider # Create an ExemplarFilter instance (e.g., TraceBasedExemplarFilter) +# Default available values are AlwaysOffExemplarFilter, AlwaysOnExemplarFilter +# and TraceBasedExemplarFilter exemplar_filter = TraceBasedExemplarFilter() exporter = ConsoleMetricExporter() @@ -45,6 +47,11 @@ meter = get_meter_provider().get_meter("exemplar-filter-example", "0.1.2") counter = meter.create_counter("my-counter") -for value in range(10): - counter.add(value) - time.sleep(2.0) +# Create a trace and span as the default exemplar filter `TraceBasedExemplarFilter` +# will only store exemplar if a context exists +trace.set_tracer_provider(TracerProvider()) +tracer = trace.get_tracer(__name__) +with tracer.start_as_current_span("foo"): + for value in range(10): + counter.add(value) + time.sleep(2.0) diff --git a/docs/examples/metrics/views/change_reservoir_factory.py b/docs/examples/metrics/views/change_reservoir_factory.py index 843c1bf3a9..b87e9d92c6 100644 --- a/docs/examples/metrics/views/change_reservoir_factory.py +++ b/docs/examples/metrics/views/change_reservoir_factory.py @@ -14,21 +14,19 @@ import random import time -from typing import Any, Callable, Optional, Sequence, Set, Type +from typing import Type +from opentelemetry import trace from opentelemetry.metrics import get_meter_provider, set_meter_provider -from opentelemetry.sdk.metrics import Counter, MeterProvider +from opentelemetry.sdk.metrics import MeterProvider from 
opentelemetry.sdk.metrics._internal.aggregation import ( - Aggregation, DefaultAggregation, _Aggregation, _ExplicitBucketHistogramAggregation, - _ExponentialBucketHistogramAggregation, ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, - ExemplarReservoir, - ExemplarReservoirFactory, + ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) from opentelemetry.sdk.metrics.export import ( @@ -36,30 +34,21 @@ PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import View - - -# Returns a factory for creating an exemplar reservoir based on the aggregation type and specified parameters -def generalized_reservoir_factory( - size: int = 1, boundaries: Sequence[float] = None -) -> Callable[[Type[_Aggregation]], ExemplarReservoirFactory]: - def factory( - aggregationType: Type[_Aggregation], - ) -> ExemplarReservoirFactory: - if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): - return lambda **kwargs: AlignedHistogramBucketExemplarReservoir( - boundaries=boundaries or [], - **{k: v for k, v in kwargs.items() if k != "boundaries"}, - ) - else: - return lambda **kwargs: SimpleFixedSizeExemplarReservoir( - size=size, **kwargs - ) - - return factory +from opentelemetry.sdk.trace import TracerProvider # Create a custom reservoir factory with specified parameters -custom_reservoir_factory = generalized_reservoir_factory(size=10) +def custom_reservoir_factory( + aggregationType: Type[_Aggregation], +) -> ExemplarReservoirBuilder: + if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + return AlignedHistogramBucketExemplarReservoir + else: + return lambda **kwargs: SimpleFixedSizeExemplarReservoir( + size=10, + **{k: v for k, v in kwargs.items() if k != "size"}, + ) + # Create a view with the custom reservoir factory change_reservoir_factory_view = View( @@ -88,6 +77,11 @@ def factory( my_counter = meter.create_counter("my.counter") -while 1: - 
my_counter.add(random.randint(1, 10)) - time.sleep(random.random()) +# Create a trace and span as the default exemplar filter `TraceBasedExemplarFilter` +# will only store exemplar if a context exists +trace.set_tracer_provider(TracerProvider()) +tracer = trace.get_tracer(__name__) +with tracer.start_as_current_span("foo"): + while 1: + my_counter.add(random.randint(1, 10)) + time.sleep(random.random()) From c29e0dd62bf60d39562e35a93aa0d58b22081370 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 3 Sep 2024 17:55:49 +0200 Subject: [PATCH 26/48] Fix pylint errors --- .../sdk/metrics/_internal/aggregation.py | 1 - .../_internal/exemplar/exemplar_reservoir.py | 8 +-- .../sdk/metrics/_internal/view.py | 6 +- .../tests/metrics/test_aggregation.py | 5 +- .../metrics/test_view_instrument_match.py | 67 +++++++++++-------- 5 files changed, 45 insertions(+), 42 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 271e256449..39c967e4c8 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -105,7 +105,6 @@ def aggregate( measurement: Measurement to aggregate should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. 
""" - pass @abstractmethod def collect( diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 2ce5d2461a..d3fc4e900e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -107,7 +107,7 @@ def offer( self.__offered = True - def collect(self, point_attributes: Attributes) -> Exemplar | None: + def collect(self, point_attributes: Attributes) -> Optional[Exemplar]: """May return an Exemplar and resets the bucket for the next sampling period.""" if not self.__offered: return None @@ -232,11 +232,9 @@ def _find_bucket_index( Raises: BucketIndexError: If no bucket index can be found. """ - pass def _reset(self) -> None: """Reset the reservoir by resetting any stateful logic after a collection cycle.""" - pass class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC): @@ -309,9 +307,9 @@ def _find_bucket_index( attributes: Attributes, context: Context, ) -> int: - for i, boundary in enumerate(self._boundaries): + for index, boundary in enumerate(self._boundaries): if value <= boundary: - return i + return index return len(self._boundaries) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index e9176af33e..5dd11be1f9 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -35,12 +35,12 @@ def _default_reservoir_factory( - aggregationType: Type[_Aggregation], + aggregation_type: Type[_Aggregation], ) -> ExemplarReservoirBuilder: """Default reservoir factory per aggregation.""" - if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + if issubclass(aggregation_type, 
_ExplicitBucketHistogramAggregation): return AlignedHistogramBucketExemplarReservoir - elif issubclass(aggregationType, _ExponentialBucketHistogramAggregation): + if issubclass(aggregation_type, _ExponentialBucketHistogramAggregation): return SimpleFixedSizeExemplarReservoir return SimpleFixedSizeExemplarReservoir diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 0d3b084788..3eeb63e26c 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -704,15 +704,12 @@ def test_collection_simple_fixed_size_reservoir_with_default_reservoir( def test_collection_aligned_histogram_bucket_reservoir(self): boundaries = [5.0, 10.0, 20.0] - exemplar_reservoir_factory = ( - lambda: AlignedHistogramBucketExemplarReservoir(boundaries) - ) synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.DELTA, 0, - exemplar_reservoir_factory, + lambda: AlignedHistogramBucketExemplarReservoir(boundaries), ) synchronous_sum_aggregation.aggregate(measurement(2.0)) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index a5c8c48026..fa9a0e4dc7 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -52,18 +52,18 @@ def generalized_reservoir_factory( size: int = 1, boundaries: Sequence[float] = None ) -> Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]: def factory( - aggregationType: Type[_Aggregation], + aggregation_type: Type[_Aggregation], ) -> ExemplarReservoirBuilder: - if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): + if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation): return lambda **kwargs: AlignedHistogramBucketExemplarReservoir( boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != 
"boundaries"}, ) - else: - return lambda **kwargs: SimpleFixedSizeExemplarReservoir( - size=size, - **{k: v for k, v in kwargs.items() if k != "size"}, - ) + + return lambda **kwargs: SimpleFixedSizeExemplarReservoir( + size=size, + **{k: v for k, v in kwargs.items() if k != "size"}, + ) return factory @@ -288,7 +288,11 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): # +1 call to _create_aggregation view_instrument_match.consume_measurement( Measurement( - value=0, instrument=instrument, attributes={"foo": "bar0"} + value=0, + time_unix_nano=time_ns(), + instrument=instrument, + attributes={"foo": "bar0"}, + context=Context(), ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( @@ -304,7 +308,11 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): # +1 call to _create_aggregation view_instrument_match.consume_measurement( Measurement( - value=0, instrument=instrument, attributes={"foo": "bar1"} + value=0, + time_unix_nano=time_ns(), + instrument=instrument, + attributes={"foo": "bar1"}, + context=Context(), ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( @@ -322,7 +330,11 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): # +1 call to create_aggregation view_instrument_match.consume_measurement( Measurement( - value=0, instrument=instrument, attributes={"foo": "bar"} + value=0, + time_unix_nano=time_ns(), + instrument=instrument, + attributes={"foo": "bar"}, + context=Context(), ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( @@ -331,12 +343,20 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): # No new calls to _create_aggregation because attributes remain the same view_instrument_match.consume_measurement( Measurement( - value=0, instrument=instrument, attributes={"foo": "bar"} + value=0, + time_unix_nano=time_ns(), + instrument=instrument, + attributes={"foo": "bar"}, + 
context=Context(), ) ) view_instrument_match.consume_measurement( Measurement( - value=0, instrument=instrument, attributes={"foo": "bar"} + value=0, + time_unix_nano=time_ns(), + instrument=instrument, + attributes={"foo": "bar"}, + context=Context(), ) ) # In total we have 5 calls for _create_aggregation @@ -520,8 +540,8 @@ def test_consume_measurement_with_custom_reservoir_factory(self): ) ) - data_points = view_instrument_match.collect( - AggregationTemporality.CUMULATIVE, 0 + data_points = list( + view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) ) # Ensure only one data point is collected @@ -577,8 +597,8 @@ def test_consume_measurement_with_exemplars(self): ) # Collect the data points - data_points = view_instrument_match.collect( - AggregationTemporality.CUMULATIVE, 0 + data_points = list( + view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) ) # Ensure only one data point is collected @@ -660,19 +680,9 @@ def test_consume_measurement_with_custom_reservoir_factory(self): ) ) - # view_instrument_match.consume_measurement( - # Measurement( - # value=30.0, # Should go into the outliners bucket - # time_unix_nano=time_ns(), - # instrument=instrument1, - # context=Context(), - # attributes={"attribute3": "value3"}, - # ) - # ) - # Collect the data points - data_points = view_instrument_match.collect( - AggregationTemporality.CUMULATIVE, 0 + data_points = list( + view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) ) # Ensure three data points are collected, one for each bucket @@ -692,4 +702,3 @@ def test_consume_measurement_with_custom_reservoir_factory(self): self.assertEqual( data_points[2].exemplars[0].value, 15.0 ) # Third bucket - # self.assertEqual(data_points[2].exemplars[0].value, 30.0) # Outliner bucket From 1309b6131cf9813d4a961c00a0b1c9de35ccc49c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 3 Sep 2024 17:58:14 +0200 Subject: [PATCH 27/48] Add changelog entry --- 
CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index de7013d0cb..49d15d1fc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- Implementation of exemplars + ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094)) + ## Version 1.27.0/0.48b0 (2024-08-28) - Implementation of Events API From 2780df70f90cfd28ebb2d9bce65832737303566d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 3 Sep 2024 18:01:02 +0200 Subject: [PATCH 28/48] Fix opentelemetry-api tests --- opentelemetry-api/tests/metrics/test_meter_provider.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/opentelemetry-api/tests/metrics/test_meter_provider.py b/opentelemetry-api/tests/metrics/test_meter_provider.py index bce530d6ca..adf9df1eb7 100644 --- a/opentelemetry-api/tests/metrics/test_meter_provider.py +++ b/opentelemetry-api/tests/metrics/test_meter_provider.py @@ -279,13 +279,13 @@ def test_proxy_meter(self): real_gauge.assert_not_called() proxy_counter.add(amount, attributes=attributes) - real_counter.add.assert_called_once_with(amount, attributes) + real_counter.add.assert_called_once_with(amount, attributes, None) proxy_updowncounter.add(amount, attributes=attributes) - real_updowncounter.add.assert_called_once_with(amount, attributes) + real_updowncounter.add.assert_called_once_with(amount, attributes, None) proxy_histogram.record(amount, attributes=attributes) - real_histogram.record.assert_called_once_with(amount, attributes) + real_histogram.record.assert_called_once_with(amount, attributes, None) proxy_gauge.set(amount, attributes=attributes) - real_gauge.set.assert_called_once_with(amount, attributes) + real_gauge.set.assert_called_once_with(amount, attributes, None) def test_proxy_meter_with_real_meter(self) -> None: # Creating new instruments on the _ProxyMeter 
with a real meter set From 0ea80dce7c0368e4ba2d8ef05a193bc3581a8d22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 3 Sep 2024 18:03:59 +0200 Subject: [PATCH 29/48] Fix TypeAlias non-supported with py38 and py39 --- .../sdk/metrics/_internal/exemplar/exemplar_reservoir.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index d3fc4e900e..d5241fe5f3 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -14,7 +14,7 @@ from abc import ABC, abstractmethod from random import randrange -from typing import Any, Callable, Optional, Sequence, TypeAlias, Union +from typing import Any, Callable, Optional, Sequence, Union from opentelemetry import trace from opentelemetry.context import Context @@ -313,7 +313,7 @@ def _find_bucket_index( return len(self._boundaries) -ExemplarReservoirBuilder: TypeAlias = Callable[ +ExemplarReservoirBuilder = Callable[ [dict[str, Any]], ExemplarReservoir ] ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder. 
From e7e4227fb1ab3afd29280a34f0cb7b9e3ec01edc Mon Sep 17 00:00:00 2001 From: czhang771 Date: Tue, 3 Sep 2024 12:29:10 -0700 Subject: [PATCH 30/48] add exemplar filter as environment variable --- .../sdk/metrics/_internal/__init__.py | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py index 5a2e428965..cb2582bc77 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py @@ -33,11 +33,16 @@ ) from opentelemetry.metrics import UpDownCounter as APIUpDownCounter from opentelemetry.metrics import _Gauge as APIGauge -from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED +from opentelemetry.sdk.environment_variables import ( + OTEL_SDK_DISABLED, + OTEL_METRICS_EXEMPLAR_FILTER, +) from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.exemplar import ( ExemplarFilter, TraceBasedExemplarFilter, + AlwaysOnExemplarFilter, + AlwaysOffExemplarFilter, ) from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, @@ -394,11 +399,12 @@ def __init__( self._atexit_handler = None if resource is None: resource = Resource.create({}) + filter = environ.get(OTEL_METRICS_EXEMPLAR_FILTER, None) self._sdk_config = SdkConfiguration( exemplar_filter=( - TraceBasedExemplarFilter() - if exemplar_filter is None - else exemplar_filter + exemplar_filter + if exemplar_filter is not None + else self._get_exemplar_filter(filter) ), resource=resource, metric_readers=metric_readers, @@ -556,3 +562,14 @@ def get_meter( self._measurement_consumer, ) return self._meters[info] + + def _get_exemplar_filter(self, exemplar_filter: str) -> ExemplarFilter: + if not exemplar_filter or exemplar_filter == 'trace_based': + return 
TraceBasedExemplarFilter() + elif exemplar_filter == 'always_on': + return AlwaysOnExemplarFilter() + elif exemplar_filter == 'always_off': + return AlwaysOffExemplarFilter() + else: + raise Exception("Invalid exemplar filter.") + \ No newline at end of file From 028e414f1c709f2e5923ef43ce803b2cc7a26670 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 09:11:59 +0200 Subject: [PATCH 31/48] Fix format --- opentelemetry-api/tests/metrics/test_meter_provider.py | 4 +++- .../sdk/metrics/_internal/exemplar/exemplar_reservoir.py | 4 +--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/opentelemetry-api/tests/metrics/test_meter_provider.py b/opentelemetry-api/tests/metrics/test_meter_provider.py index adf9df1eb7..8caec848f6 100644 --- a/opentelemetry-api/tests/metrics/test_meter_provider.py +++ b/opentelemetry-api/tests/metrics/test_meter_provider.py @@ -281,7 +281,9 @@ def test_proxy_meter(self): proxy_counter.add(amount, attributes=attributes) real_counter.add.assert_called_once_with(amount, attributes, None) proxy_updowncounter.add(amount, attributes=attributes) - real_updowncounter.add.assert_called_once_with(amount, attributes, None) + real_updowncounter.add.assert_called_once_with( + amount, attributes, None + ) proxy_histogram.record(amount, attributes=attributes) real_histogram.record.assert_called_once_with(amount, attributes, None) proxy_gauge.set(amount, attributes=attributes) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index d5241fe5f3..4ec6162e13 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -313,9 +313,7 @@ def _find_bucket_index( return len(self._boundaries) -ExemplarReservoirBuilder = Callable[ - 
[dict[str, Any]], ExemplarReservoir -] +ExemplarReservoirBuilder = Callable[[dict[str, Any]], ExemplarReservoir] ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder. It may receive the Aggregation parameters it is bounded to; e.g. From 74016f04110f723bb500f2ac74ecf41e5a55b023 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 09:31:24 +0200 Subject: [PATCH 32/48] Lint the latest version --- .../sdk/metrics/_internal/__init__.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py index cb2582bc77..f9ed028032 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py @@ -34,15 +34,15 @@ from opentelemetry.metrics import UpDownCounter as APIUpDownCounter from opentelemetry.metrics import _Gauge as APIGauge from opentelemetry.sdk.environment_variables import ( - OTEL_SDK_DISABLED, OTEL_METRICS_EXEMPLAR_FILTER, + OTEL_SDK_DISABLED, ) from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.exemplar import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, ExemplarFilter, TraceBasedExemplarFilter, - AlwaysOnExemplarFilter, - AlwaysOffExemplarFilter, ) from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, @@ -349,6 +349,17 @@ def create_observable_up_down_counter( return instrument +def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter: + if exemplar_filter == "trace_based": + return TraceBasedExemplarFilter() + if exemplar_filter == "always_on": + return AlwaysOnExemplarFilter() + if exemplar_filter == "always_off": + return AlwaysOffExemplarFilter() + msg = f"Unknown exemplar filter '{exemplar_filter}'." 
+ raise ValueError(msg) + + class MeterProvider(APIMeterProvider): r"""See `opentelemetry.metrics.MeterProvider`. @@ -399,12 +410,12 @@ def __init__( self._atexit_handler = None if resource is None: resource = Resource.create({}) - filter = environ.get(OTEL_METRICS_EXEMPLAR_FILTER, None) self._sdk_config = SdkConfiguration( exemplar_filter=( exemplar_filter - if exemplar_filter is not None - else self._get_exemplar_filter(filter) + or _get_exemplar_filter( + environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based") + ) ), resource=resource, metric_readers=metric_readers, @@ -562,14 +573,3 @@ def get_meter( self._measurement_consumer, ) return self._meters[info] - - def _get_exemplar_filter(self, exemplar_filter: str) -> ExemplarFilter: - if not exemplar_filter or exemplar_filter == 'trace_based': - return TraceBasedExemplarFilter() - elif exemplar_filter == 'always_on': - return AlwaysOnExemplarFilter() - elif exemplar_filter == 'always_off': - return AlwaysOffExemplarFilter() - else: - raise Exception("Invalid exemplar filter.") - \ No newline at end of file From dcb44f012a143bfc421bb48cd2d7f09d3bc4b79f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 09:46:37 +0200 Subject: [PATCH 33/48] More typing fixes for py38 and py39 --- .../metrics/_internal/exemplar/exemplar_reservoir.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 4ec6162e13..a78cde29fd 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -14,7 +14,7 @@ from abc import ABC, abstractmethod from random import randrange -from typing import Any, Callable, Optional, Sequence, Union +from typing import Any, 
Callable, Dict, List, Optional, Sequence, Union from opentelemetry import trace from opentelemetry.context import Context @@ -55,7 +55,7 @@ def offer( raise NotImplementedError("ExemplarReservoir.offer is not implemented") @abstractmethod - def collect(self, point_attributes: Attributes) -> list[Exemplar]: + def collect(self, point_attributes: Attributes) -> List[Exemplar]: """Returns accumulated Exemplars and also resets the reservoir for the next sampling period @@ -155,11 +155,11 @@ class FixedSizeExemplarReservoirABC(ExemplarReservoir): def __init__(self, size: int, **kwargs) -> None: super().__init__(**kwargs) self._size: int = size - self._reservoir_storage: list[ExemplarBucket] = [ + self._reservoir_storage: List[ExemplarBucket] = [ ExemplarBucket() for _ in range(self._size) ] - def collect(self, point_attributes: Attributes) -> list[Exemplar]: + def collect(self, point_attributes: Attributes) -> List[Exemplar]: """Returns accumulated Exemplars and also resets the reservoir for the next sampling period @@ -313,7 +313,7 @@ def _find_bucket_index( return len(self._boundaries) -ExemplarReservoirBuilder = Callable[[dict[str, Any]], ExemplarReservoir] +ExemplarReservoirBuilder = Callable[[Dict[str, Any]], ExemplarReservoir] ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder. It may receive the Aggregation parameters it is bounded to; e.g. 
From c0787abf22b8d28abe8bf2df04defbd09ce9bc66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 09:46:58 +0200 Subject: [PATCH 34/48] Fix log record tests --- .../tests/metrics/test_view_instrument_match.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index fa9a0e4dc7..1a1c3e8269 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -39,6 +39,7 @@ from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, ) +from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.sdk.metrics.export import AggregationTemporality from opentelemetry.sdk.metrics.view import ( DefaultAggregation, @@ -296,7 +297,7 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( - instrument, {"foo": "bar0"}, start_time_unix_nano + instrument, {"foo": "bar0"}, _default_reservoir_factory, start_time_unix_nano ) collection_start_time_unix_nano = time_ns() collected_data_points = view_instrument_match.collect( @@ -316,7 +317,7 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( - instrument, {"foo": "bar1"}, 1 + instrument, {"foo": "bar1"}, _default_reservoir_factory, 1 ) collection_start_time_unix_nano = time_ns() collected_data_points = view_instrument_match.collect( @@ -338,7 +339,7 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( - instrument, {"foo": "bar"}, 2 + instrument, {"foo": "bar"}, _default_reservoir_factory, 2 ) # No new calls to 
_create_aggregation because attributes remain the same view_instrument_match.consume_measurement( From e2b77783afac3748c07cb91b36fbd1dfe9db14e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 10:09:15 +0200 Subject: [PATCH 35/48] Fix doc --- .../sdk/metrics/_internal/exemplar/exemplar.py | 6 +++++- .../sdk/metrics/_internal/exemplar/exemplar_filter.py | 8 ++++---- .../opentelemetry/sdk/metrics/_internal/measurement.py | 6 +++++- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py index e460f38a48..2c570cc8b0 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py @@ -26,7 +26,7 @@ class Exemplar: was recorded, for example the span and trace ID of the active span when the exemplar was recorded. - Attributes: + Attributes trace_id: (optional) The trace associated with a recording span_id: (optional) The span associated with a recording time_unix_nano: The time of the observation @@ -37,6 +37,10 @@ class Exemplar: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar """ + # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated + # one will come from napoleon extension and the other from autodoc extension. 
This + # will raise an sphinx error of duplicated object description + # See https://github.com/sphinx-doc/sphinx/issues/8664 filtered_attributes: Attributes value: Union[int, float] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py index 0e090f9e35..8961d101ef 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py @@ -42,7 +42,7 @@ def should_sample( ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. - Attributes: + Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes @@ -69,7 +69,7 @@ def should_sample( ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. - Attributes: + Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes @@ -96,7 +96,7 @@ def should_sample( ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. - Attributes: + Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes @@ -122,7 +122,7 @@ def should_sample( ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. 
- Attributes: + Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py index a73d6001a1..dad8057731 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py @@ -25,13 +25,17 @@ class Measurement: """ Represents a data point reported via the metrics API to the SDK. - Attributes: + Attributes value: Measured value time_unix_nano: The time the API call was made to record the Measurement instrument: The instrument that produced this `Measurement`. context: The active Context of the Measurement at API call time. attributes: Measurement attributes """ + # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated + # one will come from napoleon extension and the other from autodoc extension. 
This + # will raise an sphinx error of duplicated object description + # See https://github.com/sphinx-doc/sphinx/issues/8664 value: Union[int, float] time_unix_nano: int From 04a21e053c5795fba9ae90191ed0293d187d749a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 15:18:34 +0200 Subject: [PATCH 36/48] More linting --- .../opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py | 1 + .../src/opentelemetry/sdk/metrics/_internal/measurement.py | 1 + .../tests/metrics/test_view_instrument_match.py | 5 ++++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py index 2c570cc8b0..d3199c69ab 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py @@ -37,6 +37,7 @@ class Exemplar: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar """ + # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated # one will come from napoleon extension and the other from autodoc extension. This # will raise an sphinx error of duplicated object description diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py index dad8057731..56619a83a1 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py @@ -32,6 +32,7 @@ class Measurement: context: The active Context of the Measurement at API call time. 
attributes: Measurement attributes """ + # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated # one will come from napoleon extension and the other from autodoc extension. This # will raise an sphinx error of duplicated object description diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index 1a1c3e8269..48481b5bd8 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -297,7 +297,10 @@ def test_collect_resets_start_time_unix_nano(self, mock_time_ns): ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( - instrument, {"foo": "bar0"}, _default_reservoir_factory, start_time_unix_nano + instrument, + {"foo": "bar0"}, + _default_reservoir_factory, + start_time_unix_nano, ) collection_start_time_unix_nano = time_ns() collected_data_points = view_instrument_match.collect( From 975700a23e0927159a0f7b9ff8de7086534925a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 16:29:12 +0200 Subject: [PATCH 37/48] Fix PyPy json loads --- .../tests/metrics/integration_test/test_console_exporter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py index 18dcd0da5b..7e77a878d8 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_console_exporter.py @@ -14,6 +14,7 @@ from io import StringIO from json import loads +from os import linesep from unittest import TestCase from unittest.mock import Mock, patch @@ -117,7 +118,8 @@ def test_console_exporter_with_exemplars(self): provider.shutdown() output.seek(0) - result_0 = 
loads("".join(output.readlines())) + joined_output = "".join(output.readlines()) + result_0 = loads(joined_output.strip(linesep)) self.assertGreater(len(result_0), 0) From 5b32ffcbf900b23765368e35f833eeccb75d9c69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 4 Sep 2024 17:05:03 +0200 Subject: [PATCH 38/48] Fix sphinx doc generation --- docs/conf.py | 12 ++++++++++++ .../src/opentelemetry/sdk/metrics/__init__.py | 2 ++ .../metrics/_internal/exemplar/exemplar_reservoir.py | 8 ++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 3aa7e022e3..ad2c6aa6b3 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -138,6 +138,18 @@ "py:class", "opentelemetry.proto.collector.logs.v1.logs_service_pb2.ExportLogsServiceRequest", ), + ( + "py:class", + "opentelemetry.sdk.metrics._internal.exemplar.exemplar_reservoir.FixedSizeExemplarReservoirABC", + ), + ( + "py:class", + "opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar", + ), + ( + "py:class", + "opentelemetry.sdk.metrics._internal.aggregation._Aggregation", + ) ] # Add any paths that contain templates here, relative to this directory. 
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index 80fc953da4..b89c08da04 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -19,6 +19,7 @@ AlignedHistogramBucketExemplarReservoir, AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, + Exemplar, ExemplarFilter, ExemplarReservoir, SimpleFixedSizeExemplarReservoir, @@ -38,6 +39,7 @@ "AlignedHistogramBucketExemplarReservoir", "AlwaysOnExemplarFilter", "AlwaysOffExemplarFilter", + "Exemplar", "ExemplarFilter", "ExemplarReservoir", "Meter", diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index a78cde29fd..1dcbfe47da 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -60,10 +60,10 @@ def collect(self, point_attributes: Attributes) -> List[Exemplar]: sampling period Args: - point_attributes The attributes associated with metric point. + point_attributes: The attributes associated with metric point. Returns: - a list of :class:`opentelemetry.sdk.metrics.exemplar.Exemplar`s. Returned + a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned exemplars contain the attributes that were filtered out by the aggregator, but recorded alongside the original measurement. """ @@ -164,10 +164,10 @@ def collect(self, point_attributes: Attributes) -> List[Exemplar]: sampling period Args: - point_attributes The attributes associated with metric point. + point_attributes: The attributes associated with metric point. Returns: - a list of :class:`opentelemetry.sdk.metrics.exemplar.Exemplar`s. 
Returned + a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned exemplars contain the attributes that were filtered out by the aggregator, but recorded alongside the original measurement. """ From ecd03bc4db28f59c74a6dc6d98aa835c2f205e30 Mon Sep 17 00:00:00 2001 From: czhang771 Date: Wed, 4 Sep 2024 11:04:03 -0700 Subject: [PATCH 39/48] fix view instrument match test case --- .../tests/metrics/test_view_instrument_match.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index 48481b5bd8..17c185294c 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -610,10 +610,11 @@ def test_consume_measurement_with_exemplars(self): # Verify that exemplars have been correctly stored and collected # As the default reservoir as only one bucket, it will retain - # the last measurement as exemplar + # either one of the measurements based on random selection self.assertEqual(len(data_points[0].exemplars), 1) - self.assertEqual(data_points[0].exemplars[0].value, 5.0) + self.assertIn(data_points[0].exemplars[0].value, [4.0, 5.0]) + class TestAlignedHistogramBucketExemplarReservoir(TestCase): From ac0eb5674b9380a9c3c85b10c626500759e0b236 Mon Sep 17 00:00:00 2001 From: czhang771 Date: Wed, 4 Sep 2024 14:40:47 -0700 Subject: [PATCH 40/48] start work on prometheus exporter --- .../exporter/prometheus/__init__.py | 58 ++++++++++++++++--- 1 file changed, 50 insertions(+), 8 deletions(-) diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py index 7e6b2ecc01..0b5fd28213 100644 --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py 
+++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -67,7 +67,7 @@ from json import dumps from logging import getLogger from os import environ -from typing import Deque, Dict, Iterable, Sequence, Tuple, Union +from typing import Deque, Dict, Iterable, Sequence, Tuple, Union, Optional from prometheus_client import start_http_server from prometheus_client.core import ( @@ -107,6 +107,9 @@ Sum, ) from opentelemetry.util.types import Attributes +from opentelemetry.sdk.metrics._internal import Exemplar +from prometheus_client.samples import Exemplar as PrometheusExemplar + _logger = getLogger(__name__) @@ -115,17 +118,29 @@ def _convert_buckets( - bucket_counts: Sequence[int], explicit_bounds: Sequence[float] -) -> Sequence[Tuple[str, int]]: + bucket_counts: Sequence[int], explicit_bounds: Sequence[float], exemplars: Sequence[Optional[PrometheusExemplar]] = None +) -> Sequence[Tuple[str, int, Optional[Exemplar]]]: buckets = [] total_count = 0 + previous_bound = float('-inf') + for upper_bound, count in zip( chain(explicit_bounds, ["+Inf"]), bucket_counts, ): total_count += count - buckets.append((f"{upper_bound}", total_count)) - + buckets.append((f"{upper_bound}", total_count, None)) + + # assigning exemplars to their corresponding values + if exemplars: + for i, (upper_bound, _, _) in enumerate(buckets): + for exemplar in exemplars: + if previous_bound <= exemplar.value < float(upper_bound): + # Assign the exemplar to the current bucket if it's the first valid one found + _, current_count, current_exemplar = buckets[i] + if current_exemplar is None: # Only assign if no exemplar has been assigned yet + buckets[i] = (upper_bound, current_count, exemplar) + previous_bound = float(upper_bound) return buckets @@ -251,6 +266,10 @@ def _translate_to_prometheus( for number_data_point in metric.data.data_points: label_keys = [] label_values = [] + exemplars = [ + self._convert_exemplar(ex) for ex in number_data_point.exemplars 
+ ] + for key, value in sorted(number_data_point.attributes.items()): label_keys.append(sanitize_attribute(key)) @@ -276,6 +295,7 @@ def _translate_to_prometheus( number_data_point.explicit_bounds ), "sum": number_data_point.sum, + "exemplars": exemplars, } ) else: @@ -350,7 +370,7 @@ def _translate_to_prometheus( [pre_metric_family_id, HistogramMetricFamily.__name__] ) - if ( + if ( metric_family_id not in metric_family_id_metric_family.keys() ): @@ -359,7 +379,7 @@ def _translate_to_prometheus( name=metric_name, documentation=metric_description, labels=label_keys, - unit=metric_unit, + unit=metric_unit, ) ) metric_family_id_metric_family[ @@ -367,7 +387,7 @@ def _translate_to_prometheus( ].add_metric( labels=label_values, buckets=_convert_buckets( - value["bucket_counts"], value["explicit_bounds"] + value["bucket_counts"], value["explicit_bounds"], value["exemplars"] ), sum_value=value["sum"], ) @@ -395,7 +415,29 @@ def _create_info_metric( info = InfoMetricFamily(name, description, labels=attributes) info.add_metric(labels=list(attributes.keys()), value=attributes) return info + + def _convert_exemplar(self, exemplar_data: Exemplar) -> PrometheusExemplar: + """ + Converts the SDK exemplar into a Prometheus Exemplar, including proper time conversion. + Parameters: + - value (float): The value associated with the exemplar. + - exemplar_data (ExemplarData): An OpenTelemetry exemplar data object containing attributes and timing information. + + Returns: + - Exemplar: A Prometheus Exemplar object with correct labeling and timing. 
+ """ + labels = {self._sanitize_label(key): str(value) for key, value in exemplar_data.filtered_attributes.items()} + + # Add trace_id and span_id to labels only if they are valid and not None + if exemplar_data.trace_id and exemplar_data.span_id: + labels['trace_id'] = exemplar_data.trace_id + labels['span_id'] = exemplar_data.span_id + + # Convert time from nanoseconds to seconds + timestamp_seconds = exemplar_data.time_unix_nano / 1e9 + prom_exemplar = PrometheusExemplar(labels, exemplar_data.value, timestamp_seconds) + return prom_exemplar class _AutoPrometheusMetricReader(PrometheusMetricReader): """Thin wrapper around PrometheusMetricReader used for the opentelemetry_metrics_exporter entry point. From 97f3c10fff968fbaa694961193be07f62803f757 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 15 Oct 2024 17:31:22 +0200 Subject: [PATCH 41/48] Fix span and trace id typing --- .../opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py | 4 ++-- .../sdk/metrics/_internal/exemplar/exemplar_reservoir.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py index d3199c69ab..95582e1601 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py @@ -46,5 +46,5 @@ class Exemplar: filtered_attributes: Attributes value: Union[int, float] time_unix_nano: int - span_id: Optional[str] = None - trace_id: Optional[str] = None + span_id: Optional[int] = None + trace_id: Optional[int] = None diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py index 1dcbfe47da..c8fa7f1453 100644 --- 
a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py @@ -77,8 +77,8 @@ def __init__(self) -> None: self.__value: Union[int, float] = 0 self.__attributes: Attributes = None self.__time_unix_nano: int = 0 - self.__span_id: Optional[str] = None - self.__trace_id: Optional[str] = None + self.__span_id: Optional[int] = None + self.__trace_id: Optional[int] = None self.__offered: bool = False def offer( From da7a2c0cddb53f39a590bb3cb6183cf02d3fdb8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 15 Oct 2024 17:31:38 +0200 Subject: [PATCH 42/48] Deal with missing span and trace ids --- .../_internal/metrics_encoder/__init__.py | 79 ++++++++++--------- 1 file changed, 43 insertions(+), 36 deletions(-) diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py index b3c7b98f1d..746a5813aa 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py @@ -13,53 +13,48 @@ # limitations under the License. 
import logging -from opentelemetry.sdk.metrics.export import ( - MetricExporter, -) -from opentelemetry.sdk.metrics.view import Aggregation -from os import environ -from opentelemetry.sdk.metrics import ( - Counter, - Histogram, - ObservableCounter, - ObservableGauge, - ObservableUpDownCounter, - UpDownCounter, -) from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, _encode_span_id, _encode_trace_id, ) -from opentelemetry.sdk.environment_variables import ( - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, -) -from opentelemetry.sdk.metrics.export import ( - AggregationTemporality, -) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( ExportMetricsServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 -from opentelemetry.sdk.metrics.export import ( - MetricsData, - Gauge, - Histogram as HistogramType, - Sum, - ExponentialHistogram as ExponentialHistogramType, -) -from typing import Dict from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION, + OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, +) +from opentelemetry.sdk.metrics import ( + Counter, + Exemplar, + Histogram, + ObservableCounter, + ObservableGauge, + ObservableUpDownCounter, + UpDownCounter, +) +from opentelemetry.sdk.metrics.export import ( + AggregationTemporality, + ExponentialHistogram as ExponentialHistogramType, + Gauge, + Histogram as HistogramType, + MetricsData, + MetricExporter, + Sum, ) from opentelemetry.sdk.metrics.view import ( + Aggregation, ExponentialBucketHistogramAggregation, ExplicitBucketHistogramAggregation, ) +from os import environ +from typing import Dict _logger = logging.getLogger(__name__) @@ -350,7 +345,7 @@ def _encode_metric(metric, pb2_metric): ) -def 
_encode_exemplars(sdk_exemplars: list) -> list: +def _encode_exemplars(sdk_exemplars: list[Exemplar]) -> list: """ Converts a list of SDK Exemplars into a list of protobuf Exemplars. @@ -362,14 +357,26 @@ def _encode_exemplars(sdk_exemplars: list) -> list: """ pb_exemplars = [] for sdk_exemplar in sdk_exemplars: - pb_exemplar = pb2.Exemplar( - time_unix_nano=sdk_exemplar.time_unix_nano, - span_id=_encode_span_id(sdk_exemplar.span_id), - trace_id=_encode_trace_id(sdk_exemplar.trace_id), - filtered_attributes=_encode_attributes( - sdk_exemplar.filtered_attributes - ), - ) + if ( + sdk_exemplar.span_id is not None + and sdk_exemplar.trace_id is not None + ): + pb_exemplar = pb2.Exemplar( + time_unix_nano=sdk_exemplar.time_unix_nano, + span_id=_encode_span_id(sdk_exemplar.span_id), + trace_id=_encode_trace_id(sdk_exemplar.trace_id), + filtered_attributes=_encode_attributes( + sdk_exemplar.filtered_attributes + ), + ) + else: + pb_exemplar = pb2.Exemplar( + time_unix_nano=sdk_exemplar.time_unix_nano, + filtered_attributes=_encode_attributes( + sdk_exemplar.filtered_attributes + ), + ) + # Assign the value based on its type in the SDK exemplar if isinstance(sdk_exemplar.value, float): pb_exemplar.as_double = sdk_exemplar.value From 3b5454415b4018f621a568a37788edaf8c7ac128 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Tue, 15 Oct 2024 17:33:17 +0200 Subject: [PATCH 43/48] Add test and improve code --- .../exporter/prometheus/__init__.py | 80 +++++++++++-------- .../tests/test_prometheus_exporter.py | 80 +++++++++++++++++-- 2 files changed, 122 insertions(+), 38 deletions(-) diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py index 083dcdd6ae..56eda6216c 100644 --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py +++ 
b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -89,9 +89,10 @@ OTEL_EXPORTER_PROMETHEUS_PORT, OTEL_PYTHON_EXPERIMENTAL_DISABLE_PROMETHEUS_UNIT_NORMALIZATION, ) -from opentelemetry.sdk.metrics import Counter -from opentelemetry.sdk.metrics import Histogram as HistogramInstrument from opentelemetry.sdk.metrics import ( + Counter, + Exemplar, + Histogram as HistogramInstrument, ObservableCounter, ObservableGauge, ObservableUpDownCounter, @@ -107,7 +108,8 @@ Sum, ) from opentelemetry.util.types import Attributes -from opentelemetry.sdk.metrics._internal import Exemplar +from opentelemetry.trace import format_span_id, format_trace_id + from prometheus_client.samples import Exemplar as PrometheusExemplar @@ -118,29 +120,33 @@ def _convert_buckets( - bucket_counts: Sequence[int], explicit_bounds: Sequence[float], exemplars: Sequence[Optional[PrometheusExemplar]] = None + bucket_counts: Sequence[int], + explicit_bounds: Sequence[float], + exemplars: Optional[Sequence[PrometheusExemplar]] = None, ) -> Sequence[Tuple[str, int, Optional[Exemplar]]]: buckets = [] total_count = 0 - previous_bound = float('-inf') + previous_bound = float("-inf") + + exemplars = list(reversed(exemplars or [])) + exemplar = exemplars.pop() if exemplars else None for upper_bound, count in zip( chain(explicit_bounds, ["+Inf"]), bucket_counts, ): total_count += count - buckets.append((f"{upper_bound}", total_count, None)) - - # assigning exemplars to their corresponding values - if exemplars: - for i, (upper_bound, _, _) in enumerate(buckets): - for exemplar in exemplars: - if previous_bound <= exemplar.value < float(upper_bound): - # Assign the exemplar to the current bucket if it's the first valid one found - _, current_count, current_exemplar = buckets[i] - if current_exemplar is None: # Only assign if no exemplar has been assigned yet - buckets[i] = (upper_bound, current_count, exemplar) - previous_bound = float(upper_bound) + current_exemplar = 
None + upper_bound_f = float(upper_bound) + while exemplar and previous_bound <= exemplar.value < upper_bound_f: + if current_exemplar is None: + # Assign the exemplar to the current bucket if it's the first valid one found + current_exemplar = exemplar + exemplar = exemplars.pop() if exemplars else None + previous_bound = upper_bound_f + + buckets.append((f"{upper_bound}", total_count, current_exemplar)) + return buckets @@ -266,10 +272,10 @@ def _translate_to_prometheus( label_keys = [] label_values = [] exemplars = [ - self._convert_exemplar(ex) for ex in number_data_point.exemplars + self._convert_exemplar(ex) + for ex in number_data_point.exemplars ] - for key, value in sorted(number_data_point.attributes.items()): label_keys.append(sanitize_attribute(key)) label_values.append(self._check_value(value)) @@ -322,7 +328,6 @@ def _translate_to_prometheus( isinstance(metric.data, Sum) and not should_convert_sum_to_gauge ): - metric_family_id = "|".join( [pre_metric_family_id, CounterMetricFamily.__name__] ) @@ -343,7 +348,6 @@ def _translate_to_prometheus( isinstance(metric.data, Gauge) or should_convert_sum_to_gauge ): - metric_family_id = "|".join( [pre_metric_family_id, GaugeMetricFamily.__name__] ) @@ -364,12 +368,11 @@ def _translate_to_prometheus( metric_family_id ].add_metric(labels=label_values, value=value) elif isinstance(metric.data, Histogram): - metric_family_id = "|".join( [pre_metric_family_id, HistogramMetricFamily.__name__] ) - if ( + if ( metric_family_id not in metric_family_id_metric_family.keys() ): @@ -378,7 +381,7 @@ def _translate_to_prometheus( name=metric_name, documentation=metric_description, labels=label_keys, - unit=metric_unit, + unit=metric_unit, ) ) metric_family_id_metric_family[ @@ -386,7 +389,9 @@ def _translate_to_prometheus( ].add_metric( labels=label_values, buckets=_convert_buckets( - value["bucket_counts"], value["explicit_bounds"], value["exemplars"] + value["bucket_counts"], + value["explicit_bounds"], + 
value["exemplars"], ), sum_value=value["sum"], ) @@ -414,7 +419,7 @@ def _create_info_metric( info = InfoMetricFamily(name, description, labels=attributes) info.add_metric(labels=list(attributes.keys()), value=attributes) return info - + def _convert_exemplar(self, exemplar_data: Exemplar) -> PrometheusExemplar: """ Converts the SDK exemplar into a Prometheus Exemplar, including proper time conversion. @@ -426,18 +431,27 @@ def _convert_exemplar(self, exemplar_data: Exemplar) -> PrometheusExemplar: Returns: - Exemplar: A Prometheus Exemplar object with correct labeling and timing. """ - labels = {self._sanitize_label(key): str(value) for key, value in exemplar_data.filtered_attributes.items()} - + labels = { + sanitize_attribute(key): str(value) + for key, value in exemplar_data.filtered_attributes.items() + } + # Add trace_id and span_id to labels only if they are valid and not None - if exemplar_data.trace_id and exemplar_data.span_id: - labels['trace_id'] = exemplar_data.trace_id - labels['span_id'] = exemplar_data.span_id - + if ( + exemplar_data.trace_id is not None + and exemplar_data.span_id is not None + ): + labels["trace_id"] = format_trace_id(exemplar_data.trace_id) + labels["span_id"] = format_span_id(exemplar_data.span_id) + # Convert time from nanoseconds to seconds timestamp_seconds = exemplar_data.time_unix_nano / 1e9 - prom_exemplar = PrometheusExemplar(labels, exemplar_data.value, timestamp_seconds) + prom_exemplar = PrometheusExemplar( + labels, exemplar_data.value, timestamp_seconds + ) return prom_exemplar + class _AutoPrometheusMetricReader(PrometheusMetricReader): """Thin wrapper around PrometheusMetricReader used for the opentelemetry_metrics_exporter entry point. 
diff --git a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py index ed11bb9ab7..847d54062d 100644 --- a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py +++ b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py @@ -23,6 +23,9 @@ GaugeMetricFamily, InfoMetricFamily, ) +from prometheus_client.openmetrics.exposition import ( + generate_latest as openmetrics_generate_latest, +) from opentelemetry.exporter.prometheus import ( PrometheusMetricReader, @@ -31,7 +34,7 @@ from opentelemetry.sdk.environment_variables import ( OTEL_PYTHON_EXPERIMENTAL_DISABLE_PROMETHEUS_UNIT_NORMALIZATION, ) -from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics import MeterProvider, Exemplar from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Histogram, @@ -42,6 +45,7 @@ ScopeMetrics, ) from opentelemetry.sdk.resources import Resource +from opentelemetry.trace import format_span_id, format_trace_id from opentelemetry.test.metrictestutil import ( _generate_gauge, _generate_histogram, @@ -59,7 +63,10 @@ def setUp(self): ) def verify_text_format( - self, metric: Metric, expect_prometheus_text: str + self, + metric: Metric, + expect_prometheus_text: str, + openmetrics_generator: bool = False, ) -> None: metrics_data = MetricsData( resource_metrics=[ @@ -79,7 +86,11 @@ def verify_text_format( collector = _CustomCollector(disable_target_info=True) collector.add_metrics_data(metrics_data) - result_bytes = generate_latest(collector) + result_bytes = ( + openmetrics_generate_latest(collector) + if openmetrics_generator + else generate_latest(collector) + ) result = result_bytes.decode("utf-8") self.assertEqual(result, expect_prometheus_text) @@ -135,6 +146,67 @@ def test_histogram_to_prometheus(self): ), ) + def test_histogram_with_exemplar_to_prometheus(self): + span_id = 10217189687419569865 + 
trace_id = 67545097771067222548457157018666467027 + metric = Metric( + name="test@name", + description="foo", + unit="s", + data=Histogram( + data_points=[ + HistogramDataPoint( + attributes={"histo": 1}, + start_time_unix_nano=1641946016139533244, + time_unix_nano=1641946016139533244, + exemplars=[ + Exemplar( + {"filtered": "banana"}, + 305.0, + 1641946016139533244, + span_id, + trace_id, + ), + # Will be ignored as part of the same buckets + Exemplar( + {"filtered": "banana"}, + 298.0, + 1641946016139533400, + span_id, + trace_id, + ), + ], + count=6, + sum=579.0, + bucket_counts=[1, 3, 2], + explicit_bounds=[123.0, 456.0], + min=1, + max=457, + ) + ], + aggregation_temporality=AggregationTemporality.DELTA, + ), + ) + span_str = format_span_id(span_id) + trace_str = format_trace_id(trace_id) + self.verify_text_format( + metric, + dedent( + f"""\ + # HELP test_name_seconds foo + # TYPE test_name_seconds histogram + # UNIT test_name_seconds seconds + test_name_seconds_bucket{{histo="1",le="123.0"}} 1.0 + test_name_seconds_bucket{{histo="1",le="456.0"}} 4.0 # {{filtered="banana",span_id="{span_str}",trace_id="{trace_str}"}} 305.0 1641946016.1395333 + test_name_seconds_bucket{{histo="1",le="+Inf"}} 6.0 + test_name_seconds_count{{histo="1"}} 6.0 + test_name_seconds_sum{{histo="1"}} 579.0 + # EOF + """ + ), + openmetrics_generator=True, + ) + def test_monotonic_sum_to_prometheus(self): labels = {"environment@": "staging", "os": "Windows"} metric = _generate_sum( @@ -321,7 +393,6 @@ def test_list_labels(self): self.assertEqual(prometheus_metric.samples[0].labels["os"], "Unix") def test_check_value(self): - collector = _CustomCollector() self.assertEqual(collector._check_value(1), "1") @@ -335,7 +406,6 @@ def test_check_value(self): self.assertEqual(collector._check_value(None), "null") def test_multiple_collection_calls(self): - metric_reader = PrometheusMetricReader() provider = MeterProvider(metric_readers=[metric_reader]) meter = 
provider.get_meter("getting-started", "0.1.2") From a6c9cef050ce746463066c9d94d26907017c436c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 16 Oct 2024 09:40:58 +0200 Subject: [PATCH 44/48] Fix typing --- .../otlp/proto/common/_internal/metrics_encoder/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py index 746a5813aa..4b8ddf85d7 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import List from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, @@ -345,12 +346,12 @@ def _encode_metric(metric, pb2_metric): ) -def _encode_exemplars(sdk_exemplars: list[Exemplar]) -> list: +def _encode_exemplars(sdk_exemplars: List[Exemplar]) -> List[pb2.Exemplar]: """ Converts a list of SDK Exemplars into a list of protobuf Exemplars. Args: - sdk_exemplars (list): The list of exemplars from the OpenTelemetry SDK. + sdk_exemplars: The list of exemplars from the OpenTelemetry SDK. Returns: list: A list of protobuf exemplars. 
From 3e23cf15d65371a29278e97fcae7c56a278efed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 16 Oct 2024 09:41:06 +0200 Subject: [PATCH 45/48] Add entry in changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33438d21ed..53c8b6c556 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#4182](https://github.com/open-telemetry/opentelemetry-python/pull/4182)) - sdk: Implementation of exemplars ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094)) +- sdk: Add exemplars to the Prometheus exporter + ([#4178](https://github.com/open-telemetry/opentelemetry-python/pull/4178)) - Implement events sdk ([#4176](https://github.com/open-telemetry/opentelemetry-python/pull/4176)) - Update semantic conventions to version 1.28.0 From 9f0a2c746efff85c96511828bda01b3d1675908e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 16 Oct 2024 09:49:35 +0200 Subject: [PATCH 46/48] Lint with ruff --- .../proto/common/_internal/metrics_encoder/__init__.py | 8 ++++++-- .../src/opentelemetry/exporter/prometheus/__init__.py | 4 +++- .../tests/test_prometheus_exporter.py | 4 ++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py index 3fa856ce50..ae0b8a314f 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py @@ -43,13 +43,17 @@ ) from 
opentelemetry.sdk.metrics.export import ( AggregationTemporality, - ExponentialHistogram as ExponentialHistogramType, Gauge, - Histogram as HistogramType, MetricExporter, MetricsData, Sum, ) +from opentelemetry.sdk.metrics.export import ( + ExponentialHistogram as ExponentialHistogramType, +) +from opentelemetry.sdk.metrics.export import ( + Histogram as HistogramType, +) from opentelemetry.sdk.metrics.view import ( Aggregation, ExplicitBucketHistogramAggregation, diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py index 7674b89971..d115f1e017 100644 --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py +++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -93,12 +93,14 @@ from opentelemetry.sdk.metrics import ( Counter, Exemplar, - Histogram as HistogramInstrument, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) +from opentelemetry.sdk.metrics import ( + Histogram as HistogramInstrument, +) from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Gauge, diff --git a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py index 847d54062d..121434e981 100644 --- a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py +++ b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py @@ -34,7 +34,7 @@ from opentelemetry.sdk.environment_variables import ( OTEL_PYTHON_EXPERIMENTAL_DISABLE_PROMETHEUS_UNIT_NORMALIZATION, ) -from opentelemetry.sdk.metrics import MeterProvider, Exemplar +from opentelemetry.sdk.metrics import Exemplar, MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Histogram, @@ -45,13 +45,13 @@ 
ScopeMetrics, ) from opentelemetry.sdk.resources import Resource -from opentelemetry.trace import format_span_id, format_trace_id from opentelemetry.test.metrictestutil import ( _generate_gauge, _generate_histogram, _generate_sum, _generate_unsupported_metric, ) +from opentelemetry.trace import format_span_id, format_trace_id class TestPrometheusMetricReader(TestCase): From db2e53daaca61d3adcec0e430f7ec1930f162f0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 16 Oct 2024 10:06:32 +0200 Subject: [PATCH 47/48] Ignore pylint error in test file --- .../tests/test_prometheus_exporter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py index 121434e981..b861b4f0f7 100644 --- a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py +++ b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py @@ -55,6 +55,8 @@ class TestPrometheusMetricReader(TestCase): + # pylint: disable=too-many-public-methods + def setUp(self): self._mock_registry_register = Mock() self._registry_register_patch = patch( From a4d4b474b3c305e158f4ad9504c1e2bac6635901 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Collonval?= Date: Wed, 6 Nov 2024 11:05:49 +0100 Subject: [PATCH 48/48] Move changelog entry to the proper place --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b2630f669..6ea7b84459 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- sdk: Add exemplars to the Prometheus exporter + ([#4178](https://github.com/open-telemetry/opentelemetry-python/pull/4178)) + ## Version 1.28.0/0.49b0 (2024-11-05) - Removed superfluous py.typed markers and added them where they were 
missing @@ -19,8 +22,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#4182](https://github.com/open-telemetry/opentelemetry-python/pull/4182)) - sdk: Implementation of exemplars ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094)) -- sdk: Add exemplars to the Prometheus exporter - ([#4178](https://github.com/open-telemetry/opentelemetry-python/pull/4178)) - Implement events sdk ([#4176](https://github.com/open-telemetry/opentelemetry-python/pull/4176)) - Update semantic conventions to version 1.28.0