From 3be377c661b103105179e9e8dd7137fc720fda0c Mon Sep 17 00:00:00 2001
From: Aaron Abbott
Date: Fri, 22 Mar 2024 22:26:23 +0000
Subject: [PATCH] Run mypy on opentelemetry-sdk package

---
 mypy-upgrade.sh                               |  13 ++
 .../sdk/_configuration/__init__.py            |  98 ++++----
 .../sdk/_logs/_internal/__init__.py           | 136 ++++++------
 .../sdk/_logs/_internal/export/__init__.py    |  82 +++----
 .../export/in_memory_log_exporter.py          |   8 +-
 .../sdk/error_handler/__init__.py             |  24 +-
 .../sdk/metrics/_internal/__init__.py         | 210 +++++++++---------
 .../_internal/_view_instrument_match.py       |  64 +++---
 .../sdk/metrics/_internal/aggregation.py      | 204 +++++++++--------
 .../exponential_histogram/buckets.py          |  12 +-
 .../exponential_histogram/mapping/__init__.py |  16 +-
 .../mapping/exponent_mapping.py               |  22 +-
 .../exponential_histogram/mapping/ieee_754.py |  10 +-
 .../mapping/logarithm_mapping.py              |  30 +--
 .../sdk/metrics/_internal/export/__init__.py  | 190 ++++++++--------
 .../sdk/metrics/_internal/instrument.py       |  90 ++++----
 .../metrics/_internal/measurement_consumer.py |  24 +-
 .../_internal/metric_reader_storage.py        |  75 ++++---
 .../sdk/metrics/_internal/point.py            |  90 ++++----
 .../metrics/_internal/sdk_configuration.py    |   4 +-
 .../sdk/metrics/_internal/view.py             |  12 +-
 .../sdk/metrics/export/__init__.py            |   2 +-
 .../opentelemetry/sdk/resources/__init__.py   |  52 ++---
 .../src/opentelemetry/sdk/trace/__init__.py   | 162 +++++++-------
 .../sdk/trace/export/__init__.py              |  70 +++---
 .../src/opentelemetry/sdk/trace/sampling.py   |  24 +-
 .../src/opentelemetry/sdk/util/__init__.pyi   |   4 +-
 .../opentelemetry/sdk/util/instrumentation.py |  32 +--
 .../src/opentelemetry/test/spantestutil.py    |   8 +-
 tox.ini                                       |  12 +-
 30 files changed, 910 insertions(+), 870 deletions(-)
 create mode 100755 mypy-upgrade.sh

diff --git a/mypy-upgrade.sh b/mypy-upgrade.sh
new file mode 100755
index 00000000000..54f34c5af28
--- /dev/null
+++ b/mypy-upgrade.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# This is how I generated the type-ignore fix-me comments automatically.
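For context on the tool invoked below: mypy-upgrade reads rows of the form
"path/to/file.py:123: error: message  [code]" from a mypy report produced
with --show-error-codes, and appends a "# type: ignore[code]" comment (plus
the --fix-me suffix, empty here) to each offending line. A minimal sketch of
that transformation, not the real mypy-upgrade implementation:

    import re
    from collections import defaultdict
    from typing import Dict, Set, Tuple

    # Matches mypy rows such as "foo/bar.py:12: error: Oops  [misc]".
    ERROR_RE = re.compile(
        r"^(?P<path>[^:]+):(?P<line>\d+): error: .*\[(?P<code>[\w-]+)\]\s*$"
    )

    def add_ignore_comments(report_path: str) -> None:
        # Group every error code reported for each (file, line) pair.
        ignores: Dict[Tuple[str, int], Set[str]] = defaultdict(set)
        with open(report_path) as report:
            for row in report:
                match = ERROR_RE.match(row.strip())
                if match:
                    key = (match["path"], int(match["line"]))
                    ignores[key].add(match["code"])

        for (path, lineno), codes in ignores.items():
            with open(path) as src:
                lines = src.readlines()
            # Append one combined comment per line, mirroring the hunks
            # below, e.g. "# type: ignore[misc, no-untyped-def] #".
            lines[lineno - 1] = (
                lines[lineno - 1].rstrip("\n")
                + f"  # type: ignore[{', '.join(sorted(codes))}] #\n"
            )
            with open(path, "w") as src:
                src.writelines(lines)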
+# planning to remove this after or I can move it to scripts dir + +export MYPYPATH=$PWD/opentelemetry-api/src/:$PWD/opentelemetry-sdk/src/:$PWD/tests/opentelemetry-test-utils/src/:$PWD/opentelemetry-semantic-conventions/src/ + +# src +.tox/mypysdk/bin/mypy --namespace-packages --explicit-package-bases --show-error-codes opentelemetry-sdk/src/opentelemetry/ > mypy_report.txt +mypy-upgrade --summarize -r mypy_report.txt --fix-me '' + +# tests +# .tox/mypysdk/bin/mypy --namespace-packages --show-error-codes --config-file=mypy-relaxed.ini opentelemetry-sdk/tests/ > mypy_report.txt +# mypy-upgrade --summarize -r mypy_report.txt --fix-me '' diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py index 33c5147a599..7a0dbd11910 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py @@ -96,11 +96,11 @@ def _import_config_components( for selected_component in selected_components: try: - component_implementations.append( - ( + component_implementations.append( # type: ignore[misc] # + ( # type: ignore[misc] # selected_component, - next( - iter( + next( # type: ignore[misc] # + iter( # type: ignore[misc] # entry_points( group=entry_point_name, name=selected_component ) @@ -121,7 +121,7 @@ def _import_config_components( f"entry point '{entry_point_name}'" ) - return component_implementations + return component_implementations # type: ignore[misc] # def _get_sampler() -> Optional[str]: @@ -132,7 +132,7 @@ def _get_id_generator() -> str: return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR) -def _get_exporter_entry_point( +def _get_exporter_entry_point( # type: ignore[no-untyped-def] # exporter_name: str, signal_type: Literal["traces", "metrics", "logs"] ): if exporter_name not in ( @@ -185,16 +185,16 @@ def _get_exporter_names( return [] return [ - _get_exporter_entry_point(_exporter.strip(), signal_type) + _get_exporter_entry_point(_exporter.strip(), signal_type) # type: ignore[misc] # for _exporter in names.split(",") ] -def _init_tracing( +def _init_tracing( # type: ignore[no-untyped-def] # exporters: Dict[str, Type[SpanExporter]], - id_generator: IdGenerator = None, - sampler: Sampler = None, - resource: Resource = None, + id_generator: IdGenerator = None, # type: ignore[assignment] # + sampler: Sampler = None, # type: ignore[assignment] # + resource: Resource = None, # type: ignore[assignment] # ): provider = TracerProvider( id_generator=id_generator, @@ -204,29 +204,29 @@ def _init_tracing( set_tracer_provider(provider) for _, exporter_class in exporters.items(): - exporter_args = {} + exporter_args = {} # type: ignore[var-annotated] # provider.add_span_processor( - BatchSpanProcessor(exporter_class(**exporter_args)) + BatchSpanProcessor(exporter_class(**exporter_args)) # type: ignore[misc] # ) -def _init_metrics( +def _init_metrics( # type: ignore[no-untyped-def] # exporters_or_readers: Dict[ str, Union[Type[MetricExporter], Type[MetricReader]] ], - resource: Resource = None, + resource: Resource = None, # type: ignore[assignment] # ): metric_readers = [] for _, exporter_or_reader_class in exporters_or_readers.items(): - exporter_args = {} + exporter_args = {} # type: ignore[var-annotated] # if issubclass(exporter_or_reader_class, MetricReader): - metric_readers.append(exporter_or_reader_class(**exporter_args)) + metric_readers.append(exporter_or_reader_class(**exporter_args)) # type: 
ignore[misc] # else: metric_readers.append( PeriodicExportingMetricReader( - exporter_or_reader_class(**exporter_args) + exporter_or_reader_class(**exporter_args) # type: ignore[misc] # ) ) @@ -234,17 +234,17 @@ def _init_metrics( set_meter_provider(provider) -def _init_logging( +def _init_logging( # type: ignore[no-untyped-def] # exporters: Dict[str, Type[LogExporter]], - resource: Resource = None, + resource: Resource = None, # type: ignore[assignment] # ): provider = LoggerProvider(resource=resource) set_logger_provider(provider) for _, exporter_class in exporters.items(): - exporter_args = {} + exporter_args = {} # type: ignore[var-annotated] # provider.add_log_record_processor( - BatchLogRecordProcessor(exporter_class(**exporter_args)) + BatchLogRecordProcessor(exporter_class(**exporter_args)) # type: ignore[misc] # ) handler = LoggingHandler(level=logging.NOTSET, logger_provider=provider) @@ -266,39 +266,39 @@ def _import_exporters( log_exporters = {} for (exporter_name, exporter_impl,) in _import_config_components( - trace_exporter_names, "opentelemetry_traces_exporter" + trace_exporter_names, "opentelemetry_traces_exporter" # type: ignore[arg-type] # ): - if issubclass(exporter_impl, SpanExporter): + if issubclass(exporter_impl, SpanExporter): # type: ignore[arg-type] # trace_exporters[exporter_name] = exporter_impl else: raise RuntimeError(f"{exporter_name} is not a trace exporter") for (exporter_name, exporter_impl,) in _import_config_components( - metric_exporter_names, "opentelemetry_metrics_exporter" + metric_exporter_names, "opentelemetry_metrics_exporter" # type: ignore[arg-type] # ): # The metric exporter components may be push MetricExporter or pull exporters which # subclass MetricReader directly - if issubclass(exporter_impl, (MetricExporter, MetricReader)): + if issubclass(exporter_impl, (MetricExporter, MetricReader)): # type: ignore[arg-type] # metric_exporters[exporter_name] = exporter_impl else: raise RuntimeError(f"{exporter_name} is not a metric exporter") for (exporter_name, exporter_impl,) in _import_config_components( - log_exporter_names, "opentelemetry_logs_exporter" + log_exporter_names, "opentelemetry_logs_exporter" # type: ignore[arg-type] # ): - if issubclass(exporter_impl, LogExporter): + if issubclass(exporter_impl, LogExporter): # type: ignore[arg-type] # log_exporters[exporter_name] = exporter_impl else: raise RuntimeError(f"{exporter_name} is not a log exporter") - return trace_exporters, metric_exporters, log_exporters + return trace_exporters, metric_exporters, log_exporters # type: ignore[return-value] # def _import_sampler_factory(sampler_name: str) -> Callable[[str], Sampler]: _, sampler_impl = _import_config_components( [sampler_name.strip()], _OTEL_SAMPLER_ENTRY_POINT_GROUP )[0] - return sampler_impl + return sampler_impl # type: ignore[return-value] # def _import_sampler(sampler_name: str) -> Optional[Sampler]: @@ -309,7 +309,7 @@ def _import_sampler(sampler_name: str) -> Optional[Sampler]: arg = None if sampler_name in ("traceidratio", "parentbased_traceidratio"): try: - rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG)) + rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG)) # type: ignore[arg-type] # except (ValueError, TypeError): _logger.warning( "Could not convert TRACES_SAMPLER_ARG to float. Using default value 1.0." 
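As a usage note for the sampler plumbing above: the chosen sampler and its
argument come entirely from environment variables. An assumed example:

    import os

    # Sample 25% of new traces, deferring to the parent span's decision
    # when one exists.
    os.environ["OTEL_TRACES_SAMPLER"] = "parentbased_traceidratio"
    os.environ["OTEL_TRACES_SAMPLER_ARG"] = "0.25"

If OTEL_TRACES_SAMPLER_ARG is unset or cannot be parsed as a float, the code
above logs the warning and falls back to a rate of 1.0.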
@@ -317,9 +317,9 @@ def _import_sampler(sampler_name: str) -> Optional[Sampler]: rate = 1.0 arg = rate else: - arg = os.getenv(OTEL_TRACES_SAMPLER_ARG) + arg = os.getenv(OTEL_TRACES_SAMPLER_ARG) # type: ignore[assignment] # - sampler = sampler_factory(arg) + sampler = sampler_factory(arg) # type: ignore[arg-type] # if not isinstance(sampler, Sampler): message = f"Sampler factory, {sampler_factory}, produced output, {sampler}, which is not a Sampler." _logger.warning(message) @@ -339,36 +339,36 @@ def _import_id_generator(id_generator_name: str) -> IdGenerator: [id_generator_name.strip()], "opentelemetry_id_generator" )[0] - if issubclass(id_generator_impl, IdGenerator): - return id_generator_impl() + if issubclass(id_generator_impl, IdGenerator): # type: ignore[arg-type] # + return id_generator_impl() # type: ignore[misc, no-any-return, operator] # raise RuntimeError(f"{id_generator_name} is not an IdGenerator") -def _initialize_components(auto_instrumentation_version): +def _initialize_components(auto_instrumentation_version): # type: ignore[no-untyped-def] # trace_exporters, metric_exporters, log_exporters = _import_exporters( _get_exporter_names("traces"), _get_exporter_names("metrics"), _get_exporter_names("logs"), ) sampler_name = _get_sampler() - sampler = _import_sampler(sampler_name) + sampler = _import_sampler(sampler_name) # type: ignore[arg-type] # id_generator_name = _get_id_generator() id_generator = _import_id_generator(id_generator_name) # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name # from the env variable else defaults to "unknown_service" auto_resource = {} # populate version if using auto-instrumentation - if auto_instrumentation_version: - auto_resource[ + if auto_instrumentation_version: # type: ignore[misc] # + auto_resource[ # type: ignore[misc] # ResourceAttributes.TELEMETRY_AUTO_VERSION - ] = auto_instrumentation_version - resource = Resource.create(auto_resource) + ] = auto_instrumentation_version # type: ignore[misc] # + resource = Resource.create(auto_resource) # type: ignore[misc] # _init_tracing( exporters=trace_exporters, id_generator=id_generator, - sampler=sampler, + sampler=sampler, # type: ignore[arg-type] # resource=resource, ) _init_metrics(metric_exporters, resource) @@ -390,20 +390,20 @@ class _BaseConfigurator(ABC): _instance = None _is_instrumented = False - def __new__(cls, *args, **kwargs): + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # if cls._instance is None: - cls._instance = object.__new__(cls, *args, **kwargs) + cls._instance = object.__new__(cls, *args, **kwargs) # type: ignore[misc] # return cls._instance @abstractmethod - def _configure(self, **kwargs): + def _configure(self, **kwargs): # type: ignore[misc, no-untyped-def] # """Configure the SDK""" - def configure(self, **kwargs): + def configure(self, **kwargs): # type: ignore[no-untyped-def] # """Configure the SDK""" - self._configure(**kwargs) + self._configure(**kwargs) # type: ignore[misc, no-untyped-call] # class _OTelSDKConfigurator(_BaseConfigurator): @@ -418,5 +418,5 @@ class _OTelSDKConfigurator(_BaseConfigurator): this Configurator and enhance it as needed. 
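
    A vendor subclass might look like this (illustrative sketch, not a class
    from this repository)::

        class MyDistroConfigurator(_OTelSDKConfigurator):
            def _configure(self, **kwargs):
                super()._configure(**kwargs)
                # distro-specific setup (extra exporters, resource
                # detectors, ...) goes here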
""" - def _configure(self, **kwargs): - _initialize_components(kwargs.get("auto_instrumentation_version")) + def _configure(self, **kwargs): # type: ignore[no-untyped-def] # + _initialize_components(kwargs.get("auto_instrumentation_version")) # type: ignore[misc, no-untyped-call] # diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py index 8ba0dae6f2e..a920e3a9774 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py @@ -33,7 +33,9 @@ get_logger_provider, std_to_otel, ) -from opentelemetry.attributes import BoundedAttributes +from opentelemetry.attributes import ( + BoundedAttributes, # type: ignore[attr-defined] # +) from opentelemetry.sdk.environment_variables import ( OTEL_ATTRIBUTE_COUNT_LIMIT, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, @@ -111,7 +113,7 @@ def __init__( OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, ) - def __repr__(self): + def __repr__(self): # type: ignore[no-untyped-def] # return f"{type(self).__name__}(max_attributes={self.max_attributes}, max_attribute_length={self.max_attribute_length})" @classmethod @@ -172,7 +174,7 @@ def __init__( limits: Optional[LogLimits] = _UnsetLogLimits, ): super().__init__( - **{ + **{ # type: ignore[misc] # "timestamp": timestamp, "observed_timestamp": observed_timestamp, "trace_id": trace_id, @@ -180,33 +182,33 @@ def __init__( "trace_flags": trace_flags, "severity_text": severity_text, "severity_number": severity_number, - "body": body, - "attributes": BoundedAttributes( - maxlen=limits.max_attributes, + "body": body, # type: ignore[misc] # + "attributes": BoundedAttributes( # type: ignore[misc] # + maxlen=limits.max_attributes, # type: ignore[misc, union-attr] # attributes=attributes if bool(attributes) else None, immutable=False, - max_value_len=limits.max_attribute_length, + max_value_len=limits.max_attribute_length, # type: ignore[misc, union-attr] # ), } ) self.resource = resource def __eq__(self, other: object) -> bool: - if not isinstance(other, LogRecord): + if not isinstance(other, LogRecord): # type: ignore[misc] # return NotImplemented - return self.__dict__ == other.__dict__ + return self.__dict__ == other.__dict__ # type: ignore[misc, no-any-return] # - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return json.dumps( - { - "body": self.body, + { # type: ignore[misc] # + "body": self.body, # type: ignore[misc] # "severity_number": repr(self.severity_number), "severity_text": self.severity_text, - "attributes": dict(self.attributes) + "attributes": dict(self.attributes) # type: ignore[arg-type, misc] # if bool(self.attributes) else None, "dropped_attributes": self.dropped_attributes, - "timestamp": ns_to_iso_str(self.timestamp), + "timestamp": ns_to_iso_str(self.timestamp), # type: ignore[arg-type] # "observed_timestamp": ns_to_iso_str(self.observed_timestamp), "trace_id": f"0x{format_trace_id(self.trace_id)}" if self.trace_id is not None @@ -219,13 +221,13 @@ def to_json(self, indent=4) -> str: if self.resource else "", }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @property def dropped_attributes(self) -> int: if self.attributes: - return self.attributes.dropped + return self.attributes.dropped # type: ignore[attr-defined, misc, no-any-return] # return 0 @@ -250,15 +252,15 @@ class LogRecordProcessor(abc.ABC): """ @abc.abstractmethod - def emit(self, log_data: 
LogData): + def emit(self, log_data: LogData): # type: ignore[misc, no-untyped-def] # """Emits the `LogData`""" @abc.abstractmethod - def shutdown(self): + def shutdown(self): # type: ignore[misc, no-untyped-def] # """Called when a :class:`opentelemetry.sdk._logs.Logger` is shutdown""" @abc.abstractmethod - def force_flush(self, timeout_millis: int = 30000): + def force_flush(self, timeout_millis: int = 30000): # type: ignore[misc, no-untyped-def] # """Export all the received logs to the configured Exporter that have not yet been exported. @@ -281,7 +283,7 @@ class SynchronousMultiLogRecordProcessor(LogRecordProcessor): added. """ - def __init__(self): + def __init__(self): # type: ignore[no-untyped-def] # # use a tuple to avoid race conditions when adding a new log and # iterating through it on "emit". self._log_record_processors = () # type: Tuple[LogRecordProcessor, ...] @@ -301,7 +303,7 @@ def emit(self, log_data: LogData) -> None: def shutdown(self) -> None: """Shutdown the log processors one by one""" for lp in self._log_record_processors: - lp.shutdown() + lp.shutdown() # type: ignore[no-untyped-call] # def force_flush(self, timeout_millis: int = 30000) -> bool: """Force flush the log processors one by one @@ -321,7 +323,7 @@ def force_flush(self, timeout_millis: int = 30000) -> bool: if current_ts >= deadline_ns: return False - if not lp.force_flush((deadline_ns - current_ts) // 1000000): + if not lp.force_flush((deadline_ns - current_ts) // 1000000): # type: ignore[misc] # return False return True @@ -349,13 +351,13 @@ def __init__(self, max_workers: int = 2): max_workers=max_workers ) - def add_log_record_processor( + def add_log_record_processor( # type: ignore[no-untyped-def] # self, log_record_processor: LogRecordProcessor ): with self._lock: self._log_record_processors += (log_record_processor,) - def _submit_and_wait( + def _submit_and_wait( # type: ignore[no-untyped-def] # self, func: Callable[[LogRecordProcessor], Callable[..., None]], *args: Any, @@ -363,16 +365,16 @@ def _submit_and_wait( ): futures = [] for lp in self._log_record_processors: - future = self._executor.submit(func(lp), *args, **kwargs) + future = self._executor.submit(func(lp), *args, **kwargs) # type: ignore[misc] # futures.append(future) for future in futures: future.result() - def emit(self, log_data: LogData): - self._submit_and_wait(lambda lp: lp.emit, log_data) + def emit(self, log_data: LogData): # type: ignore[no-untyped-def] # + self._submit_and_wait(lambda lp: lp.emit, log_data) # type: ignore[misc] # - def shutdown(self): - self._submit_and_wait(lambda lp: lp.shutdown) + def shutdown(self): # type: ignore[no-untyped-def] # + self._submit_and_wait(lambda lp: lp.shutdown) # type: ignore[misc] # def force_flush(self, timeout_millis: int = 30000) -> bool: """Force flush the log processors in parallel. 
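To ground how the processor classes above fit together, a typical wiring of
the public pieces (assumed usage, not code from this patch):

    import logging

    from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
    from opentelemetry.sdk._logs.export import (
        BatchLogRecordProcessor,
        ConsoleLogExporter,
    )

    provider = LoggerProvider()
    # Fan-out to one or more processors happens through the multi-processor
    # implementations shown above.
    provider.add_log_record_processor(
        BatchLogRecordProcessor(ConsoleLogExporter())
    )
    logging.getLogger().addHandler(LoggingHandler(logger_provider=provider))
    logging.getLogger(__name__).warning("routed through OpenTelemetry logs")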
@@ -387,18 +389,18 @@ def force_flush(self, timeout_millis: int = 30000) -> bool: """ futures = [] for lp in self._log_record_processors: - future = self._executor.submit(lp.force_flush, timeout_millis) - futures.append(future) + future = self._executor.submit(lp.force_flush, timeout_millis) # type: ignore[misc] # + futures.append(future) # type: ignore[misc] # - done_futures, not_done_futures = concurrent.futures.wait( - futures, timeout_millis / 1e3 + done_futures, not_done_futures = concurrent.futures.wait( # type: ignore[misc] # + futures, timeout_millis / 1e3 # type: ignore[misc] # ) - if not_done_futures: + if not_done_futures: # type: ignore[misc] # return False - for future in done_futures: - if not future.result(): + for future in done_futures: # type: ignore[misc] # + if not future.result(): # type: ignore[misc] # return False return True @@ -441,40 +443,40 @@ class LoggingHandler(logging.Handler): https://docs.python.org/3/library/logging.html """ - def __init__( + def __init__( # type: ignore[no-untyped-def] # self, level=logging.NOTSET, logger_provider=None, ) -> None: - super().__init__(level=level) - self._logger_provider = logger_provider or get_logger_provider() + super().__init__(level=level) # type: ignore[misc] # + self._logger_provider = logger_provider or get_logger_provider() # type: ignore[misc] # self._logger = get_logger( - __name__, logger_provider=self._logger_provider + __name__, logger_provider=self._logger_provider # type: ignore[misc] # ) @staticmethod def _get_attributes(record: logging.LogRecord) -> Attributes: - attributes = { - k: v for k, v in vars(record).items() if k not in _RESERVED_ATTRS + attributes = { # type: ignore[misc] # + k: v for k, v in vars(record).items() if k not in _RESERVED_ATTRS # type: ignore[misc] # } # Add standard code attributes for logs. 
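For reference, a record translated by this handler ends up with attributes
shaped roughly like this (illustrative values only):

    {
        "code.filepath": "/app/main.py",
        "code.function": "handle_request",
        "code.lineno": 42,
        # Only present when record.exc_info is set:
        "exception.type": "ValueError",
        "exception.message": "bad input",
        "exception.stacktrace": "Traceback (most recent call last): ...",
    }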
- attributes[SpanAttributes.CODE_FILEPATH] = record.pathname - attributes[SpanAttributes.CODE_FUNCTION] = record.funcName - attributes[SpanAttributes.CODE_LINENO] = record.lineno + attributes[SpanAttributes.CODE_FILEPATH] = record.pathname # type: ignore[misc] # + attributes[SpanAttributes.CODE_FUNCTION] = record.funcName # type: ignore[misc] # + attributes[SpanAttributes.CODE_LINENO] = record.lineno # type: ignore[misc] # if record.exc_info: exctype, value, tb = record.exc_info if exctype is not None: - attributes[SpanAttributes.EXCEPTION_TYPE] = exctype.__name__ - if value is not None and value.args: - attributes[SpanAttributes.EXCEPTION_MESSAGE] = value.args[0] + attributes[SpanAttributes.EXCEPTION_TYPE] = exctype.__name__ # type: ignore[misc] # + if value is not None and value.args: # type: ignore[misc] # + attributes[SpanAttributes.EXCEPTION_MESSAGE] = value.args[0] # type: ignore[misc] # if tb is not None: # https://github.com/open-telemetry/opentelemetry-specification/blob/9fa7c656b26647b27e485a6af7e38dc716eba98a/specification/trace/semantic_conventions/exceptions.md#stacktrace-representation - attributes[SpanAttributes.EXCEPTION_STACKTRACE] = "".join( + attributes[SpanAttributes.EXCEPTION_STACKTRACE] = "".join( # type: ignore[misc] # traceback.format_exception(*record.exc_info) ) - return attributes + return attributes # type: ignore[misc] # def _translate(self, record: logging.LogRecord) -> LogRecord: timestamp = int(record.created * 1e9) @@ -539,7 +541,7 @@ def _translate(self, record: logging.LogRecord) -> LogRecord: severity_text=level_name, severity_number=severity_number, body=body, - resource=self._logger.resource, + resource=self._logger.resource, # type: ignore[attr-defined, misc] # attributes=attributes, ) @@ -557,7 +559,7 @@ def flush(self) -> None: Flushes the logging output. Skip flushing if logger is NoOp. """ if not isinstance(self._logger, NoOpLogger): - self._logger_provider.force_flush() + self._logger_provider.force_flush() # type: ignore[misc, union-attr] # class Logger(APILogger): @@ -580,10 +582,10 @@ def __init__( self._instrumentation_scope = instrumentation_scope @property - def resource(self): + def resource(self): # type: ignore[misc, no-untyped-def] # return self._resource - def emit(self, record: LogRecord): + def emit(self, record: LogRecord): # type: ignore[no-untyped-def, override] # """Emits the :class:`LogData` by associating :class:`LogRecord` and instrumentation info. 
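
        For illustration (assumed usage)::

            logger.emit(LogRecord(body="hello", severity_text="INFO"))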
""" @@ -594,29 +596,29 @@ def emit(self, record: LogRecord): class LoggerProvider(APILoggerProvider): def __init__( self, - resource: Resource = None, + resource: Resource = None, # type: ignore[assignment] # shutdown_on_exit: bool = True, multi_log_record_processor: Union[ SynchronousMultiLogRecordProcessor, ConcurrentMultiLogRecordProcessor, - ] = None, + ] = None, # type: ignore[assignment] # ): if resource is None: self._resource = Resource.create({}) else: - self._resource = resource + self._resource = resource # type: ignore[has-type] # self._multi_log_record_processor = ( - multi_log_record_processor or SynchronousMultiLogRecordProcessor() + multi_log_record_processor or SynchronousMultiLogRecordProcessor() # type: ignore[no-untyped-call] # ) disabled = environ.get(OTEL_SDK_DISABLED, "") self._disabled = disabled.lower().strip() == "true" self._at_exit_handler = None if shutdown_on_exit: - self._at_exit_handler = atexit.register(self.shutdown) + self._at_exit_handler = atexit.register(self.shutdown) # type: ignore[misc] # @property - def resource(self): - return self._resource + def resource(self): # type: ignore[misc, no-untyped-def] # + return self._resource # type: ignore[has-type] # def get_logger( self, @@ -626,9 +628,9 @@ def get_logger( ) -> Logger: if self._disabled: _logger.warning("SDK is disabled.") - return NoOpLogger(name, version=version, schema_url=schema_url) + return NoOpLogger(name, version=version, schema_url=schema_url) # type: ignore[return-value] # return Logger( - self._resource, + self._resource, # type: ignore[has-type] # self._multi_log_record_processor, InstrumentationScope( name, @@ -637,7 +639,7 @@ def get_logger( ), ) - def add_log_record_processor( + def add_log_record_processor( # type: ignore[no-untyped-def] # self, log_record_processor: LogRecordProcessor ): """Registers a new :class:`LogRecordProcessor` for this `LoggerProvider` instance. @@ -648,11 +650,11 @@ def add_log_record_processor( log_record_processor ) - def shutdown(self): + def shutdown(self): # type: ignore[no-untyped-def] # """Shuts down the log processors.""" self._multi_log_record_processor.shutdown() - if self._at_exit_handler is not None: - atexit.unregister(self._at_exit_handler) + if self._at_exit_handler is not None: # type: ignore[misc] # + atexit.unregister(self._at_exit_handler) # type: ignore[misc] # self._at_exit_handler = None def force_flush(self, timeout_millis: int = 30000) -> bool: diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py index 597c55a6725..efe3188bb51 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py @@ -65,7 +65,7 @@ class LogExporter(abc.ABC): """ @abc.abstractmethod - def export(self, batch: Sequence[LogData]): + def export(self, batch: Sequence[LogData]): # type: ignore[misc, no-untyped-def] # """Exports a batch of logs. Args: @@ -76,7 +76,7 @@ def export(self, batch: Sequence[LogData]): """ @abc.abstractmethod - def shutdown(self): + def shutdown(self): # type: ignore[misc, no-untyped-def] # """Shuts down the exporter. Called when the SDK is shut down. 
@@ -93,20 +93,20 @@ class ConsoleLogExporter(LogExporter): def __init__( self, - out: IO = sys.stdout, + out: IO = sys.stdout, # type: ignore[type-arg] # formatter: Callable[[LogRecord], str] = lambda record: record.to_json() + linesep, ): - self.out = out + self.out = out # type: ignore[misc] # self.formatter = formatter - def export(self, batch: Sequence[LogData]): + def export(self, batch: Sequence[LogData]): # type: ignore[no-untyped-def] # for data in batch: - self.out.write(self.formatter(data.log_record)) - self.out.flush() + self.out.write(self.formatter(data.log_record)) # type: ignore[misc] # + self.out.flush() # type: ignore[misc] # return LogExportResult.SUCCESS - def shutdown(self): + def shutdown(self): # type: ignore[no-untyped-def] # pass @@ -120,7 +120,7 @@ def __init__(self, exporter: LogExporter): self._exporter = exporter self._shutdown = False - def emit(self, log_data: LogData): + def emit(self, log_data: LogData): # type: ignore[no-untyped-def] # if self._shutdown: _logger.warning("Processor is already shutdown, ignoring call") return @@ -131,9 +131,9 @@ def emit(self, log_data: LogData): _logger.exception("Exception while exporting logs.") detach(token) - def shutdown(self): + def shutdown(self): # type: ignore[no-untyped-def] # self._shutdown = True - self._exporter.shutdown() + self._exporter.shutdown() # type: ignore[no-untyped-call] # def force_flush( self, timeout_millis: int = 30000 @@ -144,7 +144,7 @@ def force_flush( class _FlushRequest: __slots__ = ["event", "num_log_records"] - def __init__(self): + def __init__(self): # type: ignore[no-untyped-def] # self.event = threading.Event() self.num_log_records = 0 @@ -173,10 +173,10 @@ class BatchLogRecordProcessor(LogRecordProcessor): def __init__( self, exporter: LogExporter, - schedule_delay_millis: float = None, - max_export_batch_size: int = None, - export_timeout_millis: float = None, - max_queue_size: int = None, + schedule_delay_millis: float = None, # type: ignore[assignment] # + max_export_batch_size: int = None, # type: ignore[assignment] # + export_timeout_millis: float = None, # type: ignore[assignment] # + max_queue_size: int = None, # type: ignore[assignment] # ): if max_queue_size is None: max_queue_size = BatchLogRecordProcessor._default_max_queue_size() @@ -196,7 +196,7 @@ def __init__( BatchLogRecordProcessor._default_export_timeout_millis() ) - BatchLogRecordProcessor._validate_arguments( + BatchLogRecordProcessor._validate_arguments( # type: ignore[no-untyped-call] # max_queue_size, schedule_delay_millis, max_export_batch_size ) @@ -208,7 +208,7 @@ def __init__( self._queue = collections.deque([], max_queue_size) self._worker_thread = threading.Thread( name="OtelBatchLogRecordProcessor", - target=self.worker, + target=self.worker, # type: ignore[misc] # daemon=True, ) self._condition = threading.Condition(threading.Lock()) @@ -218,22 +218,22 @@ def __init__( self._worker_thread.start() if hasattr(os, "register_at_fork"): os.register_at_fork( - after_in_child=self._at_fork_reinit + after_in_child=self._at_fork_reinit # type: ignore[misc] # ) # pylint: disable=protected-access self._pid = os.getpid() - def _at_fork_reinit(self): + def _at_fork_reinit(self): # type: ignore[no-untyped-def] # self._condition = threading.Condition(threading.Lock()) self._queue.clear() self._worker_thread = threading.Thread( name="OtelBatchLogRecordProcessor", - target=self.worker, + target=self.worker, # type: ignore[misc] # daemon=True, ) self._worker_thread.start() self._pid = os.getpid() - def worker(self): + def 
worker(self): # type: ignore[no-untyped-def] # timeout = self._schedule_delay_millis / 1e3 flush_request: Optional[_FlushRequest] = None while not self._shutdown: @@ -274,11 +274,11 @@ def worker(self): shutdown_flush_request = self._get_and_unset_flush_request() # flush the remaining logs - self._drain_queue() + self._drain_queue() # type: ignore[no-untyped-call] # self._notify_flush_request_finished(flush_request) self._notify_flush_request_finished(shutdown_flush_request) - def _export(self, flush_request: Optional[_FlushRequest] = None): + def _export(self, flush_request: Optional[_FlushRequest] = None): # type: ignore[no-untyped-def] # """Exports logs considering the given flush_request. If flush_request is not None then logs are exported in batches @@ -317,7 +317,7 @@ def _export_batch(self) -> int: self._log_records[index] = None return idx - def _drain_queue(self): + def _drain_queue(self): # type: ignore[no-untyped-def] # """Export all elements until queue is empty. Can only be called from the worker thread context because it invokes @@ -334,7 +334,7 @@ def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]: return flush_request @staticmethod - def _notify_flush_request_finished( + def _notify_flush_request_finished( # type: ignore[misc, no-untyped-def] # flush_request: Optional[_FlushRequest] = None, ): if flush_request is not None: @@ -342,7 +342,7 @@ def _notify_flush_request_finished( def _get_or_create_flush_request(self) -> _FlushRequest: if self._flush_request is None: - self._flush_request = _FlushRequest() + self._flush_request = _FlushRequest() # type: ignore[no-untyped-call] # return self._flush_request def emit(self, log_data: LogData) -> None: @@ -352,23 +352,23 @@ def emit(self, log_data: LogData) -> None: if self._shutdown: return if self._pid != os.getpid(): - _BSP_RESET_ONCE.do_once(self._at_fork_reinit) + _BSP_RESET_ONCE.do_once(self._at_fork_reinit) # type: ignore[misc] # self._queue.appendleft(log_data) if len(self._queue) >= self._max_export_batch_size: with self._condition: self._condition.notify() - def shutdown(self): + def shutdown(self): # type: ignore[no-untyped-def] # self._shutdown = True with self._condition: self._condition.notify_all() self._worker_thread.join() - self._exporter.shutdown() + self._exporter.shutdown() # type: ignore[no-untyped-call] # def force_flush(self, timeout_millis: Optional[int] = None) -> bool: if timeout_millis is None: - timeout_millis = self._export_timeout_millis + timeout_millis = self._export_timeout_millis # type: ignore[assignment] # if self._shutdown: return True @@ -376,13 +376,13 @@ def force_flush(self, timeout_millis: Optional[int] = None) -> bool: flush_request = self._get_or_create_flush_request() self._condition.notify_all() - ret = flush_request.event.wait(timeout_millis / 1e3) + ret = flush_request.event.wait(timeout_millis / 1e3) # type: ignore[operator] # if not ret: _logger.warning("Timeout was exceeded in force_flush().") return ret @staticmethod - def _default_max_queue_size(): + def _default_max_queue_size(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get(OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE) @@ -396,7 +396,7 @@ def _default_max_queue_size(): return _DEFAULT_MAX_QUEUE_SIZE @staticmethod - def _default_schedule_delay_millis(): + def _default_schedule_delay_millis(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get( @@ -412,7 +412,7 @@ def _default_schedule_delay_millis(): return _DEFAULT_SCHEDULE_DELAY_MILLIS @staticmethod - def 
_default_max_export_batch_size(): + def _default_max_export_batch_size(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get( @@ -429,7 +429,7 @@ def _default_max_export_batch_size(): return _DEFAULT_MAX_EXPORT_BATCH_SIZE @staticmethod - def _default_export_timeout_millis(): + def _default_export_timeout_millis(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get( @@ -445,21 +445,21 @@ def _default_export_timeout_millis(): return _DEFAULT_EXPORT_TIMEOUT_MILLIS @staticmethod - def _validate_arguments( + def _validate_arguments( # type: ignore[misc, no-untyped-def] # max_queue_size, schedule_delay_millis, max_export_batch_size ): - if max_queue_size <= 0: + if max_queue_size <= 0: # type: ignore[misc] # raise ValueError("max_queue_size must be a positive integer.") - if schedule_delay_millis <= 0: + if schedule_delay_millis <= 0: # type: ignore[misc] # raise ValueError("schedule_delay_millis must be positive.") - if max_export_batch_size <= 0: + if max_export_batch_size <= 0: # type: ignore[misc] # raise ValueError( "max_export_batch_size must be a positive integer." ) - if max_export_batch_size > max_queue_size: + if max_export_batch_size > max_queue_size: # type: ignore[misc] # raise ValueError( "max_export_batch_size must be less than or equal to max_queue_size." ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py index 68cb6b7389a..daf409d0e7c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py @@ -27,24 +27,24 @@ class InMemoryLogExporter(LogExporter): :func:`.get_finished_logs` method. 
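
    For illustration (assumed usage)::

        exporter = InMemoryLogExporter()
        logger_provider.add_log_record_processor(
            SimpleLogRecordProcessor(exporter)
        )
        # ... emit some logs ...
        finished = exporter.get_finished_logs()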
""" - def __init__(self): + def __init__(self): # type: ignore[no-untyped-def] # self._logs = [] self._lock = threading.Lock() self._stopped = False def clear(self) -> None: with self._lock: - self._logs.clear() + self._logs.clear() # type: ignore[misc] # def get_finished_logs(self) -> typing.Tuple[LogData, ...]: with self._lock: - return tuple(self._logs) + return tuple(self._logs) # type: ignore[misc] # def export(self, batch: typing.Sequence[LogData]) -> LogExportResult: if self._stopped: return LogExportResult.FAILURE with self._lock: - self._logs.extend(batch) + self._logs.extend(batch) # type: ignore[misc] # return LogExportResult.SUCCESS def shutdown(self) -> None: diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/error_handler/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/error_handler/__init__.py index 7b21d92d2af..ac4a20bc6aa 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/error_handler/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/error_handler/__init__.py @@ -69,7 +69,7 @@ def _handle(self, error: Exception, *args, **kwargs): class ErrorHandler(ABC): @abstractmethod - def _handle(self, error: Exception, *args, **kwargs): + def _handle(self, error: Exception, *args, **kwargs): # type: ignore[misc, no-untyped-def] # """ Handle an exception """ @@ -83,7 +83,7 @@ class _DefaultErrorHandler(ErrorHandler): """ # pylint: disable=useless-return - def _handle(self, error: Exception, *args, **kwargs): + def _handle(self, error: Exception, *args, **kwargs): # type: ignore[no-untyped-def] # logger.exception("Error handled by default error handler: ") return None @@ -106,13 +106,13 @@ def __new__(cls) -> "GlobalErrorHandler": return cls._instance - def __enter__(self): + def __enter__(self): # type: ignore[no-untyped-def] # pass # pylint: disable=no-self-use - def __exit__(self, exc_type, exc_value, traceback): + def __exit__(self, exc_type, exc_value, traceback): # type: ignore[no-untyped-def] # - if exc_value is None: + if exc_value is None: # type: ignore[misc] # return None @@ -122,15 +122,15 @@ def __exit__(self, exc_type, exc_value, traceback): group="opentelemetry_error_handler" ) - for error_handler_entry_point in error_handler_entry_points: + for error_handler_entry_point in error_handler_entry_points: # type: ignore[misc] # - error_handler_class = error_handler_entry_point.load() + error_handler_class = error_handler_entry_point.load() # type: ignore[misc] # - if issubclass(error_handler_class, exc_value.__class__): + if issubclass(error_handler_class, exc_value.__class__): # type: ignore[misc] # try: - error_handler_class()._handle(exc_value) + error_handler_class()._handle(exc_value) # type: ignore[misc] # plugin_handled = True # pylint: disable=broad-except @@ -140,12 +140,12 @@ def __exit__(self, exc_type, exc_value, traceback): "%s error while handling error" " %s by error handler %s", error_handling_error.__class__.__name__, - exc_value.__class__.__name__, - error_handler_class.__name__, + exc_value.__class__.__name__, # type: ignore[misc] # + error_handler_class.__name__, # type: ignore[misc] # ) if not plugin_handled: - _DefaultErrorHandler()._handle(exc_value) + _DefaultErrorHandler()._handle(exc_value) # type: ignore[misc] # return True diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py index 908d8f81cf8..ba92b37a03c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py +++ 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py @@ -73,15 +73,17 @@ def __init__( ) self._instrumentation_scope = instrumentation_scope self._measurement_consumer = measurement_consumer - self._instrument_id_instrument = {} + self._instrument_id_instrument = {} # type: ignore[var-annotated] # self._instrument_id_instrument_lock = Lock() - def create_counter(self, name, unit="", description="") -> APICounter: + def create_counter(self, name, unit="", description="") -> APICounter: # type: ignore[no-untyped-def] # ( is_instrument_registered, instrument_id, - ) = self._is_instrument_registered(name, _Counter, unit, description) + ) = self._is_instrument_registered( + name, _Counter, unit, description + ) # type: ignore[misc] # if is_instrument_registered: # FIXME #2558 go through all views here and check if this @@ -90,27 +92,27 @@ def create_counter(self, name, unit="", description="") -> APICounter: _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + name, # type: ignore[misc] # APICounter.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _Counter( - name, + instrument = _Counter( # type: ignore[no-untyped-call] # + name, # type: ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument - def create_up_down_counter( + def create_up_down_counter( # type: ignore[no-untyped-def] # self, name, unit="", description="" ) -> APIUpDownCounter: @@ -118,7 +120,7 @@ def create_up_down_counter( is_instrument_registered, instrument_id, ) = self._is_instrument_registered( - name, _UpDownCounter, unit, description + name, _UpDownCounter, unit, description # type: ignore[misc] # ) if is_instrument_registered: @@ -128,27 +130,27 @@ def create_up_down_counter( _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + name, # type: ignore[misc] # APIUpDownCounter.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _UpDownCounter( - name, + instrument = _UpDownCounter( # type: ignore[no-untyped-call] # + name, # type: ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument - def create_observable_counter( + def create_observable_counter( # type: ignore[no-untyped-def] # self, name, callbacks=None, unit="", description="" ) -> APIObservableCounter: @@ -156,7 +158,7 @@ def create_observable_counter( is_instrument_registered, 
instrument_id, ) = self._is_instrument_registered( - name, _ObservableCounter, unit, description + name, _ObservableCounter, unit, description # type: ignore[misc] # ) if is_instrument_registered: @@ -166,35 +168,37 @@ def create_observable_counter( _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + name, # type: ignore[misc] # APIObservableCounter.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _ObservableCounter( - name, + instrument = _ObservableCounter( # type: ignore[no-untyped-call] # + name, # type: ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - callbacks, - unit, - description, + callbacks, # type: ignore[misc] # + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) self._measurement_consumer.register_asynchronous_instrument(instrument) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument - def create_histogram(self, name, unit="", description="") -> APIHistogram: + def create_histogram(self, name, unit="", description="") -> APIHistogram: # type: ignore[no-untyped-def] # ( is_instrument_registered, instrument_id, - ) = self._is_instrument_registered(name, _Histogram, unit, description) + ) = self._is_instrument_registered( + name, _Histogram, unit, description + ) # type: ignore[misc] # if is_instrument_registered: # FIXME #2558 go through all views here and check if this @@ -203,31 +207,33 @@ def create_histogram(self, name, unit="", description="") -> APIHistogram: _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + name, # type: ignore[misc] # APIHistogram.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _Histogram( - name, + instrument = _Histogram( # type: ignore[no-untyped-call] # + name, # type: ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument - def create_gauge(self, name, unit="", description="") -> APIGauge: + def create_gauge(self, name, unit="", description="") -> APIGauge: # type: ignore[no-untyped-def] # ( is_instrument_registered, instrument_id, - ) = self._is_instrument_registered(name, _Gauge, unit, description) + ) = self._is_instrument_registered( + name, _Gauge, unit, description + ) # type: ignore[misc] # if is_instrument_registered: # FIXME #2558 go through all views here and check if this @@ -236,27 +242,27 @@ def create_gauge(self, name, unit="", description="") -> APIGauge: _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + 
name, # type: ignore[misc] # APIGauge.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _Gauge( - name, + instrument = _Gauge( # type: ignore[no-untyped-call] # + name, # type: ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument - def create_observable_gauge( + def create_observable_gauge( # type: ignore[no-untyped-def] # self, name, callbacks=None, unit="", description="" ) -> APIObservableGauge: @@ -264,7 +270,7 @@ def create_observable_gauge( is_instrument_registered, instrument_id, ) = self._is_instrument_registered( - name, _ObservableGauge, unit, description + name, _ObservableGauge, unit, description # type: ignore[misc] # ) if is_instrument_registered: @@ -274,30 +280,30 @@ def create_observable_gauge( _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + name, # type: ignore[misc] # APIObservableGauge.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _ObservableGauge( - name, + instrument = _ObservableGauge( # type: ignore[no-untyped-call] # + name, # type: ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - callbacks, - unit, - description, + callbacks, # type: ignore[misc] # + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) self._measurement_consumer.register_asynchronous_instrument(instrument) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument - def create_observable_up_down_counter( + def create_observable_up_down_counter( # type: ignore[no-untyped-def] # self, name, callbacks=None, unit="", description="" ) -> APIObservableUpDownCounter: @@ -305,7 +311,7 @@ def create_observable_up_down_counter( is_instrument_registered, instrument_id, ) = self._is_instrument_registered( - name, _ObservableUpDownCounter, unit, description + name, _ObservableUpDownCounter, unit, description # type: ignore[misc] # ) if is_instrument_registered: @@ -315,27 +321,27 @@ def create_observable_up_down_counter( _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already.", - name, + name, # type: ignore[misc] # APIObservableUpDownCounter.__name__, - unit, - description, + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) with self._instrument_id_instrument_lock: - return self._instrument_id_instrument[instrument_id] + return self._instrument_id_instrument[instrument_id] # type: ignore[misc, no-any-return] # - instrument = _ObservableUpDownCounter( - name, + instrument = _ObservableUpDownCounter( # type: ignore[no-untyped-call] # + name, # type: 
ignore[misc] # self._instrumentation_scope, self._measurement_consumer, - callbacks, - unit, - description, + callbacks, # type: ignore[misc] # + unit, # type: ignore[misc] # + description, # type: ignore[misc] # ) self._measurement_consumer.register_asynchronous_instrument(instrument) with self._instrument_id_instrument_lock: - self._instrument_id_instrument[instrument_id] = instrument + self._instrument_id_instrument[instrument_id] = instrument # type: ignore[misc] # return instrument @@ -372,16 +378,16 @@ class MeterProvider(APIMeterProvider): """ _all_metric_readers_lock = Lock() - _all_metric_readers = set() + _all_metric_readers = set() # type: ignore[var-annotated] # def __init__( self, - metric_readers: Sequence[ + metric_readers: Sequence[ # type: ignore[name-defined] # "opentelemetry.sdk.metrics.export.MetricReader" ] = (), - resource: Resource = None, + resource: Resource = None, # type: ignore[assignment] # shutdown_on_exit: bool = True, - views: Sequence["opentelemetry.sdk.metrics.view.View"] = (), + views: Sequence["opentelemetry.sdk.metrics.view.View"] = (), # type: ignore[name-defined] # ): self._lock = Lock() self._meter_lock = Lock() @@ -400,22 +406,22 @@ def __init__( self._disabled = disabled.lower().strip() == "true" if shutdown_on_exit: - self._atexit_handler = register(self.shutdown) + self._atexit_handler = register(self.shutdown) # type: ignore[misc] # - self._meters = {} + self._meters = {} # type: ignore[var-annotated] # self._shutdown_once = Once() self._shutdown = False for metric_reader in self._sdk_config.metric_readers: with self._all_metric_readers_lock: - if metric_reader in self._all_metric_readers: + if metric_reader in self._all_metric_readers: # type: ignore[misc] # raise Exception( f"MetricReader {metric_reader} has been registered " "already in other MeterProvider instance" ) - self._all_metric_readers.add(metric_reader) + self._all_metric_readers.add(metric_reader) # type: ignore[misc] # metric_reader._set_collect_callback( self._measurement_consumer.collect @@ -446,7 +452,7 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool: metric_reader_error_string = "\n".join( [ - f"{metric_reader.__class__.__name__}: {repr(error)}" + f"{metric_reader.__class__.__name__}: {repr(error)}" # type: ignore[misc] # for metric_reader, error in metric_reader_error.items() ] ) @@ -458,13 +464,13 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool: ) return True - def shutdown(self, timeout_millis: float = 30_000): + def shutdown(self, timeout_millis: float = 30_000): # type: ignore[no-untyped-def] # deadline_ns = time_ns() + timeout_millis * 10**6 - def _shutdown(): + def _shutdown(): # type: ignore[no-untyped-def] # self._shutdown = True - did_shutdown = self._shutdown_once.do_once(_shutdown) + did_shutdown = self._shutdown_once.do_once(_shutdown) # type: ignore[misc] # if not did_shutdown: _logger.warning("shutdown can only be called once") @@ -488,15 +494,15 @@ def _shutdown(): metric_reader_error[metric_reader] = error - if self._atexit_handler is not None: - unregister(self._atexit_handler) + if self._atexit_handler is not None: # type: ignore[misc] # + unregister(self._atexit_handler) # type: ignore[misc] # self._atexit_handler = None if metric_reader_error: metric_reader_error_string = "\n".join( [ - f"{metric_reader.__class__.__name__}: {repr(error)}" + f"{metric_reader.__class__.__name__}: {repr(error)}" # type: ignore[misc] # for metric_reader, error in metric_reader_error.items() ] ) @@ -518,25 +524,25 @@ def get_meter( if 
self._disabled: _logger.warning("SDK is disabled.") - return NoOpMeter(name, version=version, schema_url=schema_url) + return NoOpMeter(name, version=version, schema_url=schema_url) # type: ignore[return-value] # if self._shutdown: _logger.warning( "A shutdown `MeterProvider` can not provide a `Meter`" ) - return NoOpMeter(name, version=version, schema_url=schema_url) + return NoOpMeter(name, version=version, schema_url=schema_url) # type: ignore[return-value] # if not name: _logger.warning("Meter name cannot be None or empty.") - return NoOpMeter(name, version=version, schema_url=schema_url) + return NoOpMeter(name, version=version, schema_url=schema_url) # type: ignore[return-value] # info = InstrumentationScope(name, version, schema_url) with self._meter_lock: - if not self._meters.get(info): + if not self._meters.get(info): # type: ignore[misc] # # FIXME #2558 pass SDKConfig object to meter so that the meter # has access to views. - self._meters[info] = Meter( + self._meters[info] = Meter( # type: ignore[misc] # info, self._measurement_consumer, ) - return self._meters[info] + return self._meters[info] # type: ignore[misc, no-any-return] # diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py index 7dd7f58f272..11218c38a64 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py @@ -25,7 +25,9 @@ _Aggregation, _SumAggregation, ) -from opentelemetry.sdk.metrics._internal.export import AggregationTemporality +from opentelemetry.sdk.metrics._internal.export import ( + AggregationTemporality, # type: ignore[attr-defined] # +) from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.point import DataPointT from opentelemetry.sdk.metrics._internal.view import View @@ -43,15 +45,15 @@ def __init__( self._start_time_unix_nano = time_ns() self._view = view self._instrument = instrument - self._attributes_aggregation: Dict[frozenset, _Aggregation] = {} + self._attributes_aggregation: Dict[frozenset, _Aggregation] = {} # type: ignore[type-arg] # self._lock = Lock() self._instrument_class_aggregation = instrument_class_aggregation - self._name = self._view._name or self._instrument.name + self._name = self._view._name or self._instrument.name # type: ignore[attr-defined, misc] # self._description = ( - self._view._description or self._instrument.description + self._view._description or self._instrument.description # type: ignore[attr-defined, misc] # ) if not isinstance(self._view._aggregation, DefaultAggregation): - self._aggregation = self._view._aggregation._create_aggregation( + self._aggregation = self._view._aggregation._create_aggregation( # type: ignore[misc] # self._instrument, None, 0 ) else: @@ -63,22 +65,22 @@ def conflicts(self, other: "_ViewInstrumentMatch") -> bool: # pylint: disable=protected-access result = ( - self._name == other._name - and self._instrument.unit == other._instrument.unit + self._name == other._name # type: ignore[misc] # + and self._instrument.unit == other._instrument.unit # type: ignore[attr-defined, misc] # # The aggregation class is being used here instead of data point # type since they are functionally equivalent. 
- and self._aggregation.__class__ == other._aggregation.__class__ + and self._aggregation.__class__ == other._aggregation.__class__ # type: ignore[misc] # ) - if isinstance(self._aggregation, _SumAggregation): + if isinstance(self._aggregation, _SumAggregation): # type: ignore[misc] # result = ( - result - and self._aggregation._instrument_is_monotonic - == other._aggregation._instrument_is_monotonic - and self._aggregation._instrument_aggregation_temporality - == other._aggregation._instrument_aggregation_temporality + result # type: ignore[misc] # + and self._aggregation._instrument_is_monotonic # type: ignore[misc] # + == other._aggregation._instrument_is_monotonic # type: ignore[attr-defined, misc] # + and self._aggregation._instrument_aggregation_temporality # type: ignore[misc] # + == other._aggregation._instrument_aggregation_temporality # type: ignore[attr-defined, misc] # ) - return result + return result # type: ignore[misc] # # pylint: disable=protected-access def consume_measurement(self, measurement: Measurement) -> None: @@ -91,24 +93,22 @@ def consume_measurement(self, measurement: Measurement) -> None: if key in self._view._attribute_keys: attributes[key] = value elif measurement.attributes is not None: - attributes = measurement.attributes + attributes = measurement.attributes # type: ignore[assignment] # else: attributes = {} aggr_key = frozenset(attributes.items()) - if aggr_key not in self._attributes_aggregation: + if aggr_key not in self._attributes_aggregation: # type: ignore[misc] # with self._lock: - if aggr_key not in self._attributes_aggregation: + if aggr_key not in self._attributes_aggregation: # type: ignore[misc] # if not isinstance( self._view._aggregation, DefaultAggregation ): - aggregation = ( - self._view._aggregation._create_aggregation( - self._instrument, - attributes, - self._start_time_unix_nano, - ) + aggregation = self._view._aggregation._create_aggregation( # type: ignore[misc] # + self._instrument, + attributes, + self._start_time_unix_nano, ) else: aggregation = self._instrument_class_aggregation[ @@ -118,11 +118,11 @@ def consume_measurement(self, measurement: Measurement) -> None: attributes, self._start_time_unix_nano, ) - self._attributes_aggregation[aggr_key] = aggregation + self._attributes_aggregation[aggr_key] = aggregation # type: ignore[misc] # - self._attributes_aggregation[aggr_key].aggregate(measurement) + self._attributes_aggregation[aggr_key].aggregate(measurement) # type: ignore[misc] # - def collect( + def collect( # type: ignore[no-any-unimported] # self, collection_aggregation_temporality: AggregationTemporality, collection_start_nanos: int, @@ -130,12 +130,12 @@ def collect( data_points: List[DataPointT] = [] with self._lock: - for aggregation in self._attributes_aggregation.values(): - data_point = aggregation.collect( - collection_aggregation_temporality, collection_start_nanos + for aggregation in self._attributes_aggregation.values(): # type: ignore[misc] # + data_point = aggregation.collect( # type: ignore[misc] # + collection_aggregation_temporality, collection_start_nanos # type: ignore[misc] # ) - if data_point is not None: - data_points.append(data_point) + if data_point is not None: # type: ignore[misc] # + data_points.append(data_point) # type: ignore[misc] # # Returning here None instead of an empty list because the caller # does not consume a sequence and to be consistent with the rest of diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py 
b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 3ec37473f60..7bc8a736284 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -95,11 +95,11 @@ def collect( pass -class _DropAggregation(_Aggregation): +class _DropAggregation(_Aggregation): # type: ignore[type-arg] # def aggregate(self, measurement: Measurement) -> None: pass - def collect( + def collect( # type: ignore[override] # self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, @@ -107,7 +107,7 @@ def collect( pass -class _SumAggregation(_Aggregation[Sum]): +class _SumAggregation(_Aggregation[Sum]): # type: ignore[type-var] # def __init__( self, attributes: Attributes, @@ -131,11 +131,11 @@ def __init__( def aggregate(self, measurement: Measurement) -> None: with self._lock: if self._current_value is None: - self._current_value = 0 + self._current_value = 0 # type: ignore[assignment] # - self._current_value = self._current_value + measurement.value + self._current_value = self._current_value + measurement.value # type: ignore[assignment, operator] # - def collect( + def collect( # type: ignore[override] # self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, @@ -342,16 +342,16 @@ def collect( ) -class _LastValueAggregation(_Aggregation[GaugePoint]): +class _LastValueAggregation(_Aggregation[GaugePoint]): # type: ignore[type-var] # def __init__(self, attributes: Attributes): super().__init__(attributes) self._value = None - def aggregate(self, measurement: Measurement): + def aggregate(self, measurement: Measurement): # type: ignore[no-untyped-def] # with self._lock: - self._value = measurement.value + self._value = measurement.value # type: ignore[assignment] # - def collect( + def collect( # type: ignore[override] # self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, @@ -373,7 +373,7 @@ def collect( ) -class _ExplicitBucketHistogramAggregation(_Aggregation[HistogramPoint]): +class _ExplicitBucketHistogramAggregation(_Aggregation[HistogramPoint]): # type: ignore[type-var] # def __init__( self, attributes: Attributes, @@ -425,19 +425,19 @@ def _get_empty_bucket_counts(self) -> List[int]: def aggregate(self, measurement: Measurement) -> None: with self._lock: if self._current_value is None: - self._current_value = self._get_empty_bucket_counts() + self._current_value = self._get_empty_bucket_counts() # type: ignore[assignment] # value = measurement.value - self._sum += value + self._sum += value # type: ignore[assignment] # if self._record_min_max: self._min = min(self._min, value) self._max = max(self._max, value) - self._current_value[bisect_left(self._boundaries, value)] += 1 + self._current_value[bisect_left(self._boundaries, value)] += 1 # type: ignore[index, misc] # - def collect( + def collect( # type: ignore[override] # self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, @@ -504,7 +504,7 @@ def collect( self._previous_max = max(max_, self._previous_max) self._previous_sum = sum_ + self._previous_sum - return HistogramDataPoint( + return HistogramDataPoint( # type: ignore[return-value] # attributes=self._attributes, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, @@ -520,7 +520,7 @@ def collect( # pylint: disable=protected-access -class 
_ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): +class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): # type: ignore[type-var] # # _min_max_size and _max_max_size are the smallest and largest values # the max_size parameter may have, respectively. @@ -578,10 +578,10 @@ def __init__( self._max = -inf # _positive holds the positive values. - self._positive = Buckets() + self._positive = Buckets() # type: ignore[no-untyped-call] # # _negative holds the negative values by their absolute value. - self._negative = Buckets() + self._negative = Buckets() # type: ignore[no-untyped-call] # # _mapping corresponds to the current scale, is shared by both the # positive and negative buckets. @@ -632,7 +632,7 @@ def aggregate(self, measurement: Measurement) -> None: # zero count. return - self._sum += value + self._sum += value # type: ignore[assignment] # # 1. Use the positive buckets for positive values and the negative # buckets for negative values. @@ -679,7 +679,7 @@ def aggregate(self, measurement: Measurement) -> None: if is_rescaling_needed: self._downscale( - self._get_scale_change(low, high), + self._get_scale_change(low, high), # type: ignore[misc, no-untyped-call] # self._positive, self._negative, ) @@ -692,7 +692,7 @@ def aggregate(self, measurement: Measurement) -> None: if index < buckets.index_start: span = buckets.index_end - index - if span >= len(buckets.counts): + if span >= len(buckets.counts): # type: ignore[misc] # buckets.grow(span + 1, self._max_size) buckets.index_start = index @@ -700,7 +700,7 @@ def aggregate(self, measurement: Measurement) -> None: elif index > buckets.index_end: span = index - buckets.index_start - if span >= len(buckets.counts): + if span >= len(buckets.counts): # type: ignore[misc] # buckets.grow(span + 1, self._max_size) buckets.index_end = index @@ -709,12 +709,12 @@ def aggregate(self, measurement: Measurement) -> None: bucket_index = index - buckets.index_base if bucket_index < 0: - bucket_index += len(buckets.counts) + bucket_index += len(buckets.counts) # type: ignore[misc] # # 7. Increment the bucket. 
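            # Illustrative numbers for the wrap-around above: with
            # len(buckets.counts) == 8 and buckets.index_base == 5, an
            # incoming index of 3 gives bucket_index == 3 - 5 == -2, which
            # wraps to -2 + 8 == 6, so counts[6] is the slot incremented.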
buckets.increment_bucket(bucket_index) - def collect( + def collect( # type: ignore[override] # self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, @@ -736,10 +736,10 @@ def collect( current_sum = self._sum current_max = self._max if current_max == -inf: - current_max = None + current_max = None # type: ignore[assignment] # current_min = self._min if current_min == inf: - current_min = None + current_min = None # type: ignore[assignment] # if self._count == self._zero_count: current_scale = 0 @@ -747,8 +747,8 @@ def collect( else: current_scale = self._mapping.scale - self._negative = Buckets() - self._positive = Buckets() + self._negative = Buckets() # type: ignore[no-untyped-call] # + self._positive = Buckets() # type: ignore[no-untyped-call] # self._start_time_unix_nano = collection_start_nano self._sum = 0 self._count = 0 @@ -766,11 +766,11 @@ def collect( zero_count=current_zero_count, positive=BucketsPoint( offset=current_positive.offset, - bucket_counts=current_positive.counts, + bucket_counts=current_positive.counts, # type: ignore[misc] # ), negative=BucketsPoint( offset=current_negative.offset, - bucket_counts=current_negative.counts, + bucket_counts=current_negative.counts, # type: ignore[misc] # ), # FIXME: Find the right value for flags flags=0, @@ -782,17 +782,15 @@ def collect( self._instrument_aggregation_temporality is collection_aggregation_temporality ): - self._previous_scale = current_scale - self._previous_start_time_unix_nano = ( - current_start_time_unix_nano - ) - self._previous_max = current_max - self._previous_min = current_min - self._previous_sum = current_sum - self._previous_positive = current_positive - self._previous_negative = current_negative + self._previous_scale = current_scale # type: ignore[assignment] # + self._previous_start_time_unix_nano = current_start_time_unix_nano # type: ignore[assignment] # + self._previous_max = current_max # type: ignore[assignment] # + self._previous_min = current_min # type: ignore[assignment] # + self._previous_sum = current_sum # type: ignore[assignment] # + self._previous_positive = current_positive # type: ignore[assignment] # + self._previous_negative = current_negative # type: ignore[assignment] # - return current_point + return current_point # type: ignore[return-value] # min_scale = min(self._previous_scale, current_scale) @@ -896,52 +894,52 @@ def collect( return current_point - def _get_low_high_previous_current( + def _get_low_high_previous_current( # type: ignore[no-untyped-def] # self, previous_point_buckets, current_point_buckets, min_scale ): - (previous_point_low, previous_point_high) = self._get_low_high( - previous_point_buckets, min_scale + (previous_point_low, previous_point_high) = self._get_low_high( # type: ignore[misc, no-untyped-call] # + previous_point_buckets, min_scale # type: ignore[misc] # ) - (current_point_low, current_point_high) = self._get_low_high( - current_point_buckets, min_scale + (current_point_low, current_point_high) = self._get_low_high( # type: ignore[misc, no-untyped-call] # + current_point_buckets, min_scale # type: ignore[misc] # ) - if current_point_low > current_point_high: - low = previous_point_low - high = previous_point_high + if current_point_low > current_point_high: # type: ignore[misc] # + low = previous_point_low # type: ignore[misc] # + high = previous_point_high # type: ignore[misc] # - elif previous_point_low > previous_point_high: - low = current_point_low - high = current_point_high + elif previous_point_low > 
previous_point_high: # type: ignore[misc] # + low = current_point_low # type: ignore[misc] # + high = current_point_high # type: ignore[misc] # else: - low = min(previous_point_low, current_point_low) - high = max(previous_point_high, current_point_high) + low = min(previous_point_low, current_point_low) # type: ignore[misc] # + high = max(previous_point_high, current_point_high) # type: ignore[misc] # - return low, high + return low, high # type: ignore[misc] # - def _get_low_high(self, buckets, min_scale): - if buckets.counts == [0]: + def _get_low_high(self, buckets, min_scale): # type: ignore[no-untyped-def] # + if buckets.counts == [0]: # type: ignore[misc] # return 0, -1 - shift = self._mapping._scale - min_scale + shift = self._mapping._scale - min_scale # type: ignore[misc] # - return buckets.index_start >> shift, buckets.index_end >> shift + return buckets.index_start >> shift, buckets.index_end >> shift # type: ignore[misc] # - def _get_scale_change(self, low, high): + def _get_scale_change(self, low, high): # type: ignore[no-untyped-def] # change = 0 - while high - low >= self._max_size: - high = high >> 1 - low = low >> 1 + while high - low >= self._max_size: # type: ignore[misc] # + high = high >> 1 # type: ignore[misc] # + low = low >> 1 # type: ignore[misc] # change += 1 return change - def _downscale(self, change: int, positive, negative): + def _downscale(self, change: int, positive, negative): # type: ignore[no-untyped-def] # if change == 0: return @@ -951,17 +949,17 @@ def _downscale(self, change: int, positive, negative): new_scale = self._mapping.scale - change - positive.downscale(change) - negative.downscale(change) + positive.downscale(change) # type: ignore[misc] # + negative.downscale(change) # type: ignore[misc] # if new_scale <= 0: mapping = ExponentMapping(new_scale) else: - mapping = LogarithmMapping(new_scale) + mapping = LogarithmMapping(new_scale) # type: ignore[assignment] # - self._mapping = mapping + self._mapping = mapping # type: ignore[assignment] # - def _merge( + def _merge( # type: ignore[no-untyped-def] # self, previous_buckets, current_buckets, @@ -970,55 +968,55 @@ def _merge( aggregation_temporality, ): - current_change = current_scale - min_scale + current_change = current_scale - min_scale # type: ignore[misc] # - for current_bucket_index, current_bucket in enumerate( - current_buckets.counts + for current_bucket_index, current_bucket in enumerate( # type: ignore[misc] # + current_buckets.counts # type: ignore[misc] # ): - if current_bucket == 0: + if current_bucket == 0: # type: ignore[misc] # continue # Not considering the case where len(previous_buckets) == 0. This # would not happen because self._previous_point is only assigned to # an ExponentialHistogramDataPoint object if self._count != 0. 
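            # Example with made-up numbers: when current_change == 2, the
            # source indices 8 through 11 all collapse to 8 >> 2 == 11 >> 2
            # == 2; each unit of downscaling halves the resolution, so a
            # change of 2 merges four fine buckets into one coarse bucket.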
- index = ( - current_buckets.offset + current_bucket_index - ) >> current_change + index = ( # type: ignore[misc] # + current_buckets.offset + current_bucket_index # type: ignore[misc] # + ) >> current_change # type: ignore[misc] # - if index < previous_buckets.index_start: - span = previous_buckets.index_end - index + if index < previous_buckets.index_start: # type: ignore[misc] # + span = previous_buckets.index_end - index # type: ignore[misc] # - if span >= self._max_size: + if span >= self._max_size: # type: ignore[misc] # raise Exception("Incorrect merge scale") - if span >= len(previous_buckets.counts): - previous_buckets.grow(span + 1, self._max_size) + if span >= len(previous_buckets.counts): # type: ignore[misc] # + previous_buckets.grow(span + 1, self._max_size) # type: ignore[misc] # - previous_buckets.index_start = index + previous_buckets.index_start = index # type: ignore[misc] # - if index > previous_buckets.index_end: - span = index - previous_buckets.index_end + if index > previous_buckets.index_end: # type: ignore[misc] # + span = index - previous_buckets.index_end # type: ignore[misc] # - if span >= self._max_size: + if span >= self._max_size: # type: ignore[misc] # raise Exception("Incorrect merge scale") - if span >= len(previous_buckets.counts): - previous_buckets.grow(span + 1, self._max_size) + if span >= len(previous_buckets.counts): # type: ignore[misc] # + previous_buckets.grow(span + 1, self._max_size) # type: ignore[misc] # - previous_buckets.index_end = index + previous_buckets.index_end = index # type: ignore[misc] # - bucket_index = index - previous_buckets.index_base + bucket_index = index - previous_buckets.index_base # type: ignore[misc] # - if bucket_index < 0: - bucket_index += len(previous_buckets.counts) + if bucket_index < 0: # type: ignore[misc] # + bucket_index += len(previous_buckets.counts) # type: ignore[misc] # - if aggregation_temporality is AggregationTemporality.DELTA: - current_bucket = -current_bucket + if aggregation_temporality is AggregationTemporality.DELTA: # type: ignore[misc] # + current_bucket = -current_bucket # type: ignore[misc] # - previous_buckets.increment_bucket( - bucket_index, increment=current_bucket + previous_buckets.increment_bucket( # type: ignore[misc] # + bucket_index, increment=current_bucket # type: ignore[misc] # ) @@ -1028,12 +1026,12 @@ class Aggregation(ABC): """ @abstractmethod - def _create_aggregation( + def _create_aggregation( # type: ignore[misc] # self, instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # """Creates an aggregation""" @@ -1061,7 +1059,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # # pylint: disable=too-many-return-statements if isinstance(instrument, Counter): @@ -1135,7 +1133,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # return _ExponentialBucketHistogramAggregation( attributes, start_time_unix_nano, @@ -1187,7 +1185,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED if isinstance(instrument, Synchronous): @@ -1217,7 +1215,7 @@ def _create_aggregation( 
instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED if isinstance(instrument, Synchronous): @@ -1248,7 +1246,7 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # return _LastValueAggregation(attributes) @@ -1260,5 +1258,5 @@ def _create_aggregation( instrument: Instrument, attributes: Attributes, start_time_unix_nano: int, - ) -> _Aggregation: + ) -> _Aggregation: # type: ignore[type-arg] # return _DropAggregation(attributes) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py index 5c6b04bd39b..50f5f18ca96 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py @@ -20,7 +20,7 @@ class Buckets: # No method of this class is protected by locks because instances of this # class are only used in methods that are protected by locks themselves. - def __init__(self): + def __init__(self): # type: ignore[no-untyped-def] # self._counts = [0] # The term index refers to the number of the exponential histogram bucket @@ -70,7 +70,7 @@ def index_base(self, value: int) -> None: self.__index_base = value @property - def counts(self): + def counts(self): # type: ignore[misc, no-untyped-def] # return self._counts def grow(self, needed: int, max_size: int) -> None: @@ -89,12 +89,12 @@ def grow(self, needed: int, max_size: int) -> None: # 2 ** ceil(log2(6)) == 8 # 2 ** ceil(log2(7)) == 8 # 2 ** ceil(log2(8)) == 8 - new_size = min(2 ** ceil(log2(needed)), max_size) + new_size = min(2 ** ceil(log2(needed)), max_size) # type: ignore[misc] # - new_positive_limit = new_size - bias + new_positive_limit = new_size - bias # type: ignore[misc] # - tmp = [0] * new_size - tmp[new_positive_limit:] = self._counts[old_positive_limit:] + tmp = [0] * new_size # type: ignore[misc] # + tmp[new_positive_limit:] = self._counts[old_positive_limit:] # type: ignore[misc] # tmp[0:old_positive_limit] = self._counts[0:old_positive_limit] self._counts = tmp diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py index d8c780cf404..a2c91997dca 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py @@ -21,29 +21,29 @@ class Mapping(ABC): """ # pylint: disable=no-member - def __new__(cls, scale: int): + def __new__(cls, scale: int): # type: ignore[no-untyped-def] # - with cls._mappings_lock: + with cls._mappings_lock: # type: ignore[attr-defined, misc] # # cls._mappings and cls._mappings_lock are implemented in each of # the child classes as a dictionary and a lock, respectively. They # are not instantiated here because that would lead to both child # classes having the same instance of cls._mappings and # cls._mappings_lock. 
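            # The effect is a per-scale, per-class singleton: calling
            # LogarithmMapping(3) twice returns the same cached instance,
            # while ExponentMapping(-2) is cached in its own registry.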
- if scale not in cls._mappings: - cls._mappings[scale] = super().__new__(cls) - cls._mappings[scale]._init(scale) + if scale not in cls._mappings: # type: ignore[attr-defined, misc] # + cls._mappings[scale] = super().__new__(cls) # type: ignore[attr-defined, misc] # + cls._mappings[scale]._init(scale) # type: ignore[attr-defined, misc] # - return cls._mappings[scale] + return cls._mappings[scale] # type: ignore[attr-defined, misc] # @abstractmethod def _init(self, scale: int) -> None: # pylint: disable=attribute-defined-outside-init if scale > self._get_max_scale(): - raise Exception(f"scale is larger than {self._max_scale}") + raise Exception(f"scale is larger than {self._max_scale}") # type: ignore[attr-defined, misc] # if scale < self._get_min_scale(): - raise Exception(f"scale is smaller than {self._min_scale}") + raise Exception(f"scale is smaller than {self._min_scale}") # type: ignore[attr-defined, misc] # # The size of the exponential histogram buckets is determined by a # parameter known as scale, larger values of scale will produce smaller diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py index 297bb7a4831..75d6e6baa78 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py @@ -36,24 +36,24 @@ class ExponentMapping(Mapping): # Reference implementation here: # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go - _mappings = {} + _mappings = {} # type: ignore[var-annotated] # _mappings_lock = Lock() _min_scale = -10 _max_scale = 0 - def _get_min_scale(self): + def _get_min_scale(self): # type: ignore[no-untyped-def] # # _min_scale defines the point at which the exponential mapping # function becomes useless for 64-bit floats. With scale -10, ignoring # subnormal values, bucket indices range from -1 to 1. return -10 - def _get_max_scale(self): + def _get_max_scale(self): # type: ignore[no-untyped-def] # # _max_scale is the largest scale supported by exponential mapping. Use # a logarithm mapping for larger scales. return 0 - def _init(self, scale: int): + def _init(self, scale: int): # type: ignore[no-untyped-def] # # pylint: disable=attribute-defined-outside-init super()._init(scale) @@ -64,15 +64,15 @@ def _init(self, scale: int): # bucket with this index covers the range # (base ** index, base (index + 1)], including MIN_NORMAL_VALUE. This # is the smallest valid index that contains at least one normal value. - index = MIN_NORMAL_EXPONENT >> -self._scale + index = MIN_NORMAL_EXPONENT >> -self._scale # type: ignore[misc] # if -self._scale < 2: # For scales -1 and 0, the maximum value 2 ** -1022 is a # power-of-two multiple, meaning base ** index == MIN_NORMAL_VALUE. # Subtracting 1 so that base ** (index + 1) == MIN_NORMAL_VALUE. - index -= 1 + index -= 1 # type: ignore[misc] # - self._min_normal_lower_boundary_index = index + self._min_normal_lower_boundary_index = index # type: ignore[misc] # # self._max_normal_lower_boundary_index is the index such that # base**index equals the greatest representable lower boundary. An @@ -83,12 +83,12 @@ def _init(self, scale: int): # represented. 
One greater than this index corresponds with the bucket # containing values > 2 ** 1024. self._max_normal_lower_boundary_index = ( - MAX_NORMAL_EXPONENT >> -self._scale + MAX_NORMAL_EXPONENT >> -self._scale # type: ignore[misc] # ) def map_to_index(self, value: float) -> int: if value < MIN_NORMAL_VALUE: - return self._min_normal_lower_boundary_index + return self._min_normal_lower_boundary_index # type: ignore[misc, no-any-return] # exponent = get_ieee_754_exponent(value) @@ -128,10 +128,10 @@ def map_to_index(self, value: float) -> int: return (exponent + correction) >> -self._scale def get_lower_boundary(self, index: int) -> float: - if index < self._min_normal_lower_boundary_index: + if index < self._min_normal_lower_boundary_index: # type: ignore[misc] # raise MappingUnderflowError() - if index > self._max_normal_lower_boundary_index: + if index > self._max_normal_lower_boundary_index: # type: ignore[misc] # raise MappingOverflowError() return ldexp(1, index << -self._scale) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py index 9503b57c0e0..21d4ef69d16 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py @@ -29,7 +29,7 @@ # from 1 to 2046. To calculate the exponent value, 1023 (the bias) is # subtracted from the exponent, so the exponent value range is from -1022 to # +1023. -EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1 +EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1 # type: ignore[misc] # # All the exponent mask bits are set to 1 for the 11 exponent bits. EXPONENT_MASK = ((1 << EXPONENT_WIDTH) - 1) << MANTISSA_WIDTH @@ -39,8 +39,8 @@ # For normal floating point numbers, the exponent can have a value in the # range [-1022, 1023]. -MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1 -MAX_NORMAL_EXPONENT = EXPONENT_BIAS +MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1 # type: ignore[misc] # +MAX_NORMAL_EXPONENT = EXPONENT_BIAS # type: ignore[misc] # # The smallest possible normal value is 2.2250738585072014e-308. # This value is the result of using the smallest possible number in the @@ -65,7 +65,7 @@ def get_ieee_754_exponent(value: float) -> int: Gets the exponent of the IEEE 754 representation of a float. """ - return ( + return ( # type: ignore[misc, no-any-return] # ( # This step gives the integer that corresponds to the IEEE 754 # representation of a float. For example, consider @@ -102,7 +102,7 @@ def get_ieee_754_exponent(value: float) -> int: # exponent. # This step subtracts the exponent bias from the IEEE 754 value, # leaving the actual exponent value. 
- ) - EXPONENT_BIAS + ) - EXPONENT_BIAS # type: ignore[misc] # # For the example this means: # 2046 - EXPONENT_BIAS == 1023 # As mentioned in a comment above, the largest value for the exponent is diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py index 5abf9238b9b..689446b5079 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py @@ -35,25 +35,25 @@ class LogarithmMapping(Mapping): # Reference implementation here: # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go - _mappings = {} + _mappings = {} # type: ignore[var-annotated] # _mappings_lock = Lock() _min_scale = 1 _max_scale = 20 - def _get_min_scale(self): + def _get_min_scale(self): # type: ignore[no-untyped-def] # # _min_scale ensures that ExponentMapping is used for zero and negative # scale values. return self._min_scale - def _get_max_scale(self): + def _get_max_scale(self): # type: ignore[no-untyped-def] # # FIXME The Go implementation uses a value of 20 here, find out the # right value for this implementation, more information here: # https://github.com/lightstep/otel-launcher-go/blob/c9ca8483be067a39ab306b09060446e7fda65f35/lightstep/sdk/metric/aggregator/histogram/structure/README.md#mapping-function # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go#L32-L45 return self._max_scale - def _init(self, scale: int): + def _init(self, scale: int): # type: ignore[no-untyped-def] # # pylint: disable=attribute-defined-outside-init super()._init(scale) @@ -80,7 +80,7 @@ def _init(self, scale: int): # (MIN_NORMAL_VALUE, MIN_NORMAL_VALUE * base]. One less than this index # corresponds with the bucket containing values <= MIN_NORMAL_VALUE. self._min_normal_lower_boundary_index = ( - MIN_NORMAL_EXPONENT << self._scale + MIN_NORMAL_EXPONENT << self._scale # type: ignore[misc] # ) # self._max_normal_lower_boundary_index is the index such that @@ -91,8 +91,8 @@ def _init(self, scale: int): # This bucket is incomplete, since the upper boundary cannot be # represented. One greater than this index corresponds with the bucket # containing values > 2 ** 1024. - self._max_normal_lower_boundary_index = ( - (MAX_NORMAL_EXPONENT + 1) << self._scale + self._max_normal_lower_boundary_index = ( # type: ignore[misc] # + (MAX_NORMAL_EXPONENT + 1) << self._scale # type: ignore[misc] # ) - 1 def map_to_index(self, value: float) -> int: @@ -102,31 +102,31 @@ def map_to_index(self, value: float) -> int: # value is subnormal if value <= MIN_NORMAL_VALUE: - return self._min_normal_lower_boundary_index - 1 + return self._min_normal_lower_boundary_index - 1 # type: ignore[misc, no-any-return] # # value is an exact power of two. 
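        # For instance, at scale 1 (base == sqrt(2)) the value 4.0 has an
        # IEEE 754 exponent of 2 and maps to index (2 << 1) - 1 == 3, the
        # bucket whose upper boundary is exactly 4.0.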
if get_ieee_754_mantissa(value) == 0: exponent = get_ieee_754_exponent(value) return (exponent << self._scale) - 1 - return min( + return min( # type: ignore[misc, no-any-return] # floor(log(value) * self._scale_factor), - self._max_normal_lower_boundary_index, + self._max_normal_lower_boundary_index, # type: ignore[misc] # ) def get_lower_boundary(self, index: int) -> float: - if index >= self._max_normal_lower_boundary_index: - if index == self._max_normal_lower_boundary_index: + if index >= self._max_normal_lower_boundary_index: # type: ignore[misc] # + if index == self._max_normal_lower_boundary_index: # type: ignore[misc] # return 2 * exp( (index - (1 << self._scale)) / self._scale_factor ) raise MappingOverflowError() - if index <= self._min_normal_lower_boundary_index: - if index == self._min_normal_lower_boundary_index: + if index <= self._min_normal_lower_boundary_index: # type: ignore[misc] # + if index == self._min_normal_lower_boundary_index: # type: ignore[misc] # return MIN_NORMAL_VALUE - if index == self._min_normal_lower_boundary_index - 1: + if index == self._min_normal_lower_boundary_index - 1: # type: ignore[misc] # return ( exp((index + (1 << self._scale)) / self._scale_factor) / 2 ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/export/__init__.py index 14546636a94..639d8b6904e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/export/__init__.py @@ -90,16 +90,16 @@ class MetricExporter(ABC): def __init__( self, - preferred_temporality: Dict[type, AggregationTemporality] = None, - preferred_aggregation: Dict[ + preferred_temporality: Dict[type, AggregationTemporality] = None, # type: ignore[assignment] # + preferred_aggregation: Dict[ # type: ignore[name-defined] # type, "opentelemetry.sdk.metrics.view.Aggregation" - ] = None, + ] = None, # type: ignore[assignment] # ) -> None: self._preferred_temporality = preferred_temporality self._preferred_aggregation = preferred_aggregation @abstractmethod - def export( + def export( # type: ignore[misc, no-untyped-def] # self, metrics_data: MetricsData, timeout_millis: float = 10_000, @@ -122,7 +122,7 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool: """ @abstractmethod - def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: # type: ignore[misc, no-untyped-def] # """Shuts down the exporter. Called when the SDK is shut down. 
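The hunks above only annotate the abstract surface of MetricExporter (export, force_flush, shutdown) without changing behavior. As a reading aid, a minimal conforming exporter might look like the sketch below; InMemoryListExporter and its list-buffering behavior are invented for illustration and are not part of this patch, and only the imported base classes come from the SDK.

    from typing import List

    from opentelemetry.sdk.metrics.export import (
        MetricExporter,
        MetricExportResult,
        MetricsData,
    )


    class InMemoryListExporter(MetricExporter):
        """Hypothetical exporter that buffers exported batches in a list."""

        def __init__(self) -> None:
            super().__init__()
            self._batches: List[MetricsData] = []

        def export(
            self,
            metrics_data: MetricsData,
            timeout_millis: float = 10_000,
            **kwargs,
        ) -> MetricExportResult:
            # Each call receives one batch of metrics from the reader.
            self._batches.append(metrics_data)
            return MetricExportResult.SUCCESS

        def force_flush(self, timeout_millis: float = 10_000) -> bool:
            # Nothing is queued asynchronously, so there is nothing to flush.
            return True

        def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
            # No resources to release in this sketch.
            pass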
@@ -139,34 +139,34 @@ class ConsoleMetricExporter(MetricExporter): def __init__( self, - out: IO = stdout, - formatter: Callable[ + out: IO = stdout, # type: ignore[type-arg] # + formatter: Callable[ # type: ignore[name-defined] # ["opentelemetry.sdk.metrics.export.MetricsData"], str - ] = lambda metrics_data: metrics_data.to_json() + ] = lambda metrics_data: metrics_data.to_json() # type: ignore[misc, no-any-return] # + linesep, - preferred_temporality: Dict[type, AggregationTemporality] = None, - preferred_aggregation: Dict[ + preferred_temporality: Dict[type, AggregationTemporality] = None, # type: ignore[assignment] # + preferred_aggregation: Dict[ # type: ignore[name-defined] # type, "opentelemetry.sdk.metrics.view.Aggregation" - ] = None, + ] = None, # type: ignore[assignment] # ): super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) - self.out = out + self.out = out # type: ignore[misc] # self.formatter = formatter - def export( + def export( # type: ignore[no-untyped-def] # self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: - self.out.write(self.formatter(metrics_data)) - self.out.flush() + self.out.write(self.formatter(metrics_data)) # type: ignore[misc] # + self.out.flush() # type: ignore[misc] # return MetricExportResult.SUCCESS - def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: # type: ignore[no-untyped-def] # pass def force_flush(self, timeout_millis: float = 10_000) -> bool: @@ -209,27 +209,27 @@ class MetricReader(ABC): def __init__( self, - preferred_temporality: Dict[type, AggregationTemporality] = None, - preferred_aggregation: Dict[ + preferred_temporality: Dict[type, AggregationTemporality] = None, # type: ignore[assignment] # + preferred_aggregation: Dict[ # type: ignore[name-defined] # type, "opentelemetry.sdk.metrics.view.Aggregation" - ] = None, + ] = None, # type: ignore[assignment] # ) -> None: - self._collect: Callable[ + self._collect: Callable[ # type: ignore[name-defined] # [ "opentelemetry.sdk.metrics.export.MetricReader", AggregationTemporality, ], Iterable["opentelemetry.sdk.metrics.export.Metric"], - ] = None - - self._instrument_class_temporality = { - _Counter: AggregationTemporality.CUMULATIVE, - _UpDownCounter: AggregationTemporality.CUMULATIVE, - _Histogram: AggregationTemporality.CUMULATIVE, - _Gauge: AggregationTemporality.CUMULATIVE, - _ObservableCounter: AggregationTemporality.CUMULATIVE, - _ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, - _ObservableGauge: AggregationTemporality.CUMULATIVE, + ] = None # type: ignore[assignment] # + + self._instrument_class_temporality = { # type: ignore[misc] # + _Counter: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # + _UpDownCounter: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # + _Histogram: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # + _Gauge: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # + _ObservableCounter: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # + _ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # + _ObservableGauge: AggregationTemporality.CUMULATIVE, # type: ignore[misc] # } if preferred_temporality is not None: @@ -244,69 +244,69 @@ def __init__( if preferred_temporality is not None: for typ, temporality in preferred_temporality.items(): - if typ is Counter: - 
self._instrument_class_temporality[_Counter] = temporality - elif typ is UpDownCounter: - self._instrument_class_temporality[ - _UpDownCounter + if typ is Counter: # type: ignore[misc] # + self._instrument_class_temporality[_Counter] = temporality # type: ignore[misc] # + elif typ is UpDownCounter: # type: ignore[misc] # + self._instrument_class_temporality[ # type: ignore[misc] # + _UpDownCounter # type: ignore[misc] # ] = temporality - elif typ is Histogram: - self._instrument_class_temporality[ - _Histogram + elif typ is Histogram: # type: ignore[misc] # + self._instrument_class_temporality[ # type: ignore[misc] # + _Histogram # type: ignore[misc] # ] = temporality - elif typ is Gauge: - self._instrument_class_temporality[_Gauge] = temporality - elif typ is ObservableCounter: - self._instrument_class_temporality[ - _ObservableCounter + elif typ is Gauge: # type: ignore[misc] # + self._instrument_class_temporality[_Gauge] = temporality # type: ignore[misc] # + elif typ is ObservableCounter: # type: ignore[misc] # + self._instrument_class_temporality[ # type: ignore[misc] # + _ObservableCounter # type: ignore[misc] # ] = temporality - elif typ is ObservableUpDownCounter: - self._instrument_class_temporality[ - _ObservableUpDownCounter + elif typ is ObservableUpDownCounter: # type: ignore[misc] # + self._instrument_class_temporality[ # type: ignore[misc] # + _ObservableUpDownCounter # type: ignore[misc] # ] = temporality - elif typ is ObservableGauge: - self._instrument_class_temporality[ - _ObservableGauge + elif typ is ObservableGauge: # type: ignore[misc] # + self._instrument_class_temporality[ # type: ignore[misc] # + _ObservableGauge # type: ignore[misc] # ] = temporality else: raise Exception(f"Invalid instrument class found {typ}") self._preferred_temporality = preferred_temporality - self._instrument_class_aggregation = { - _Counter: DefaultAggregation(), - _UpDownCounter: DefaultAggregation(), - _Histogram: DefaultAggregation(), - _Gauge: DefaultAggregation(), - _ObservableCounter: DefaultAggregation(), - _ObservableUpDownCounter: DefaultAggregation(), - _ObservableGauge: DefaultAggregation(), + self._instrument_class_aggregation = { # type: ignore[misc] # + _Counter: DefaultAggregation(), # type: ignore[misc] # + _UpDownCounter: DefaultAggregation(), # type: ignore[misc] # + _Histogram: DefaultAggregation(), # type: ignore[misc] # + _Gauge: DefaultAggregation(), # type: ignore[misc] # + _ObservableCounter: DefaultAggregation(), # type: ignore[misc] # + _ObservableUpDownCounter: DefaultAggregation(), # type: ignore[misc] # + _ObservableGauge: DefaultAggregation(), # type: ignore[misc] # } if preferred_aggregation is not None: for typ, aggregation in preferred_aggregation.items(): - if typ is Counter: - self._instrument_class_aggregation[_Counter] = aggregation - elif typ is UpDownCounter: - self._instrument_class_aggregation[ - _UpDownCounter + if typ is Counter: # type: ignore[misc] # + self._instrument_class_aggregation[_Counter] = aggregation # type: ignore[misc] # + elif typ is UpDownCounter: # type: ignore[misc] # + self._instrument_class_aggregation[ # type: ignore[misc] # + _UpDownCounter # type: ignore[misc] # ] = aggregation - elif typ is Histogram: - self._instrument_class_aggregation[ - _Histogram + elif typ is Histogram: # type: ignore[misc] # + self._instrument_class_aggregation[ # type: ignore[misc] # + _Histogram # type: ignore[misc] # ] = aggregation - elif typ is Gauge: - self._instrument_class_aggregation[_Gauge] = aggregation - elif typ is ObservableCounter: - 
self._instrument_class_aggregation[ - _ObservableCounter + elif typ is Gauge: # type: ignore[misc] # + self._instrument_class_aggregation[_Gauge] = aggregation # type: ignore[misc] # + elif typ is ObservableCounter: # type: ignore[misc] # + self._instrument_class_aggregation[ # type: ignore[misc] # + _ObservableCounter # type: ignore[misc] # ] = aggregation - elif typ is ObservableUpDownCounter: - self._instrument_class_aggregation[ - _ObservableUpDownCounter + elif typ is ObservableUpDownCounter: # type: ignore[misc] # + self._instrument_class_aggregation[ # type: ignore[misc] # + _ObservableUpDownCounter # type: ignore[misc] # ] = aggregation - elif typ is ObservableGauge: - self._instrument_class_aggregation[ - _ObservableGauge + elif typ is ObservableGauge: # type: ignore[misc] # + self._instrument_class_aggregation[ # type: ignore[misc] # + _ObservableGauge # type: ignore[misc] # ] = aggregation else: raise Exception(f"Invalid instrument class found {typ}") @@ -330,7 +330,7 @@ def collect(self, timeout_millis: float = 10_000) -> None: ) return - metrics = self._collect(self, timeout_millis=timeout_millis) + metrics = self._collect(self, timeout_millis=timeout_millis) # type: ignore[call-arg] # if metrics is not None: @@ -342,7 +342,7 @@ def collect(self, timeout_millis: float = 10_000) -> None: @final def _set_collect_callback( self, - func: Callable[ + func: Callable[ # type: ignore[name-defined] # [ "opentelemetry.sdk.metrics.export.MetricReader", AggregationTemporality, @@ -354,9 +354,9 @@ def _set_collect_callback( self._collect = func @abstractmethod - def _receive_metrics( + def _receive_metrics( # type: ignore[misc, no-untyped-def] # self, - metrics_data: "opentelemetry.sdk.metrics.export.MetricsData", + metrics_data: "opentelemetry.sdk.metrics.export.MetricsData", # type: ignore[name-defined] # timeout_millis: float = 10_000, **kwargs, ) -> None: @@ -367,7 +367,7 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool: return True @abstractmethod - def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: # type: ignore[misc, no-untyped-def] # """Shuts down the MetricReader. This method provides a way for the MetricReader to do any cleanup required. 
A metric reader can only be shutdown once, any subsequent calls are ignored and return @@ -388,23 +388,23 @@ class InMemoryMetricReader(MetricReader): def __init__( self, - preferred_temporality: Dict[type, AggregationTemporality] = None, - preferred_aggregation: Dict[ + preferred_temporality: Dict[type, AggregationTemporality] = None, # type: ignore[assignment] # + preferred_aggregation: Dict[ # type: ignore[name-defined] # type, "opentelemetry.sdk.metrics.view.Aggregation" - ] = None, + ] = None, # type: ignore[assignment] # ) -> None: super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) self._lock = RLock() - self._metrics_data: ( + self._metrics_data: ( # type: ignore[name-defined] # "opentelemetry.sdk.metrics.export.MetricsData" ) = None def get_metrics_data( self, - ) -> ("opentelemetry.sdk.metrics.export.MetricsData"): + ) -> ("opentelemetry.sdk.metrics.export.MetricsData"): # type: ignore[name-defined] # """Reads and returns current metrics from the SDK""" with self._lock: self.collect() @@ -412,16 +412,16 @@ def get_metrics_data( self._metrics_data = None return metrics_data - def _receive_metrics( + def _receive_metrics( # type: ignore[no-untyped-def] # self, - metrics_data: "opentelemetry.sdk.metrics.export.MetricsData", + metrics_data: "opentelemetry.sdk.metrics.export.MetricsData", # type: ignore[name-defined] # timeout_millis: float = 10_000, **kwargs, ) -> None: with self._lock: self._metrics_data = metrics_data - def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: # type: ignore[no-untyped-def] # pass @@ -491,7 +491,7 @@ def __init__( self._daemon_thread.start() if hasattr(os, "register_at_fork"): os.register_at_fork( - after_in_child=self._at_fork_reinit + after_in_child=self._at_fork_reinit # type: ignore[misc] # ) # pylint: disable=protected-access elif self._export_interval_millis <= 0: raise ValueError( @@ -499,7 +499,7 @@ def __init__( and needs to be larger than zero." 
) - def _at_fork_reinit(self): + def _at_fork_reinit(self): # type: ignore[no-untyped-def] # self._daemon_thread = Thread( name="OtelPeriodicExportingMetricReader", target=self._ticker, @@ -521,7 +521,7 @@ def _ticker(self) -> None: # one last collection below before shutting down completely self.collect(timeout_millis=self._export_interval_millis) - def _receive_metrics( + def _receive_metrics( # type: ignore[no-untyped-def] # self, metrics_data: MetricsData, timeout_millis: float = 10_000, @@ -538,13 +538,13 @@ def _receive_metrics( _logger.exception("Exception while exporting metrics %s", str(e)) detach(token) - def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: # type: ignore[no-untyped-def] # deadline_ns = time_ns() + timeout_millis * 10**6 - def _shutdown(): + def _shutdown(): # type: ignore[no-untyped-def] # self._shutdown = True - did_set = self._shutdown_once.do_once(_shutdown) + did_set = self._shutdown_once.do_once(_shutdown) # type: ignore[misc] # if not did_set: _logger.warning("Can't shutdown multiple times") return diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py index 11dd8499341..0a0b8a114b7 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py @@ -46,29 +46,29 @@ def __init__( self, name: str, instrumentation_scope: InstrumentationScope, - measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", + measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", # type: ignore[name-defined] # unit: str = "", description: str = "", ): # pylint: disable=no-member - result = self._check_name_unit_description(name, unit, description) + result = self._check_name_unit_description(name, unit, description) # type: ignore[attr-defined, misc] # - if result["name"] is None: + if result["name"] is None: # type: ignore[misc] # raise Exception(_ERROR_MESSAGE.format(name)) - if result["unit"] is None: + if result["unit"] is None: # type: ignore[misc] # raise Exception(_ERROR_MESSAGE.format(unit)) - name = result["name"] - unit = result["unit"] - description = result["description"] + name = result["name"] # type: ignore[misc] # + unit = result["unit"] # type: ignore[misc] # + description = result["description"] # type: ignore[misc] # self.name = name.lower() self.unit = unit self.description = description self.instrumentation_scope = instrumentation_scope self._measurement_consumer = measurement_consumer - super().__init__(name, unit=unit, description=description) + super().__init__(name, unit=unit, description=description) # type: ignore[call-arg] # class _Asynchronous: @@ -76,30 +76,30 @@ def __init__( self, name: str, instrumentation_scope: InstrumentationScope, - measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", + measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", # type: ignore[name-defined] # callbacks: Optional[Iterable[CallbackT]] = None, unit: str = "", description: str = "", ): # pylint: disable=no-member - result = self._check_name_unit_description(name, unit, description) + result = self._check_name_unit_description(name, unit, description) # type: ignore[attr-defined, misc] # - if result["name"] is None: + if result["name"] is None: # type: ignore[misc] # raise Exception(_ERROR_MESSAGE.format(name)) - if 
result["unit"] is None: + if result["unit"] is None: # type: ignore[misc] # raise Exception(_ERROR_MESSAGE.format(unit)) - name = result["name"] - unit = result["unit"] - description = result["description"] + name = result["name"] # type: ignore[misc] # + unit = result["unit"] # type: ignore[misc] # + description = result["description"] # type: ignore[misc] # self.name = name.lower() self.unit = unit self.description = description self.instrumentation_scope = instrumentation_scope self._measurement_consumer = measurement_consumer - super().__init__(name, callbacks, unit=unit, description=description) + super().__init__(name, callbacks, unit=unit, description=description) # type: ignore[call-arg] # self._callbacks: List[CallbackT] = [] @@ -112,16 +112,16 @@ def __init__( # advance generator to it's first yield next(callback) - def inner( + def inner( # type: ignore[no-untyped-def] # options: CallbackOptions, callback=callback, ) -> Iterable[Measurement]: try: - return callback.send(options) + return callback.send(options) # type: ignore[misc, no-any-return] # except StopIteration: return [] - self._callbacks.append(inner) + self._callbacks.append(inner) # type: ignore[arg-type, misc] # else: self._callbacks.append(callback) @@ -130,11 +130,11 @@ def callback( ) -> Iterable[Measurement]: for callback in self._callbacks: try: - for api_measurement in callback(callback_options): + for api_measurement in callback(callback_options): # type: ignore[misc, operator] # yield Measurement( - api_measurement.value, - instrument=self, - attributes=api_measurement.attributes, + api_measurement.value, # type: ignore[misc] # + instrument=self, # type: ignore[arg-type] # + attributes=api_measurement.attributes, # type: ignore[misc] # ) except Exception: # pylint: disable=broad-except _logger.exception( @@ -143,13 +143,13 @@ def callback( class Counter(_Synchronous, APICounter): - def __new__(cls, *args, **kwargs): - if cls is Counter: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is Counter: # type: ignore[misc] # raise TypeError("Counter must be instantiated via a meter.") return super().__new__(cls) - def add( - self, amount: Union[int, float], attributes: Dict[str, str] = None + def add( # type: ignore[no-untyped-def, override] # + self, amount: Union[int, float], attributes: Dict[str, str] = None # type: ignore[assignment] # ): if amount < 0: _logger.warning( @@ -162,13 +162,13 @@ def add( class UpDownCounter(_Synchronous, APIUpDownCounter): - def __new__(cls, *args, **kwargs): - if cls is UpDownCounter: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is UpDownCounter: # type: ignore[misc] # raise TypeError("UpDownCounter must be instantiated via a meter.") return super().__new__(cls) - def add( - self, amount: Union[int, float], attributes: Dict[str, str] = None + def add( # type: ignore[no-untyped-def, override] # + self, amount: Union[int, float], attributes: Dict[str, str] = None # type: ignore[assignment] # ): self._measurement_consumer.consume_measurement( Measurement(amount, self, attributes) @@ -176,8 +176,8 @@ def add( class ObservableCounter(_Asynchronous, APIObservableCounter): - def __new__(cls, *args, **kwargs): - if cls is ObservableCounter: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is ObservableCounter: # type: ignore[misc] # raise TypeError( "ObservableCounter must be instantiated via a meter." 
) @@ -185,8 +185,8 @@ def __new__(cls, *args, **kwargs): class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter): - def __new__(cls, *args, **kwargs): - if cls is ObservableUpDownCounter: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is ObservableUpDownCounter: # type: ignore[misc] # raise TypeError( "ObservableUpDownCounter must be instantiated via a meter." ) @@ -194,13 +194,13 @@ def __new__(cls, *args, **kwargs): class Histogram(_Synchronous, APIHistogram): - def __new__(cls, *args, **kwargs): - if cls is Histogram: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is Histogram: # type: ignore[misc] # raise TypeError("Histogram must be instantiated via a meter.") return super().__new__(cls) - def record( - self, amount: Union[int, float], attributes: Dict[str, str] = None + def record( # type: ignore[no-untyped-def, override] # + self, amount: Union[int, float], attributes: Dict[str, str] = None # type: ignore[assignment] # ): if amount < 0: _logger.warning( @@ -214,13 +214,13 @@ def record( class Gauge(_Synchronous, APIGauge): - def __new__(cls, *args, **kwargs): - if cls is Gauge: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is Gauge: # type: ignore[misc] # raise TypeError("Gauge must be instantiated via a meter.") return super().__new__(cls) - def set( - self, amount: Union[int, float], attributes: Dict[str, str] = None + def set( # type: ignore[no-untyped-def, override] # + self, amount: Union[int, float], attributes: Dict[str, str] = None # type: ignore[assignment] # ): self._measurement_consumer.consume_measurement( Measurement(amount, self, attributes) @@ -228,8 +228,8 @@ def set( class ObservableGauge(_Asynchronous, APIObservableGauge): - def __new__(cls, *args, **kwargs): - if cls is ObservableGauge: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is ObservableGauge: # type: ignore[misc] # raise TypeError( "ObservableGauge must be instantiated via a meter." 
) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py index c5e81678dcb..31f8469738e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement_consumer.py @@ -38,9 +38,9 @@ def consume_measurement(self, measurement: Measurement) -> None: pass @abstractmethod - def register_asynchronous_instrument( + def register_asynchronous_instrument( # type: ignore[misc, no-untyped-def] # self, - instrument: ( + instrument: ( # type: ignore[name-defined] # "opentelemetry.sdk.metrics._internal.instrument_Asynchronous" ), ): @@ -49,7 +49,7 @@ def register_asynchronous_instrument( @abstractmethod def collect( self, - metric_reader: "opentelemetry.sdk.metrics.MetricReader", + metric_reader: "opentelemetry.sdk.metrics.MetricReader", # type: ignore[name-defined] # timeout_millis: float = 10_000, ) -> Optional[Iterable[Metric]]: pass @@ -58,20 +58,20 @@ def collect( class SynchronousMeasurementConsumer(MeasurementConsumer): def __init__( self, - sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration", + sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration", # type: ignore[name-defined] # ) -> None: self._lock = Lock() self._sdk_config = sdk_config # should never be mutated - self._reader_storages: Mapping[ + self._reader_storages: Mapping[ # type: ignore[name-defined] # "opentelemetry.sdk.metrics.MetricReader", MetricReaderStorage - ] = { - reader: MetricReaderStorage( + ] = { # type: ignore[misc] # + reader: MetricReaderStorage( # type: ignore[misc] # sdk_config, - reader._instrument_class_temporality, - reader._instrument_class_aggregation, + reader._instrument_class_temporality, # type: ignore[misc] # + reader._instrument_class_aggregation, # type: ignore[misc] # ) - for reader in sdk_config.metric_readers + for reader in sdk_config.metric_readers # type: ignore[misc] # } self._async_instruments: List[ "opentelemetry.sdk.metrics._internal.instrument._Asynchronous" @@ -92,7 +92,7 @@ def register_asynchronous_instrument( def collect( self, - metric_reader: "opentelemetry.sdk.metrics.MetricReader", + metric_reader: "opentelemetry.sdk.metrics.MetricReader", # type: ignore[name-defined] # timeout_millis: float = 10_000, ) -> Optional[Iterable[Metric]]: @@ -125,4 +125,4 @@ def collect( result = self._reader_storages[metric_reader].collect() - return result + return result # type: ignore[return-value] # diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py index 700ace87204..d1083607c4d 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py @@ -35,7 +35,9 @@ _LastValueAggregation, _SumAggregation, ) -from opentelemetry.sdk.metrics._internal.export import AggregationTemporality +from opentelemetry.sdk.metrics._internal.export import ( + AggregationTemporality, # type: ignore[attr-defined] # +) from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.point import ( ExponentialHistogram, @@ -61,7 +63,7 @@ class MetricReaderStorage: """The SDK's storage for a given reader""" - def __init__( + def __init__( # type: ignore[no-any-unimported] # self, 
sdk_config: SdkConfiguration, instrument_class_temporality: Dict[type, AggregationTemporality], @@ -72,7 +74,7 @@ def __init__( self._instrument_view_instrument_matches: Dict[ Instrument, List[_ViewInstrumentMatch] ] = {} - self._instrument_class_temporality = instrument_class_temporality + self._instrument_class_temporality = instrument_class_temporality # type: ignore[misc] # self._instrument_class_aggregation = instrument_class_aggregation def _get_or_init_view_instrument_match( @@ -90,14 +92,14 @@ def _get_or_init_view_instrument_match( return self._instrument_view_instrument_matches[instrument] # not present, hold the lock and add a new mapping - view_instrument_matches = [] + view_instrument_matches = [] # type: ignore[var-annotated] # self._handle_view_instrument_match( - instrument, view_instrument_matches + instrument, view_instrument_matches # type: ignore[misc] # ) # if no view targeted the instrument, use the default - if not view_instrument_matches: + if not view_instrument_matches: # type: ignore[misc] # view_instrument_matches.append( _ViewInstrumentMatch( view=_DEFAULT_VIEW, @@ -144,7 +146,7 @@ def collect(self) -> Optional[MetricsData]: instrument, view_instrument_matches, ) in self._instrument_view_instrument_matches.items(): - aggregation_temporality = self._instrument_class_temporality[ + aggregation_temporality = self._instrument_class_temporality[ # type: ignore[misc] # instrument.__class__ ] @@ -153,7 +155,7 @@ def collect(self) -> Optional[MetricsData]: for view_instrument_match in view_instrument_matches: data_points = view_instrument_match.collect( - aggregation_temporality, collection_start_nanos + aggregation_temporality, collection_start_nanos # type: ignore[misc] # ) if data_points is None: @@ -161,74 +163,79 @@ def collect(self) -> Optional[MetricsData]: if isinstance( # pylint: disable=protected-access - view_instrument_match._aggregation, + view_instrument_match._aggregation, # type: ignore[misc] # _SumAggregation, ): data = Sum( - aggregation_temporality=aggregation_temporality, - data_points=data_points, + aggregation_temporality=aggregation_temporality, # type: ignore[misc] # + data_points=data_points, # type: ignore[arg-type] # is_monotonic=isinstance( instrument, (Counter, ObservableCounter) ), ) elif isinstance( # pylint: disable=protected-access - view_instrument_match._aggregation, + view_instrument_match._aggregation, # type: ignore[misc] # _LastValueAggregation, ): - data = Gauge(data_points=data_points) + data = Gauge(data_points=data_points) # type: ignore[arg-type, assignment] # elif isinstance( # pylint: disable=protected-access - view_instrument_match._aggregation, + view_instrument_match._aggregation, # type: ignore[misc] # _ExplicitBucketHistogramAggregation, ): - data = Histogram( - data_points=data_points, - aggregation_temporality=aggregation_temporality, + data = Histogram( # type: ignore[assignment] # + data_points=data_points, # type: ignore[arg-type] # + aggregation_temporality=aggregation_temporality, # type: ignore[misc] # ) elif isinstance( # pylint: disable=protected-access - view_instrument_match._aggregation, + view_instrument_match._aggregation, # type: ignore[misc] # _DropAggregation, ): continue elif isinstance( # pylint: disable=protected-access - view_instrument_match._aggregation, + view_instrument_match._aggregation, # type: ignore[misc] # _ExponentialBucketHistogramAggregation, ): - data = ExponentialHistogram( - data_points=data_points, - aggregation_temporality=aggregation_temporality, + data = ExponentialHistogram( # 
type: ignore[assignment] # + data_points=data_points, # type: ignore[arg-type] # + aggregation_temporality=aggregation_temporality, # type: ignore[misc] # ) metrics.append( Metric( # pylint: disable=protected-access - name=view_instrument_match._name, - description=view_instrument_match._description, - unit=view_instrument_match._instrument.unit, + name=view_instrument_match._name, # type: ignore[misc] # + description=view_instrument_match._description, # type: ignore[misc] # + unit=view_instrument_match._instrument.unit, # type: ignore[attr-defined, misc] # data=data, ) ) if metrics: - if instrument.instrumentation_scope not in ( - instrumentation_scope_scope_metrics + if ( + instrument.instrumentation_scope + not in ( # type: ignore[attr-defined, misc] # + instrumentation_scope_scope_metrics + ) ): instrumentation_scope_scope_metrics[ - instrument.instrumentation_scope + instrument.instrumentation_scope # type: ignore[attr-defined, misc] # ] = ScopeMetrics( - scope=instrument.instrumentation_scope, + scope=instrument.instrumentation_scope, # type: ignore[attr-defined, misc] # metrics=metrics, - schema_url=instrument.instrumentation_scope.schema_url, + schema_url=instrument.instrumentation_scope.schema_url, # type: ignore[attr-defined, misc] # ) else: - instrumentation_scope_scope_metrics[ - instrument.instrumentation_scope - ].metrics.extend(metrics) + instrumentation_scope_scope_metrics[ # type: ignore[attr-defined] # + instrument.instrumentation_scope # type: ignore[attr-defined, misc] # + ].metrics.extend( + metrics + ) if instrumentation_scope_scope_metrics: @@ -253,7 +260,7 @@ def _handle_view_instrument_match( ) -> None: for view in self._sdk_config.views: # pylint: disable=protected-access - if not view._match(instrument): + if not view._match(instrument): # type: ignore[misc] # continue if not self._check_view_instrument_compatibility(view, instrument): diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py index c30705c59a4..d04ab797ff6 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py @@ -36,8 +36,8 @@ class NumberDataPoint: time_unix_nano: int value: Union[int, float] - def to_json(self, indent=4) -> str: - return dumps(asdict(self), indent=indent) + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # + return dumps(asdict(self), indent=indent) # type: ignore[misc] # @dataclass(frozen=True) @@ -56,8 +56,8 @@ class HistogramDataPoint: min: float max: float - def to_json(self, indent=4) -> str: - return dumps(asdict(self), indent=indent) + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # + return dumps(asdict(self), indent=indent) # type: ignore[misc] # @dataclass(frozen=True) @@ -86,8 +86,8 @@ class ExponentialHistogramDataPoint: min: float max: float - def to_json(self, indent=4) -> str: - return dumps(asdict(self), indent=indent) + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # + return dumps(asdict(self), indent=indent) # type: ignore[misc] # @dataclass(frozen=True) @@ -97,7 +97,7 @@ class ExponentialHistogram: """ data_points: Sequence[ExponentialHistogramDataPoint] - aggregation_temporality: ( + aggregation_temporality: ( # type: ignore[name-defined] # "opentelemetry.sdk.metrics.export.AggregationTemporality" ) @@ -108,22 +108,22 @@ class Sum: all reported measurements over a time interval.""" data_points: 
Sequence[NumberDataPoint] - aggregation_temporality: ( + aggregation_temporality: ( # type: ignore[name-defined] # "opentelemetry.sdk.metrics.export.AggregationTemporality" ) is_monotonic: bool - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "data_points": [ - loads(data_point.to_json(indent=indent)) + { # type: ignore[misc] # + "data_points": [ # type: ignore[misc] # + loads(data_point.to_json(indent=indent)) # type: ignore[misc] # for data_point in self.data_points ], "aggregation_temporality": self.aggregation_temporality, "is_monotonic": self.is_monotonic, }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -135,15 +135,15 @@ class Gauge: data_points: Sequence[NumberDataPoint] - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "data_points": [ - loads(data_point.to_json(indent=indent)) + { # type: ignore[misc] # + "data_points": [ # type: ignore[misc] # + loads(data_point.to_json(indent=indent)) # type: ignore[misc] # for data_point in self.data_points ], }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -153,20 +153,20 @@ class Histogram: histogram of all reported measurements over a time interval.""" data_points: Sequence[HistogramDataPoint] - aggregation_temporality: ( + aggregation_temporality: ( # type: ignore[name-defined] # "opentelemetry.sdk.metrics.export.AggregationTemporality" ) - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "data_points": [ - loads(data_point.to_json(indent=indent)) + { # type: ignore[misc] # + "data_points": [ # type: ignore[misc] # + loads(data_point.to_json(indent=indent)) # type: ignore[misc] # for data_point in self.data_points ], "aggregation_temporality": self.aggregation_temporality, }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -185,15 +185,15 @@ class Metric: unit: Optional[str] data: DataT - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { + { # type: ignore[misc] # "name": self.name, "description": self.description or "", "unit": self.unit or "", - "data": loads(self.data.to_json(indent=indent)), + "data": loads(self.data.to_json(indent=indent)), # type: ignore[misc] # }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -205,17 +205,17 @@ class ScopeMetrics: metrics: Sequence[Metric] schema_url: str - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "scope": loads(self.scope.to_json(indent=indent)), - "metrics": [ - loads(metric.to_json(indent=indent)) + { # type: ignore[misc] # + "scope": loads(self.scope.to_json(indent=indent)), # type: ignore[misc] # + "metrics": [ # type: ignore[misc] # + loads(metric.to_json(indent=indent)) # type: ignore[misc] # for metric in self.metrics ], "schema_url": self.schema_url, }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -227,17 +227,17 @@ class ResourceMetrics: scope_metrics: Sequence[ScopeMetrics] schema_url: str - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "resource": loads(self.resource.to_json(indent=indent)), - "scope_metrics": [ - loads(scope_metrics.to_json(indent=indent)) + { # type: ignore[misc] # + "resource": 
loads(self.resource.to_json(indent=indent)), # type: ignore[misc] # + "scope_metrics": [ # type: ignore[misc] # + loads(scope_metrics.to_json(indent=indent)) # type: ignore[misc] # for scope_metrics in self.scope_metrics ], "schema_url": self.schema_url, }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -247,13 +247,13 @@ class MetricsData: resource_metrics: Sequence[ResourceMetrics] - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "resource_metrics": [ - loads(resource_metrics.to_json(indent=indent)) + { # type: ignore[misc] # + "resource_metrics": [ # type: ignore[misc] # + loads(resource_metrics.to_json(indent=indent)) # type: ignore[misc] # for resource_metrics in self.resource_metrics ] }, - indent=indent, + indent=indent, # type: ignore[misc] # ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py index 9594ab38a74..be9e1c86467 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/sdk_configuration.py @@ -25,5 +25,5 @@ @dataclass class SdkConfiguration: resource: "opentelemetry.sdk.resources.Resource" - metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"] - views: Sequence["opentelemetry.sdk.metrics.View"] + metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"] # type: ignore[name-defined] # + views: Sequence["opentelemetry.sdk.metrics.View"] # type: ignore[name-defined] # diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py index 5b548a5e05c..c24e39f29c2 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py @@ -95,7 +95,7 @@ def __init__( instrument_unit: Optional[str] = None, ): if ( - instrument_type + instrument_type # type: ignore[comparison-overlap] # is instrument_name is instrument_unit is meter_name @@ -142,24 +142,24 @@ def _match(self, instrument: Instrument) -> bool: return False if self._instrument_name is not None: - if not fnmatch(instrument.name, self._instrument_name): + if not fnmatch(instrument.name, self._instrument_name): # type: ignore[attr-defined, misc] # return False if self._instrument_unit is not None: - if not fnmatch(instrument.unit, self._instrument_unit): + if not fnmatch(instrument.unit, self._instrument_unit): # type: ignore[attr-defined, misc] # return False if self._meter_name is not None: - if instrument.instrumentation_scope.name != self._meter_name: + if instrument.instrumentation_scope.name != self._meter_name: # type: ignore[attr-defined, misc] # return False if self._meter_version is not None: - if instrument.instrumentation_scope.version != self._meter_version: + if instrument.instrumentation_scope.version != self._meter_version: # type: ignore[attr-defined, misc] # return False if self._meter_schema_url is not None: if ( - instrument.instrumentation_scope.schema_url + instrument.instrumentation_scope.schema_url # type: ignore[attr-defined, misc] # != self._meter_schema_url ): return False diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py index 97c31b97ec7..1094b9aeb26 100644 --- 
a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. -from opentelemetry.sdk.metrics._internal.export import ( +from opentelemetry.sdk.metrics._internal.export import ( # type: ignore[attr-defined] # AggregationTemporality, ConsoleMetricExporter, InMemoryMetricReader, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py index 852b23f5002..cec633a4983 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py @@ -65,7 +65,9 @@ from os import environ from urllib import parse -from opentelemetry.attributes import BoundedAttributes +from opentelemetry.attributes import ( + BoundedAttributes, # type: ignore[attr-defined] # +) from opentelemetry.sdk.environment_variables import ( OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, OTEL_RESOURCE_ATTRIBUTES, @@ -78,7 +80,7 @@ try: import psutil except ImportError: - psutil = None + psutil = None # type: ignore[assignment] # LabelValue = AttributeValue Attributes = typing.Dict[str, LabelValue] @@ -150,7 +152,7 @@ class Resource: def __init__( self, attributes: Attributes, schema_url: typing.Optional[str] = None ): - self._attributes = BoundedAttributes(attributes=attributes) + self._attributes = BoundedAttributes(attributes=attributes) # type: ignore[misc] # if schema_url is None: schema_url = "" self._schema_url = schema_url @@ -185,9 +187,9 @@ def create( otel_experimental_resource_detectors.append("otel") for resource_detector in otel_experimental_resource_detectors: - resource_detectors.append( - next( - iter( + resource_detectors.append( # type: ignore[misc] # + next( # type: ignore[misc] # + iter( # type: ignore[misc] # entry_points( group="opentelemetry_resource_detector", name=resource_detector.strip(), @@ -197,7 +199,7 @@ def create( ) resource = get_aggregated_resources( - resource_detectors, _DEFAULT_RESOURCE + resource_detectors, _DEFAULT_RESOURCE # type: ignore[misc] # ).merge(Resource(attributes, schema_url)) if not resource.attributes.get(SERVICE_NAME, None): @@ -206,7 +208,7 @@ def create( PROCESS_EXECUTABLE_NAME, None ) if process_executable_name: - default_service_name += ":" + process_executable_name + default_service_name += ":" + process_executable_name # type: ignore[misc, operator] # resource = resource.merge( Resource({SERVICE_NAME: default_service_name}, schema_url) ) @@ -218,7 +220,7 @@ def get_empty() -> "Resource": @property def attributes(self) -> Attributes: - return self._attributes + return self._attributes # type: ignore[misc, no-any-return] # @property def schema_url(self) -> str: @@ -264,22 +266,22 @@ def __eq__(self, other: object) -> bool: if not isinstance(other, Resource): return False return ( - self._attributes == other._attributes + self._attributes == other._attributes # type: ignore[misc] # and self._schema_url == other._schema_url ) - def __hash__(self): + def __hash__(self): # type: ignore[no-untyped-def] # return hash( - f"{dumps(self._attributes.copy(), sort_keys=True)}|{self._schema_url}" + f"{dumps(self._attributes.copy(), sort_keys=True)}|{self._schema_url}" # type: ignore[misc] # ) - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { - "attributes": dict(self._attributes), + { # type: ignore[misc] # + "attributes": 
dict(self._attributes), # type: ignore[misc] # "schema_url": self._schema_url, }, - indent=indent, + indent=indent, # type: ignore[misc] # ) @@ -294,8 +296,8 @@ def to_json(self, indent=4) -> str: class ResourceDetector(abc.ABC): - def __init__(self, raise_on_error=False): - self.raise_on_error = raise_on_error + def __init__(self, raise_on_error=False): # type: ignore[no-untyped-def] # + self.raise_on_error = raise_on_error # type: ignore[misc] # @abc.abstractmethod def detect(self) -> "Resource": @@ -326,7 +328,7 @@ def detect(self) -> "Resource": service_name = environ.get(OTEL_SERVICE_NAME) if service_name: env_resource_map[SERVICE_NAME] = service_name - return Resource(env_resource_map) + return Resource(env_resource_map) # type: ignore[arg-type] # class ProcessResourceDetector(ResourceDetector): @@ -366,10 +368,10 @@ def detect(self) -> "Resource": process = psutil.Process() resource_info[PROCESS_OWNER] = process.username() - return Resource(resource_info) + return Resource(resource_info) # type: ignore[arg-type] # -def get_aggregated_resources( +def get_aggregated_resources( # type: ignore[no-untyped-def] # detectors: typing.List["ResourceDetector"], initial_resource: typing.Optional[Resource] = None, timeout=5, @@ -389,18 +391,18 @@ def get_aggregated_resources( detector = detectors[detector_ind] detected_resource: Resource = _EMPTY_RESOURCE try: - detected_resource = future.result(timeout=timeout) + detected_resource = future.result(timeout=timeout) # type: ignore[misc] # except concurrent.futures.TimeoutError as ex: - if detector.raise_on_error: + if detector.raise_on_error: # type: ignore[misc] # raise ex logger.warning( "Detector %s took longer than %s seconds, skipping", detector, - timeout, + timeout, # type: ignore[misc] # ) # pylint: disable=broad-except except Exception as ex: - if detector.raise_on_error: + if detector.raise_on_error: # type: ignore[misc] # raise ex logger.warning( "Exception %s in detector %s, ignoring", ex, detector diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py index ff999b9ba89..81e4f1ac018 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py @@ -43,7 +43,9 @@ from opentelemetry import context as context_api from opentelemetry import trace as trace_api -from opentelemetry.attributes import BoundedAttributes +from opentelemetry.attributes import ( + BoundedAttributes, # type: ignore[attr-defined] # +) from opentelemetry.sdk import util from opentelemetry.sdk.environment_variables import ( OTEL_ATTRIBUTE_COUNT_LIMIT, @@ -144,7 +146,7 @@ class SynchronousMultiSpanProcessor(SpanProcessor): _span_processors: Tuple[SpanProcessor, ...] - def __init__(self): + def __init__(self): # type: ignore[no-untyped-def] # # use a tuple to avoid race conditions when adding a new span and # iterating through it on "on_start" and "on_end". 
self._span_processors = () @@ -225,7 +227,7 @@ def add_span_processor(self, span_processor: SpanProcessor) -> None: with self._lock: self._span_processors += (span_processor,) - def _submit_and_await( + def _submit_and_await( # type: ignore[no-untyped-def] # self, func: Callable[[SpanProcessor], Callable[..., None]], *args: Any, @@ -233,7 +235,7 @@ def _submit_and_await( ): futures = [] for sp in self._span_processors: - future = self._executor.submit(func(sp), *args, **kwargs) + future = self._executor.submit(func(sp), *args, **kwargs) # type: ignore[misc] # futures.append(future) for future in futures: future.result() @@ -332,19 +334,19 @@ def attributes(self) -> types.Attributes: return self._attributes -def _check_span_ended(func): - def wrapper(self, *args, **kwargs): +def _check_span_ended(func): # type: ignore[no-untyped-def] # + def wrapper(self, *args, **kwargs): # type: ignore[no-untyped-def] # already_ended = False - with self._lock: # pylint: disable=protected-access - if self._end_time is None: # pylint: disable=protected-access - func(self, *args, **kwargs) + with self._lock: # type: ignore[misc] # pylint: disable=protected-access # + if self._end_time is None: # type: ignore[misc] # pylint: disable=protected-access # + func(self, *args, **kwargs) # type: ignore[misc] # else: already_ended = True if already_ended: - logger.warning("Tried calling %s on an ended span.", func.__name__) + logger.warning("Tried calling %s on an ended span.", func.__name__) # type: ignore[misc] # - return wrapper + return wrapper # type: ignore[misc] # class ReadableSpan: @@ -390,8 +392,8 @@ def __init__( @property def dropped_attributes(self) -> int: - if isinstance(self._attributes, BoundedAttributes): - return self._attributes.dropped + if isinstance(self._attributes, BoundedAttributes): # type: ignore[misc] # + return self._attributes.dropped # type: ignore[misc, no-any-return] # return 0 @property @@ -410,11 +412,11 @@ def dropped_links(self) -> int: def name(self) -> str: return self._name - def get_span_context(self): + def get_span_context(self): # type: ignore[no-untyped-def] # return self._context @property - def context(self): + def context(self): # type: ignore[misc, no-untyped-def] # return self._context @property @@ -454,7 +456,7 @@ def resource(self) -> Resource: return self._resource @property - @deprecated( + @deprecated( # type: ignore[misc] # version="1.11.1", reason="You should use instrumentation_scope" ) def instrumentation_info(self) -> Optional[InstrumentationInfo]: @@ -464,7 +466,7 @@ def instrumentation_info(self) -> Optional[InstrumentationInfo]: def instrumentation_scope(self) -> Optional[InstrumentationScope]: return self._instrumentation_scope - def to_json(self, indent: int = 4): + def to_json(self, indent: int = 4): # type: ignore[no-untyped-def] # parent_id = None if self.parent is not None: parent_id = f"0x{trace_api.format_span_id(self.parent.span_id)}" @@ -478,12 +480,12 @@ def to_json(self, indent: int = 4): end_time = util.ns_to_iso_str(self._end_time) status = { - "status_code": str(self._status.status_code.name), + "status_code": str(self._status.status_code.name), # type: ignore[misc] # } if self._status.description: status["description"] = self._status.description - f_span = { + f_span = { # type: ignore[misc] # "name": self._name, "context": self._format_context(self._context) if self._context @@ -493,13 +495,13 @@ def to_json(self, indent: int = 4): "start_time": start_time, "end_time": end_time, "status": status, - "attributes": 
self._format_attributes(self._attributes), - "events": self._format_events(self._events), - "links": self._format_links(self._links), - "resource": json.loads(self.resource.to_json()), + "attributes": self._format_attributes(self._attributes), # type: ignore[misc] # + "events": self._format_events(self._events), # type: ignore[misc] # + "links": self._format_links(self._links), # type: ignore[misc] # + "resource": json.loads(self.resource.to_json()), # type: ignore[misc] # } - return json.dumps(f_span, indent=indent) + return json.dumps(f_span, indent=indent) # type: ignore[misc] # @staticmethod def _format_context(context: SpanContext) -> Dict[str, str]: @@ -510,20 +512,20 @@ def _format_context(context: SpanContext) -> Dict[str, str]: } @staticmethod - def _format_attributes( + def _format_attributes( # type: ignore[misc] # attributes: types.Attributes, ) -> Optional[Dict[str, Any]]: if attributes is not None and not isinstance(attributes, dict): - return dict(attributes) + return dict(attributes) # type: ignore[misc] # return attributes @staticmethod - def _format_events(events: Sequence[Event]) -> List[Dict[str, Any]]: - return [ - { + def _format_events(events: Sequence[Event]) -> List[Dict[str, Any]]: # type: ignore[misc] # + return [ # type: ignore[misc] # + { # type: ignore[misc] # "name": event.name, "timestamp": util.ns_to_iso_str(event.timestamp), - "attributes": Span._format_attributes( # pylint: disable=protected-access + "attributes": Span._format_attributes( # type: ignore[misc] # pylint: disable=protected-access # event.attributes ), } @@ -531,13 +533,13 @@ def _format_events(events: Sequence[Event]) -> List[Dict[str, Any]]: ] @staticmethod - def _format_links(links: Sequence[trace_api.Link]) -> List[Dict[str, Any]]: - return [ - { - "context": Span._format_context( # pylint: disable=protected-access + def _format_links(links: Sequence[trace_api.Link]) -> List[Dict[str, Any]]: # type: ignore[misc] # + return [ # type: ignore[misc] # + { # type: ignore[misc] # + "context": Span._format_context( # type: ignore[misc] # pylint: disable=protected-access # link.context ), - "attributes": Span._format_attributes( # pylint: disable=protected-access + "attributes": Span._format_attributes( # type: ignore[misc] # pylint: disable=protected-access # link.attributes ), } @@ -658,7 +660,7 @@ def __init__( self.max_attribute_length, ) - def __repr__(self): + def __repr__(self): # type: ignore[no-untyped-def] # return f"{type(self).__name__}(max_span_attributes={self.max_span_attributes}, max_events_attributes={self.max_event_attributes}, max_link_attributes={self.max_link_attributes}, max_attributes={self.max_attributes}, max_events={self.max_events}, max_links={self.max_links}, max_attribute_length={self.max_attribute_length})" @classmethod @@ -733,13 +735,13 @@ class Span(trace_api.Span, ReadableSpan): limits: `SpanLimits` instance that was passed to the `TracerProvider` """ - def __new__(cls, *args, **kwargs): - if cls is Span: + def __new__(cls, *args, **kwargs): # type: ignore[no-untyped-def] # + if cls is Span: # type: ignore[misc] # raise TypeError("Span must be instantiated via a tracer.") return super().__new__(cls) # pylint: disable=too-many-locals - def __init__( + def __init__( # type: ignore[no-untyped-def] # self, name: str, context: trace_api.SpanContext, @@ -774,45 +776,45 @@ def __init__( self._record_exception = record_exception self._set_status_on_exception = set_status_on_exception self._span_processor = span_processor - self._limits = limits + self._limits = limits # 
type: ignore[misc] # self._lock = threading.Lock() self._attributes = BoundedAttributes( - self._limits.max_span_attributes, + self._limits.max_span_attributes, # type: ignore[misc] # attributes, immutable=False, - max_value_len=self._limits.max_span_attribute_length, + max_value_len=self._limits.max_span_attribute_length, # type: ignore[misc] # ) - self._events = self._new_events() + self._events = self._new_events() # type: ignore[no-untyped-call] # if events: for event in events: event._attributes = BoundedAttributes( - self._limits.max_event_attributes, + self._limits.max_event_attributes, # type: ignore[misc] # event.attributes, - max_value_len=self._limits.max_attribute_length, + max_value_len=self._limits.max_attribute_length, # type: ignore[misc] # ) - self._events.append(event) + self._events.append(event) # type: ignore[attr-defined] # if links is None: self._links = self._new_links() else: for link in links: link._attributes = BoundedAttributes( - self._limits.max_link_attributes, + self._limits.max_link_attributes, # type: ignore[misc] # link.attributes, - max_value_len=self._limits.max_attribute_length, + max_value_len=self._limits.max_attribute_length, # type: ignore[misc] # ) - self._links = BoundedList.from_seq(self._limits.max_links, links) + self._links = BoundedList.from_seq(self._limits.max_links, links) # type: ignore[misc] # - def __repr__(self): + def __repr__(self): # type: ignore[no-untyped-def] # return f'{type(self).__name__}(name="{self._name}", context={self._context})' - def _new_events(self): - return BoundedList(self._limits.max_events) + def _new_events(self): # type: ignore[no-untyped-def] # + return BoundedList(self._limits.max_events) # type: ignore[misc] # - def _new_links(self): - return BoundedList(self._limits.max_links) + def _new_links(self): # type: ignore[no-untyped-def] # + return BoundedList(self._limits.max_links) # type: ignore[misc] # - def get_span_context(self): + def get_span_context(self): # type: ignore[no-untyped-def] # return self._context def set_attributes( @@ -824,14 +826,14 @@ def set_attributes( return for key, value in attributes.items(): - self._attributes[key] = value + self._attributes[key] = value # type: ignore[index] # def set_attribute(self, key: str, value: types.AttributeValue) -> None: return self.set_attributes({key: value}) - @_check_span_ended + @_check_span_ended # type: ignore[misc] # def _add_event(self, event: EventBase) -> None: - self._events.append(event) + self._events.append(event) # type: ignore[attr-defined] # def add_event( self, @@ -840,9 +842,9 @@ def add_event( timestamp: Optional[int] = None, ) -> None: attributes = BoundedAttributes( - self._limits.max_event_attributes, + self._limits.max_event_attributes, # type: ignore[misc] # attributes, - max_value_len=self._limits.max_attribute_length, + max_value_len=self._limits.max_attribute_length, # type: ignore[misc] # ) self._add_event( Event( @@ -852,9 +854,9 @@ def add_event( ) ) - @_check_span_ended + @_check_span_ended # type: ignore[misc] # def _add_link(self, link: trace_api.Link) -> None: - self._links.append(link) + self._links.append(link) # type: ignore[attr-defined] # def add_link( self, @@ -865,9 +867,9 @@ def add_link( return attributes = BoundedAttributes( - self._limits.max_link_attributes, + self._limits.max_link_attributes, # type: ignore[misc] # attributes, - max_value_len=self._limits.max_attribute_length, + max_value_len=self._limits.max_attribute_length, # type: ignore[misc] # ) self._add_link( trace_api.Link( @@ -920,14 +922,14 @@ 
def end(self, end_time: Optional[int] = None) -> None: self._span_processor.on_end(self._readable_span()) - @_check_span_ended + @_check_span_ended # type: ignore[misc] # def update_name(self, name: str) -> None: self._name = name def is_recording(self) -> bool: return self._end_time is None - @_check_span_ended + @_check_span_ended # type: ignore[misc] # def set_status( self, status: typing.Union[Status, StatusCode], @@ -968,14 +970,14 @@ def __exit__( # Record the exception as an event # pylint:disable=protected-access if self._record_exception: - self.record_exception(exception=exc_val, escaped=True) + self.record_exception(exception=exc_val, escaped=True) # type: ignore[arg-type] # # Records status if span is used as context manager # i.e. with tracer.start_span() as span: if self._set_status_on_exception: self.set_status( Status( status_code=StatusCode.ERROR, - description=f"{exc_type.__name__}: {exc_val}", + description=f"{exc_type.__name__}: {exc_val}", # type: ignore[misc, union-attr] # ) ) @@ -1095,7 +1097,7 @@ def start_span( # pylint: disable=too-many-locals # is_valid determines root span if parent_span_context is None or not parent_span_context.is_valid: - parent_span_context = None + parent_span_context = None # type: ignore[assignment] # trace_id = self.id_generator.generate_trace_id() else: trace_id = parent_span_context.trace_id @@ -1112,7 +1114,7 @@ def start_span( # pylint: disable=too-many-locals trace_flags = ( trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED) - if sampling_result.decision.is_sampled() + if sampling_result.decision.is_sampled() # type: ignore[misc, no-untyped-call] # else trace_api.TraceFlags(trace_api.TraceFlags.DEFAULT) ) span_context = trace_api.SpanContext( @@ -1124,7 +1126,7 @@ def start_span( # pylint: disable=too-many-locals ) # Only record if is_recording() is true - if sampling_result.decision.is_recording(): + if sampling_result.decision.is_recording(): # type: ignore[misc, no-untyped-call] # # pylint:disable=protected-access span = _Span( name=name, @@ -1132,10 +1134,10 @@ def start_span( # pylint: disable=too-many-locals parent=parent_span_context, sampler=self.sampler, resource=self.resource, - attributes=sampling_result.attributes.copy(), + attributes=sampling_result.attributes.copy(), # type: ignore[misc] # span_processor=self.span_processor, kind=kind, - links=links, + links=links, # type: ignore[arg-type] # instrumentation_info=self.instrumentation_info, record_exception=record_exception, set_status_on_exception=set_status_on_exception, @@ -1144,7 +1146,7 @@ def start_span( # pylint: disable=too-many-locals ) span.start(start_time=start_time, parent_context=context) else: - span = trace_api.NonRecordingSpan(context=span_context) + span = trace_api.NonRecordingSpan(context=span_context) # type: ignore[assignment] # return span @@ -1163,12 +1165,12 @@ def __init__( span_limits: Optional[SpanLimits] = None, ) -> None: self._active_span_processor = ( - active_span_processor or SynchronousMultiSpanProcessor() + active_span_processor or SynchronousMultiSpanProcessor() # type: ignore[no-untyped-call] # ) if id_generator is None: self.id_generator = RandomIdGenerator() else: - self.id_generator = id_generator + self.id_generator = id_generator # type: ignore[assignment] # if resource is None: self._resource = Resource.create({}) else: @@ -1182,7 +1184,7 @@ def __init__( self._atexit_handler = None if shutdown_on_exit: - self._atexit_handler = atexit.register(self.shutdown) + self._atexit_handler = atexit.register(self.shutdown) # type: 
ignore[misc] # @property def resource(self) -> Resource: @@ -1243,11 +1245,11 @@ def add_span_processor(self, span_processor: SpanProcessor) -> None: # SynchronousMultiSpanProcessor and ConcurrentMultiSpanProcessor. self._active_span_processor.add_span_processor(span_processor) - def shutdown(self): + def shutdown(self): # type: ignore[no-untyped-def] # """Shut down the span processors added to the tracer provider.""" self._active_span_processor.shutdown() - if self._atexit_handler is not None: - atexit.unregister(self._atexit_handler) + if self._atexit_handler is not None: # type: ignore[misc] # + atexit.unregister(self._atexit_handler) # type: ignore[misc] # self._atexit_handler = None def force_flush(self, timeout_millis: int = 30000) -> bool: diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py index a4a9958343e..8704fa8b254 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py @@ -106,7 +106,7 @@ def on_start( pass def on_end(self, span: ReadableSpan) -> None: - if not span.context.trace_flags.sampled: + if not span.context.trace_flags.sampled: # type: ignore[misc] # return token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) try: @@ -129,7 +129,7 @@ class _FlushRequest: __slots__ = ["event", "num_spans"] - def __init__(self): + def __init__(self): # type: ignore[no-untyped-def] # self.event = threading.Event() self.num_spans = 0 @@ -155,10 +155,10 @@ class BatchSpanProcessor(SpanProcessor): def __init__( self, span_exporter: SpanExporter, - max_queue_size: int = None, - schedule_delay_millis: float = None, - max_export_batch_size: int = None, - export_timeout_millis: float = None, + max_queue_size: int = None, # type: ignore[assignment] # + schedule_delay_millis: float = None, # type: ignore[assignment] # + max_export_batch_size: int = None, # type: ignore[assignment] # + export_timeout_millis: float = None, # type: ignore[assignment] # ): if max_queue_size is None: max_queue_size = BatchSpanProcessor._default_max_queue_size() @@ -178,7 +178,7 @@ def __init__( BatchSpanProcessor._default_export_timeout_millis() ) - BatchSpanProcessor._validate_arguments( + BatchSpanProcessor._validate_arguments( # type: ignore[no-untyped-call] # max_queue_size, schedule_delay_millis, max_export_batch_size ) @@ -187,7 +187,7 @@ def __init__( [], max_queue_size ) # type: typing.Deque[Span] self.worker_thread = threading.Thread( - name="OtelBatchSpanProcessor", target=self.worker, daemon=True + name="OtelBatchSpanProcessor", target=self.worker, daemon=True # type: ignore[misc] # ) self.condition = threading.Condition(threading.Lock()) self._flush_request = None # type: typing.Optional[_FlushRequest] @@ -205,7 +205,7 @@ def __init__( self.worker_thread.start() if hasattr(os, "register_at_fork"): os.register_at_fork( - after_in_child=self._at_fork_reinit + after_in_child=self._at_fork_reinit # type: ignore[misc] # ) # pylint: disable=protected-access self._pid = os.getpid() @@ -218,35 +218,35 @@ def on_end(self, span: ReadableSpan) -> None: if self.done: logger.warning("Already shutdown, dropping span.") return - if not span.context.trace_flags.sampled: + if not span.context.trace_flags.sampled: # type: ignore[misc] # return if self._pid != os.getpid(): - _BSP_RESET_ONCE.do_once(self._at_fork_reinit) + _BSP_RESET_ONCE.do_once(self._at_fork_reinit) # type: ignore[misc] # if len(self.queue) == self.max_queue_size: 
if not self._spans_dropped: logger.warning("Queue is full, likely spans will be dropped.") self._spans_dropped = True - self.queue.appendleft(span) + self.queue.appendleft(span) # type: ignore[arg-type] # if len(self.queue) >= self.max_export_batch_size: with self.condition: self.condition.notify() - def _at_fork_reinit(self): + def _at_fork_reinit(self): # type: ignore[no-untyped-def] # self.condition = threading.Condition(threading.Lock()) self.queue.clear() # worker_thread is local to a process, only the thread that issued fork continues # to exist. A new worker thread must be started in child process. self.worker_thread = threading.Thread( - name="OtelBatchSpanProcessor", target=self.worker, daemon=True + name="OtelBatchSpanProcessor", target=self.worker, daemon=True # type: ignore[misc] # ) self.worker_thread.start() self._pid = os.getpid() - def worker(self): + def worker(self): # type: ignore[no-untyped-def] # timeout = self.schedule_delay_millis / 1e3 flush_request = None # type: typing.Optional[_FlushRequest] while not self.done: @@ -288,7 +288,7 @@ def worker(self): shutdown_flush_request = self._get_and_unset_flush_request() # be sure that all spans are sent - self._drain_queue() + self._drain_queue() # type: ignore[no-untyped-call] # self._notify_flush_request_finished(flush_request) self._notify_flush_request_finished(shutdown_flush_request) @@ -305,7 +305,7 @@ def _get_and_unset_flush_request( return flush_request @staticmethod - def _notify_flush_request_finished( + def _notify_flush_request_finished( # type: ignore[misc, no-untyped-def] # flush_request: typing.Optional[_FlushRequest], ): """Notifies the flush initiator(s) waiting on the given request/event @@ -326,10 +326,10 @@ def _get_or_create_flush_request(self) -> _FlushRequest: synchronization/locking. """ if self._flush_request is None: - self._flush_request = _FlushRequest() + self._flush_request = _FlushRequest() # type: ignore[no-untyped-call] # return self._flush_request - def _export(self, flush_request: typing.Optional[_FlushRequest]): + def _export(self, flush_request: typing.Optional[_FlushRequest]): # type: ignore[no-untyped-def] # """Exports spans considering the given flush_request. In case of a given flush_requests spans are exported in batches until @@ -374,7 +374,7 @@ def _export_batch(self) -> int: self.spans_list[index] = None return idx - def _drain_queue(self): + def _drain_queue(self): # type: ignore[no-untyped-def] # """Export all elements until queue is empty. 
Can only be called from the worker thread context because it invokes @@ -383,7 +383,7 @@ def _drain_queue(self): while self.queue: self._export_batch() - def force_flush(self, timeout_millis: int = None) -> bool: + def force_flush(self, timeout_millis: int = None) -> bool: # type: ignore[assignment] # if timeout_millis is None: timeout_millis = self.export_timeout_millis @@ -412,7 +412,7 @@ def shutdown(self) -> None: self.span_exporter.shutdown() @staticmethod - def _default_max_queue_size(): + def _default_max_queue_size(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get(OTEL_BSP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE) @@ -426,7 +426,7 @@ def _default_max_queue_size(): return _DEFAULT_MAX_QUEUE_SIZE @staticmethod - def _default_schedule_delay_millis(): + def _default_schedule_delay_millis(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get( @@ -442,7 +442,7 @@ def _default_schedule_delay_millis(): return _DEFAULT_SCHEDULE_DELAY_MILLIS @staticmethod - def _default_max_export_batch_size(): + def _default_max_export_batch_size(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get( @@ -459,7 +459,7 @@ def _default_max_export_batch_size(): return _DEFAULT_MAX_EXPORT_BATCH_SIZE @staticmethod - def _default_export_timeout_millis(): + def _default_export_timeout_millis(): # type: ignore[misc, no-untyped-def] # try: return int( environ.get( @@ -475,21 +475,21 @@ def _default_export_timeout_millis(): return _DEFAULT_EXPORT_TIMEOUT_MILLIS @staticmethod - def _validate_arguments( + def _validate_arguments( # type: ignore[misc, no-untyped-def] # max_queue_size, schedule_delay_millis, max_export_batch_size ): - if max_queue_size <= 0: + if max_queue_size <= 0: # type: ignore[misc] # raise ValueError("max_queue_size must be a positive integer.") - if schedule_delay_millis <= 0: + if schedule_delay_millis <= 0: # type: ignore[misc] # raise ValueError("schedule_delay_millis must be positive.") - if max_export_batch_size <= 0: + if max_export_batch_size <= 0: # type: ignore[misc] # raise ValueError( "max_export_batch_size must be a positive integer." ) - if max_export_batch_size > max_queue_size: + if max_export_batch_size > max_queue_size: # type: ignore[misc] # raise ValueError( "max_export_batch_size must be less than or equal to max_queue_size." 
) @@ -506,20 +506,20 @@ class ConsoleSpanExporter(SpanExporter): def __init__( self, service_name: Optional[str] = None, - out: typing.IO = sys.stdout, + out: typing.IO = sys.stdout, # type: ignore[type-arg] # formatter: typing.Callable[ [ReadableSpan], str - ] = lambda span: span.to_json() + ] = lambda span: span.to_json() # type: ignore[misc, no-any-return] # + linesep, ): - self.out = out + self.out = out # type: ignore[misc] # self.formatter = formatter self.service_name = service_name def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: for span in spans: - self.out.write(self.formatter(span)) - self.out.flush() + self.out.write(self.formatter(span)) # type: ignore[misc] # + self.out.flush() # type: ignore[misc] # return SpanExportResult.SUCCESS def force_flush(self, timeout_millis: int = 30000) -> bool: diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py index 40e142ea1a9..06cd4a9ff82 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py @@ -159,10 +159,10 @@ class Decision(enum.Enum): # IsRecording() == true AND Sampled flag` MUST be set. RECORD_AND_SAMPLE = 2 - def is_recording(self): + def is_recording(self): # type: ignore[no-untyped-def] # return self in (Decision.RECORD_ONLY, Decision.RECORD_AND_SAMPLE) - def is_sampled(self): + def is_sampled(self): # type: ignore[no-untyped-def] # return self is Decision.RECORD_AND_SAMPLE @@ -178,7 +178,7 @@ class SamplingResult: """ def __repr__(self) -> str: - return f"{type(self).__name__}({str(self.decision)}, attributes={str(self.attributes)})" + return f"{type(self).__name__}({str(self.decision)}, attributes={str(self.attributes)})" # type: ignore[misc] # def __init__( self, @@ -188,7 +188,7 @@ def __init__( ) -> None: self.decision = decision if attributes is None: - self.attributes = MappingProxyType({}) + self.attributes = MappingProxyType({}) # type: ignore[var-annotated] # else: self.attributes = MappingProxyType(attributes) self.trace_state = trace_state @@ -372,7 +372,7 @@ def should_sample( links=links, ) - def get_description(self): + def get_description(self): # type: ignore[no-untyped-def] # return f"ParentBased{{root:{self._root.get_description()},remoteParentSampled:{self._remote_parent_sampled.get_description()},remoteParentNotSampled:{self._remote_parent_not_sampled.get_description()},localParentSampled:{self._local_parent_sampled.get_description()},localParentNotSampled:{self._local_parent_not_sampled.get_description()}}}" @@ -395,22 +395,22 @@ def __init__(self, rate: float): class _AlwaysOff(StaticSampler): - def __init__(self, _): + def __init__(self, _): # type: ignore[no-untyped-def] # super().__init__(Decision.DROP) class _AlwaysOn(StaticSampler): - def __init__(self, _): + def __init__(self, _): # type: ignore[no-untyped-def] # super().__init__(Decision.RECORD_AND_SAMPLE) class _ParentBasedAlwaysOff(ParentBased): - def __init__(self, _): + def __init__(self, _): # type: ignore[no-untyped-def] # super().__init__(ALWAYS_OFF) class _ParentBasedAlwaysOn(ParentBased): - def __init__(self, _): + def __init__(self, _): # type: ignore[no-untyped-def] # super().__init__(ALWAYS_ON) @@ -434,13 +434,13 @@ def _get_from_env_or_default() -> Sampler: if trace_sampler in ("traceidratio", "parentbased_traceidratio"): try: - rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG)) + rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG)) # type: ignore[arg-type] # 
except (ValueError, TypeError): _logger.warning("Could not convert TRACES_SAMPLER_ARG to float.") rate = 1.0 - return _KNOWN_SAMPLERS[trace_sampler](rate) + return _KNOWN_SAMPLERS[trace_sampler](rate) # type: ignore[misc, no-any-return, operator] # - return _KNOWN_SAMPLERS[trace_sampler] + return _KNOWN_SAMPLERS[trace_sampler] # type: ignore[return-value] # def _get_parent_trace_state( diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/util/__init__.pyi b/opentelemetry-sdk/src/opentelemetry/sdk/util/__init__.pyi index d42e0f018fa..d8a351a04c1 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/util/__init__.pyi +++ b/opentelemetry-sdk/src/opentelemetry/sdk/util/__init__.pyi @@ -48,8 +48,8 @@ class BoundedList(Sequence[_T]): @overload def __getitem__(self, s: slice) -> Sequence[_T]: ... def __len__(self) -> int: ... - def append(self, item: _T): ... - def extend(self, seq: Sequence[_T]): ... + def append(self, item: _T): ... # type: ignore[no-untyped-def] # + def extend(self, seq: Sequence[_T]): ... # type: ignore[no-untyped-def] # @classmethod def from_seq(cls, maxlen: int, seq: Iterable[_T]) -> BoundedList[_T]: ... diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py b/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py index 085d3fd874f..321ff5c45c1 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py @@ -26,7 +26,7 @@ class InstrumentationInfo: __slots__ = ("_name", "_version", "_schema_url") - @deprecated(version="1.11.1", reason="You should use InstrumentationScope") + @deprecated(version="1.11.1", reason="You should use InstrumentationScope") # type: ignore[misc] # def __init__( self, name: str, @@ -39,26 +39,30 @@ def __init__( schema_url = "" self._schema_url = schema_url - def __repr__(self): + def __repr__(self): # type: ignore[no-untyped-def] # return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})" - def __hash__(self): + def __hash__(self): # type: ignore[no-untyped-def] # return hash((self._name, self._version, self._schema_url)) - def __eq__(self, value): - return type(value) is type(self) and ( + def __eq__(self, value): # type: ignore[no-untyped-def] # + return type(value) is type(self) and ( # type: ignore[misc] # self._name, self._version, self._schema_url, - ) == (value._name, value._version, value._schema_url) - - def __lt__(self, value): - if type(value) is not type(self): - return NotImplemented - return (self._name, self._version, self._schema_url) < ( + ) == ( value._name, value._version, value._schema_url, + ) # type: ignore[misc] # + + def __lt__(self, value): # type: ignore[no-untyped-def] # + if type(value) is not type(self): # type: ignore[misc] # + return NotImplemented + return (self._name, self._version, self._schema_url) < ( # type: ignore[misc] # + value._name, # type: ignore[misc] # + value._version, # type: ignore[misc] # + value._schema_url, # type: ignore[misc] # ) @property @@ -132,12 +136,12 @@ def version(self) -> Optional[str]: def name(self) -> str: return self._name - def to_json(self, indent=4) -> str: + def to_json(self, indent=4) -> str: # type: ignore[no-untyped-def] # return dumps( - { + { # type: ignore[misc] # "name": self._name, "version": self._version, "schema_url": self._schema_url, }, - indent=indent, + indent=indent, # type: ignore[misc] # ) diff --git a/tests/opentelemetry-test-utils/src/opentelemetry/test/spantestutil.py 
b/tests/opentelemetry-test-utils/src/opentelemetry/test/spantestutil.py
index 912de9ee031..c2dfd66f115 100644
--- a/tests/opentelemetry-test-utils/src/opentelemetry/test/spantestutil.py
+++ b/tests/opentelemetry-test-utils/src/opentelemetry/test/spantestutil.py
@@ -19,10 +19,10 @@ from opentelemetry.sdk.trace import Resource
 
 
-def new_tracer(span_limits=None, resource=None) -> trace_api.Tracer:
+def new_tracer(span_limits=None, resource=None) -> trace_api.Tracer: # type: ignore[no-untyped-def] #
     provider_factory = trace_sdk.TracerProvider
     if resource is not None:
-        provider_factory = partial(provider_factory, resource=resource)
+        provider_factory = partial(provider_factory, resource=resource) # type: ignore[assignment] #
     return provider_factory(span_limits=span_limits).get_tracer(__name__)
 
 
@@ -34,7 +34,7 @@ def get_span_with_dropped_attributes_events_links():
     for index in range(129):
         links.append(
             trace_api.Link(
-                trace_sdk._Span(
+                trace_sdk._Span( # type: ignore[no-untyped-call] #
                     name=f"span{index}",
                     context=trace_api.INVALID_SPAN_CONTEXT,
                     attributes=attributes,
@@ -45,7 +45,7 @@ def get_span_with_dropped_attributes_events_links():
 
     tracer = new_tracer(
         span_limits=trace_sdk.SpanLimits(),
-        resource=Resource(attributes=attributes),
+        resource=Resource(attributes=attributes), # type: ignore[arg-type] #
     )
     with tracer.start_as_current_span(
         "span", links=links, attributes=attributes
diff --git a/tox.ini b/tox.ini
index 2b82d804dc9..205dd2fa7e7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -82,19 +82,21 @@ deps =
     opentelemetry: flaky
     coverage: pytest
     coverage: pytest-cov
-    mypy,mypyinstalled: mypy
+    mypy,mypysdk,mypyinstalled: mypy
+    mypysdk: types-psutil
+    mypysdk: -r opentelemetry-sdk/test-requirements.txt
 ; proto 3 and 4 tests install the respective version of protobuf
     proto3: protobuf~=3.19.0
     proto4: protobuf~=4.0
-
 setenv =
 ; override CONTRIB_REPO_SHA via env variable when testing other branches/commits than main
 ; i.e: CONTRIB_REPO_SHA=dde62cebffe519c35875af6d06fae053b3be65ec tox -e <env to test>
     CONTRIB_REPO_SHA={env:CONTRIB_REPO_SHA:main}
     CONTRIB_REPO=git+https://github.com/open-telemetry/opentelemetry-python-contrib.git@{env:CONTRIB_REPO_SHA}
     mypy: MYPYPATH={toxinidir}/opentelemetry-api/src/:{toxinidir}/tests/opentelemetry-test-utils/src/
+    mypysdk: MYPYPATH={toxinidir}/opentelemetry-sdk/src/:{toxinidir}/tests/opentelemetry-test-utils/src/:{toxinidir}/opentelemetry-semantic-conventions/src/
 
 commands_pre =
 ; Install without -e to test the actual installation
@@ -181,10 +183,14 @@ commands =
     coverage: {toxinidir}/scripts/coverage.sh
 
     mypy: mypy --install-types --non-interactive --namespace-packages --explicit-package-bases opentelemetry-api/src/opentelemetry/
-    ; For test code, we don't want to enforce the full mypy strictness
     mypy: mypy --install-types --non-interactive --namespace-packages --config-file=mypy-relaxed.ini opentelemetry-api/tests/
 
+    mypysdk: mypy --install-types --non-interactive --namespace-packages --explicit-package-bases opentelemetry-sdk/src/opentelemetry/ {posargs}
+; For test code, we don't want to enforce the full mypy strictness
+; TODO: turn on for tests
+; mypysdk: mypy --install-types --non-interactive --namespace-packages --config-file=mypy-relaxed.ini opentelemetry-sdk/tests/
 
 ; Test that mypy can pick up typeinfo from an installed package (otherwise,
 ; implicit Any due to unfollowed import would result).
 mypyinstalled: mypy --install-types --non-interactive --namespace-packages opentelemetry-api/tests/mypysmoke.py --strict