From 0ab939366af4a50d223308a5b702c93913eeb32a Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Thu, 19 Mar 2026 15:27:40 -0400 Subject: [PATCH 01/10] chore: Remove the dual_context fixture for scenario tests --- interfaces/otlp/README.md | 45 ++- .../src/charmlibs/interfaces/otlp/__init__.py | 32 +- .../src/charmlibs/interfaces/otlp/_otlp.py | 18 +- interfaces/otlp/tests/unit/conftest.py | 135 +++++---- interfaces/otlp/tests/unit/test_endpoints.py | 104 +++---- interfaces/otlp/tests/unit/test_rules.py | 286 ++++++++---------- 6 files changed, 273 insertions(+), 347 deletions(-) diff --git a/interfaces/otlp/README.md b/interfaces/otlp/README.md index 7091f0b92..33a15d7a2 100644 --- a/interfaces/otlp/README.md +++ b/interfaces/otlp/README.md @@ -26,25 +26,22 @@ from charmlibs.interfaces.otlp import OtlpProvider class MyOtlpServer(CharmBase): def __init__(self, *args): super().__init__(*args) - self.otlp_provider = OtlpProvider(self) self.framework.observe(self.on.ingress_ready, self._on_ingress_ready) def _on_ingress_ready(self, event): - self.otlp_provider.add_endpoint( - protocol="grpc", - endpoint="https://my-app.ingress:4317", - telemetries=["logs", "metrics"], - ) - self.otlp_provider.add_endpoint( - protocol="http", - endpoint="https://my-app.ingress:4318", - telemetries=["traces"], - ) - # publish the registered endpoints to the relation databag - self.otlp_provider.publish() + OtlpProvider(self).add_endpoint( + protocol="grpc", + endpoint="https://my-app.ingress:4317", + telemetries=["logs", "metrics"], + ).add_endpoint( + protocol="http", + endpoint="https://my-app.ingress:4318", + telemetries=["traces"], + ).publish() + # optionally, get the alerting and recording rules - promql_rules = self.otlp_provider.rules("promql") - logql_rules = self.otlp_provider.rules("logql") + promql_rules = OtlpProvider(self).rules("promql") + logql_rules = OtlpProvider(self).rules("logql") ``` ### Requirer Side @@ -53,22 +50,18 @@ class MyOtlpServer(CharmBase): from 
charmlibs.interfaces.otlp import OtlpRequirer class MyOtlpSender(CharmBase): - def __init__(self, *args): - super().__init__(*args) - self.otlp_requirer = OtlpRequirer( + def __init__(self, framework: ops.Framework): + super().__init__(framework) + self.framework.observe(self.on.update_status, self._publish_rules) + + def _publish_rules(self, _: ops.EventBase) -> None: + OtlpRequirer( self, protocols=["grpc", "http"], telemetries=["logs", "metrics", "traces"], loki_rules_path="./src/loki_alert_rules", prometheus_rules_path="./src/prometheus_alert_rules", - ) - self.framework.observe(self.on.update_status, self._reconcile) - - def _reconcile(self, event): - # publish the rules to the relation databag - self.otlp_requirer.publish() - # get the endpoints from the provider - supported_endpoints = self.otlp_requirer.endpoints + ).publish() ``` ## Documentation diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py index 2896f84c7..6a67e8aa5 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py @@ -38,21 +38,18 @@ class MyOtlpServer(CharmBase): def __init__(self, *args): super().__init__(*args) - self.otlp_provider = OtlpProvider(self) self.framework.observe(self.on.ingress_ready, self._on_ingress_ready) def _on_ingress_ready(self, event): - self.otlp_provider.add_endpoint( + OtlpProvider(self).add_endpoint( protocol="grpc", endpoint="https://my-app.ingress:4317", telemetries=["logs", "metrics"], - ) - self.otlp_provider.add_endpoint( + ).add_endpoint( protocol="http", endpoint="https://my-app.ingress:4318", telemetries=["traces"], - ) - self.otlp_provider.publish() + ).publish() Providers add endpoints explicitly; nothing is auto-published by default. 
Make sure to add endpoints and publish them after the charm's endpoint details have been updated e.g., ingress or @@ -62,8 +59,8 @@ def _on_ingress_ready(self, event): the ``rules()`` method:: # snip ... - promql_rules = self.otlp_provider.rules("promql") - logql_rules = self.otlp_provider.rules("logql") + promql_rules = OtlpProvider(self).rules("promql") + logql_rules = OtlpProvider(self).rules("logql") Requirer Side (Charms requiring OTLP endpoints) ----------------------------------------------- @@ -74,19 +71,18 @@ def _on_ingress_ready(self, event): from charmlibs.interfaces.otlp import OtlpRequirer class MyOtlpSender(CharmBase): - def __init__(self, *args): - super().__init__(*args) - self.otlp_requirer = OtlpRequirer( + def __init__(self, framework: ops.Framework): + super().__init__(framework) + self.framework.observe(self.on.update_status, self._publish_rules) + + def _publish_rules(self, _: ops.EventBase) -> None: + OtlpRequirer( self, protocols=["grpc", "http"], telemetries=["logs", "metrics", "traces"], loki_rules_path="./src/loki_alert_rules", prometheus_rules_path="./src/prometheus_alert_rules", - ) - self.framework.observe(self.on.update_status, self._reconcile) - - def _reconcile(self, event): - supported_endpoints = self.otlp_requirer.endpoints + ).publish() Given the defined, supported protocols and telemetries, the OtlpRequirer will filter out unsupported endpoints and prune unsupported telemetries. After filtering, requirer selection @@ -99,7 +95,7 @@ def _reconcile(self, event): method:: # snip ... - self.otlp_requirer.publish() + OtlpRequirer(...).publish() It is the charm's responsibility to manage the rules in the ``loki_rules_path`` and ``prometheus_rules_path`` directories, which will be forwarded to the related OtlpProvider charms. 
@@ -135,7 +131,7 @@ def _reconcile(self, event): "model": "my-model", "model_uuid": "f4d59020-c8e7-4053-8044-a2c1e5591c7f", "application": "my-app", - "charm": "my-charm", + "charm_name": "my-charm", "unit": "my-charm/0", } """ diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py index b1d067d75..100267a3c 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py @@ -80,7 +80,7 @@ class _OtlpEndpoint(OtlpEndpoint): """A pydantic model for a single OTLP endpoint.""" -class OtlpProviderAppData(BaseModel): +class _OtlpProviderAppData(BaseModel): """A pydantic model for the OTLP provider's app databag.""" endpoints: list[_OtlpEndpoint] = Field( @@ -88,7 +88,7 @@ class OtlpProviderAppData(BaseModel): ) -class OtlpRequirerAppData(BaseModel): +class _OtlpRequirerAppData(BaseModel): """A pydantic model for the OTLP requirer's app databag. The rules are compressed when saved to databag to avoid hitting databag @@ -234,7 +234,7 @@ def publish(self): prom_rules.add_path(self._prom_rules_path, recursive=True) # Publish to databag - databag = OtlpRequirerAppData.model_validate({ + databag = _OtlpRequirerAppData.model_validate({ 'rules': {'logql': loki_rules.as_dict(), 'promql': prom_rules.as_dict()}, 'metadata': self._topology.as_dict(), }) @@ -259,11 +259,10 @@ def endpoints(self) -> dict[int, OtlpEndpoint]: continue try: - provider = relation.load(OtlpProviderAppData, relation.app) + provider = relation.load(_OtlpProviderAppData, relation.app) except ValidationError as e: logger.error('OTLP databag failed validation: %s', e) continue - if endpoints := self._filter_endpoints(provider.endpoints): endpoint_map[relation.id] = self._favor_modern_endpoints(endpoints) @@ -293,7 +292,7 @@ def add_endpoint( protocol: Literal['http', 'grpc'], endpoint: str, telemetries: Sequence[Literal['logs', 'metrics', 'traces']], - ): + ) -> 'OtlpProvider': 
"""Add an OtlpEndpoint to the list of endpoints to publish. Call this method after endpoint-changing events e.g. TLS and ingress. @@ -301,6 +300,7 @@ def add_endpoint( self._endpoints.append( _OtlpEndpoint(protocol=protocol, endpoint=endpoint, telemetries=telemetries) ) + return self def publish(self) -> None: """Triggers programmatically the update of the relation data.""" @@ -308,11 +308,11 @@ def publish(self) -> None: # Only the leader unit can write to app data. return - databag = OtlpProviderAppData.model_validate({'endpoints': self._endpoints}) + databag = _OtlpProviderAppData.model_validate({'endpoints': self._endpoints}) for relation in self._charm.model.relations[self._relation_name]: relation.save(databag, self._charm.app) - def rules(self, query_type: Literal['logql', 'promql']): + def rules(self, query_type: Literal['logql', 'promql']) -> dict[str, dict[str, Any]]: """Fetch rules for all relations of the desired query and rule types. This method returns all rules of the desired query and rule types @@ -336,7 +336,7 @@ def rules(self, query_type: Literal['logql', 'promql']): continue try: - requirer = relation.load(OtlpRequirerAppData, relation.app) + requirer = relation.load(_OtlpRequirerAppData, relation.app) except ValidationError as e: logger.error('OTLP databag failed validation: %s', e) continue diff --git a/interfaces/otlp/tests/unit/conftest.py b/interfaces/otlp/tests/unit/conftest.py index a7c8bb5ca..211cad1a2 100644 --- a/interfaces/otlp/tests/unit/conftest.py +++ b/interfaces/otlp/tests/unit/conftest.py @@ -18,7 +18,7 @@ import logging import socket -from typing import cast +from typing import Final, Literal from unittest.mock import patch import ops @@ -27,73 +27,96 @@ from ops.charm import CharmBase from charmlibs.interfaces.otlp import OtlpProvider, OtlpRequirer +from charmlibs.interfaces.otlp._otlp import DEFAULT_REQUIRER_RELATION_NAME as SEND from helpers import add_alerts, patch_cos_tool_path logger = logging.getLogger(__name__) 
LOKI_RULES_DEST_PATH = 'loki_alert_rules' METRICS_RULES_DEST_PATH = 'prometheus_alert_rules' +SINGLE_LOGQL_ALERT: Final = { + 'alert': 'HighLogVolume', + 'expr': 'count_over_time({job=~".+"}[30s]) > 100', + 'labels': {'severity': 'high'}, +} +SINGLE_LOGQL_RECORD: Final = { + 'record': 'log:error_rate:rate5m', + 'expr': 'sum by (service) (rate({job=~".+"} | json | level="error" [5m]))', + 'labels': {'severity': 'high'}, +} +SINGLE_PROMQL_ALERT: Final = { + 'alert': 'Workload Missing', + 'expr': 'up{job=~".+"} == 0', + 'for': '0m', + 'labels': {'severity': 'critical'}, +} +SINGLE_PROMQL_RECORD: Final = { + 'record': 'code:prometheus_http_requests_total:sum', + 'expr': 'sum by (code) (prometheus_http_requests_total{job=~".+"})', + 'labels': {'severity': 'high'}, +} +OFFICIAL_LOGQL_RULES: Final = { + 'groups': [ + { + 'name': 'test_logql', + 'rules': [SINGLE_LOGQL_ALERT, SINGLE_LOGQL_RECORD], + }, + ] +} +OFFICIAL_PROMQL_RULES: Final = { + 'groups': [ + { + 'name': 'test_promql', + 'rules': [SINGLE_PROMQL_ALERT, SINGLE_PROMQL_RECORD], + }, + ] +} +ALL_PROTOCOLS: Final[list[Literal['grpc', 'http']]] = ['grpc', 'http'] +ALL_TELEMETRIES: Final[list[Literal['logs', 'metrics', 'traces']]] = ['logs', 'metrics', 'traces'] + # --- Tester charms --- class OtlpRequirerCharm(CharmBase): - def __init__(self, framework: ops.Framework): - super().__init__(framework) - self.otlp_requirer = OtlpRequirer( - self, protocols=['http', 'grpc'], telemetries=['metrics', 'logs'] - ) - self.framework.observe(self.on.update_status, self._on_update_status) - - def _on_update_status(self, event: ops.EventBase) -> None: - self.otlp_requirer.publish() - - -class OtlpProviderCharm(CharmBase): - def __init__(self, framework: ops.Framework): - super().__init__(framework) - self.otlp_provider = OtlpProvider(self) - self.framework.observe(self.on.update_status, self._on_update_status) - - def _on_update_status(self, event: ops.EventBase) -> None: - self.otlp_provider.add_endpoint( - protocol='http', 
endpoint=f'{socket.getfqdn()}:4318', telemetries=['metrics'] - ) - self.otlp_provider.publish() - - -class OtlpDualCharm(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) self.charm_root = self.charm_dir.absolute() - self.otlp_requirer = OtlpRequirer( - self, - protocols=['http', 'grpc'], - telemetries=['metrics', 'logs'], - loki_rules_path=self.charm_root.joinpath(*LOKI_RULES_DEST_PATH.split('/')), - prometheus_rules_path=self.charm_root.joinpath(*METRICS_RULES_DEST_PATH.split('/')), - ) - self.otlp_provider = OtlpProvider(self) - self.framework.observe(self.on.update_status, self._on_update_status) - - def _on_update_status(self, event: ops.EventBase) -> None: - forward_alert_rules = cast('bool', self.config.get('forward_alert_rules')) - self.otlp_provider.add_endpoint( - protocol='http', endpoint=f'{socket.getfqdn()}:4318', telemetries=['metrics'] - ) + self.loki_rules_path = self.charm_root.joinpath(*LOKI_RULES_DEST_PATH.split('/')) + self.prometheus_rules_path = self.charm_root.joinpath(*METRICS_RULES_DEST_PATH.split('/')) + self.framework.observe(self.on.update_status, self._publish_rules) + def _add_rules_to_disk(self): with patch_cos_tool_path(): add_alerts( - alerts=self.otlp_provider.rules('logql') if forward_alert_rules else {}, - dest_path=self.charm_root.joinpath(*LOKI_RULES_DEST_PATH.split('/')), + alerts={'test_identifier': OFFICIAL_LOGQL_RULES}, dest_path=self.loki_rules_path ) add_alerts( - alerts=self.otlp_provider.rules('promql') if forward_alert_rules else {}, - dest_path=self.charm_root.joinpath(*METRICS_RULES_DEST_PATH.split('/')), + alerts={'test_identifier': OFFICIAL_PROMQL_RULES}, + dest_path=self.prometheus_rules_path, ) - self.otlp_provider.publish() - self.otlp_requirer.publish() + def _publish_rules(self, _: ops.EventBase) -> None: + self._add_rules_to_disk() + OtlpRequirer( + self, + SEND, + ALL_PROTOCOLS, + ALL_TELEMETRIES, + loki_rules_path=self.loki_rules_path, + 
prometheus_rules_path=self.prometheus_rules_path, + ).publish() + + +class OtlpProviderCharm(CharmBase): + def __init__(self, framework: ops.Framework): + super().__init__(framework) + self.framework.observe(self.on.update_status, self._publish_endpoints) + + def _publish_endpoints(self, _: ops.EventBase) -> None: + OtlpProvider(self).add_endpoint( + protocol='http', endpoint=f'{socket.getfqdn()}:4318', telemetries=['metrics'] + ).publish() # --- Fixtures --- @@ -115,21 +138,3 @@ def otlp_requirer_ctx() -> testing.Context[OtlpRequirerCharm]: def otlp_provider_ctx() -> testing.Context[OtlpProviderCharm]: meta = {'name': 'otlp-provider', 'provides': {'receive-otlp': {'interface': 'otlp'}}} return testing.Context(OtlpProviderCharm, meta=meta) - - -@pytest.fixture -def otlp_dual_ctx() -> testing.Context[OtlpDualCharm]: - meta = { - 'name': 'otlp-dual', - 'requires': {'send-otlp': {'interface': 'otlp'}}, - 'provides': {'receive-otlp': {'interface': 'otlp'}}, - } - config = { - 'options': { - 'forward_alert_rules': { - 'type': 'boolean', - 'default': True, - }, - }, - } - return testing.Context(OtlpDualCharm, meta=meta, config=config) diff --git a/interfaces/otlp/tests/unit/test_endpoints.py b/interfaces/otlp/tests/unit/test_endpoints.py index 5dea6605f..95711a200 100644 --- a/interfaces/otlp/tests/unit/test_endpoints.py +++ b/interfaces/otlp/tests/unit/test_endpoints.py @@ -4,27 +4,24 @@ """Feature: OTLP endpoint handling.""" import json -from typing import Any, cast -from unittest.mock import patch +from collections.abc import Sequence +from typing import Any, Final, Literal, cast import ops import pytest from ops import testing from ops.testing import Relation, State -from charmlibs.interfaces.otlp._otlp import OtlpProviderAppData, _OtlpEndpoint +from charmlibs.interfaces.otlp._otlp import DEFAULT_PROVIDER_RELATION_NAME as RECEIVE +from charmlibs.interfaces.otlp._otlp import DEFAULT_REQUIRER_RELATION_NAME as SEND +from charmlibs.interfaces.otlp._otlp import 
OtlpRequirer, _OtlpEndpoint, _OtlpProviderAppData +from conftest import ALL_PROTOCOLS, ALL_TELEMETRIES -ALL_PROTOCOLS = ['grpc', 'http'] -ALL_TELEMETRIES = ['logs', 'metrics', 'traces'] -EMPTY_REQUIRER = { - 'rules': json.dumps({'logql': {}, 'promql': {}}), - 'metadata': json.dumps({}), -} +PROTOCOLS: Final[list[Literal['http', 'grpc']]] = ['http', 'grpc'] +TELEMETRIES: Final[list[Literal['metrics', 'logs']]] = ['metrics', 'logs'] -RECEIVE_OTLP = Relation('receive-otlp', remote_app_data=EMPTY_REQUIRER) - -def test_new_endpoint_key_is_ignored_by_databag_model() -> None: +def test_new_endpoint_key_is_ignored_by_databag_model(): # GIVEN the provider offers a new endpoint type (protocol or telemetry) # * the requirer does not support this new endpoint type endpoint = { @@ -36,7 +33,7 @@ def test_new_endpoint_key_is_ignored_by_databag_model() -> None: # WHEN validating the provider databag model, which the requirer uses to access endpoints # THEN the validation succeeds - provider_databag: OtlpProviderAppData = OtlpProviderAppData.model_validate({ + provider_databag: _OtlpProviderAppData = _OtlpProviderAppData.model_validate({ 'endpoints': [endpoint] }) assert provider_databag @@ -115,22 +112,18 @@ def test_send_otlp_invalid_databag( ): # GIVEN a remote app provides an _OtlpEndpoint # WHEN they are related over the "send-otlp" endpoint - provider = Relation('send-otlp', id=123, remote_app_data=provides) + provider = Relation(SEND, id=123, remote_app_data=provides) state = State(relations=[provider], leader=True) with otlp_requirer_ctx(otlp_requirer_ctx.on.update_status(), state=state) as mgr: # WHEN the requirer processes the relation data # * the requirer supports all protocols and telemetries charm_any = cast('Any', mgr.charm) - with ( - patch.object(charm_any.otlp_requirer, '_protocols', new=ALL_PROTOCOLS), - patch.object(charm_any.otlp_requirer, '_telemetries', new=ALL_TELEMETRIES), - ): - # THEN the requirer does not raise an error - # * the returned endpoint does 
not include new protocols or telemetries - assert mgr.run() - result = charm_any.otlp_requirer.endpoints[123] - assert result.model_dump() == otlp_endpoint.model_dump() + # THEN the requirer does not raise an error + # * the returned endpoint does not include new protocols or telemetries + assert mgr.run() + endpoints = OtlpRequirer(charm_any, SEND, ALL_PROTOCOLS, ALL_TELEMETRIES).endpoints + assert endpoints[123].model_dump() == otlp_endpoint.model_dump() @pytest.mark.parametrize( @@ -184,8 +177,8 @@ def test_send_otlp_invalid_databag( ) def test_send_otlp_with_varying_requirer_support( otlp_requirer_ctx: testing.Context[ops.CharmBase], - protocols: list[str], - telemetries: list[str], + protocols: Sequence[Literal['http', 'grpc']], + telemetries: Sequence[Literal['logs', 'metrics', 'traces']], expected: dict[int, _OtlpEndpoint], ): # GIVEN a remote app provides multiple _OtlpEndpoints @@ -214,29 +207,14 @@ def test_send_otlp_with_varying_requirer_support( } # WHEN they are related over the "send-otlp" endpoint - provider_0 = Relation( - 'send-otlp', - id=123, - remote_app_data=remote_app_data_1, - ) - provider_1 = Relation( - 'send-otlp', - id=456, - remote_app_data=remote_app_data_2, - ) - state = State( - relations=[provider_0, provider_1], - leader=True, - ) + provider_0 = Relation(SEND, id=123, remote_app_data=remote_app_data_1) + provider_1 = Relation(SEND, id=456, remote_app_data=remote_app_data_2) + state = State(relations=[provider_0, provider_1], leader=True) # AND WHEN the requirer has varying support for OTLP protocols and telemetries with otlp_requirer_ctx(otlp_requirer_ctx.on.update_status(), state=state) as mgr: charm_any = cast('Any', mgr.charm) - with ( - patch.object(charm_any.otlp_requirer, '_protocols', new=protocols), - patch.object(charm_any.otlp_requirer, '_telemetries', new=telemetries), - ): - remote_endpoints = charm_any.otlp_requirer.endpoints + remote_endpoints = OtlpRequirer(charm_any, SEND, protocols, telemetries).endpoints # THEN the 
returned endpoints are filtered accordingly assert {k: v.model_dump() for k, v in remote_endpoints.items()} == { @@ -284,25 +262,14 @@ def test_send_otlp(otlp_requirer_ctx: testing.Context[ops.CharmBase]): } # WHEN they are related over the "send-otlp" endpoint - provider_1 = Relation( - 'send-otlp', - id=123, - remote_app_data=remote_app_data_1, - ) - provider_2 = Relation( - 'send-otlp', - id=456, - remote_app_data=remote_app_data_2, - ) - state = State( - relations=[provider_1, provider_2], - leader=True, - ) + provider_1 = Relation(SEND, id=123, remote_app_data=remote_app_data_1) + provider_2 = Relation(SEND, id=456, remote_app_data=remote_app_data_2) + state = State(relations=[provider_1, provider_2], leader=True) # AND WHEN otelcol supports a subset of OTLP protocols and telemetries with otlp_requirer_ctx(otlp_requirer_ctx.on.update_status(), state=state) as mgr: charm_any = cast('Any', mgr.charm) - remote_endpoints = charm_any.otlp_requirer.endpoints + remote_endpoints = OtlpRequirer(charm_any, SEND, PROTOCOLS, TELEMETRIES).endpoints # THEN the returned endpoints are filtered accordingly assert {k: v.model_dump() for k, v in remote_endpoints.items()} == { @@ -312,10 +279,14 @@ def test_send_otlp(otlp_requirer_ctx: testing.Context[ops.CharmBase]): def test_receive_otlp(otlp_provider_ctx: testing.Context[ops.CharmBase]): # GIVEN a receive-otlp relation - state = State( - leader=True, - relations=[RECEIVE_OTLP], + receiver = Relation( + RECEIVE, + remote_app_data={ + 'rules': json.dumps({'logql': {}, 'promql': {}}), + 'metadata': '{}', + }, ) + state = State(leader=True, relations=[receiver]) # AND WHEN any event executes the reconciler state_out = otlp_provider_ctx.run(otlp_provider_ctx.on.update_status(), state=state) @@ -333,7 +304,7 @@ def test_receive_otlp(otlp_provider_ctx: testing.Context[ops.CharmBase]): } assert (actual_endpoints := json.loads(local_app_data.get('endpoints', '[]'))) assert ( - OtlpProviderAppData.model_validate({'endpoints': 
actual_endpoints}).model_dump() + _OtlpProviderAppData.model_validate({'endpoints': actual_endpoints}).model_dump() == expected_endpoints ) @@ -393,10 +364,11 @@ def test_favor_modern_endpoints( # GIVEN a list of endpoints state = State(leader=True) with otlp_requirer_ctx(otlp_requirer_ctx.on.update_status(), state=state) as mgr: - charm_any = cast('Any', mgr.charm) - # WHEN the requirer selects an endpoint - result = charm_any.otlp_requirer._favor_modern_endpoints(endpoints) + charm_any = cast('Any', mgr.charm) + result = OtlpRequirer(charm_any, SEND, PROTOCOLS, TELEMETRIES)._favor_modern_endpoints( + endpoints + ) # THEN the most modern one is chosen assert result.protocol == expected_protocol diff --git a/interfaces/otlp/tests/unit/test_rules.py b/interfaces/otlp/tests/unit/test_rules.py index b8e19a945..60199e5d2 100644 --- a/interfaces/otlp/tests/unit/test_rules.py +++ b/interfaces/otlp/tests/unit/test_rules.py @@ -1,10 +1,10 @@ # Copyright 2026 Canonical Ltd. # See LICENSE file for licensing details. 
-"""Feature: Rules aggregation and forwarding.""" +"""Feature: Rules aggregation and labeling.""" import json -from typing import Any +from typing import Any, cast import ops import pytest @@ -12,67 +12,17 @@ from ops import testing from ops.testing import Model, Relation, State -from charmlibs.interfaces.otlp._otlp import OtlpRequirerAppData, _RulesModel - -MODEL = Model('otelcol', uuid='f4d59020-c8e7-4053-8044-a2c1e5591c7f') -OTELCOL_LABELS = { - 'juju_model': 'otelcol', - 'juju_model_uuid': 'f4d59020-c8e7-4053-8044-a2c1e5591c7f', - 'juju_application': 'opentelemetry-collector-k8s', - 'juju_charm': 'opentelemetry-collector-k8s', -} -LOGQL_ALERT = { - 'name': 'otelcol_f4d59020_charm_x_foo_alerts', - 'rules': [ - { - 'alert': 'HighLogVolume', - 'expr': 'count_over_time({job=~".+"}[30s]) > 100', - 'labels': {'severity': 'high'}, - }, - ], -} -LOGQL_RECORD = { - 'name': 'otelcol_f4d59020_charm_x_foobar_alerts', - 'rules': [ - { - 'record': 'log:error_rate:rate5m', - 'expr': 'sum by (service) (rate({job=~".+"} | json | level="error" [5m]))', - 'labels': {'severity': 'high'}, - } - ], -} -PROMQL_ALERT = { - 'name': 'otelcol_f4d59020_charm_x_bar_alerts', - 'rules': [ - { - 'alert': 'Workload Missing', - 'expr': 'up{job=~".+"} == 0', - 'for': '0m', - 'labels': {'severity': 'critical'}, - }, - ], -} -PROMQL_RECORD = { - 'name': 'otelcol_f4d59020_charm_x_barfoo_alerts', - 'rules': [ - { - 'record': 'code:prometheus_http_requests_total:sum', - 'expr': 'sum by (code) (prometheus_http_requests_total{job=~".+"})', - 'labels': {'severity': 'high'}, - } - ], -} -ALL_RULES = { - 'logql': {'groups': [LOGQL_ALERT, LOGQL_RECORD]}, - 'promql': {'groups': [PROMQL_ALERT, PROMQL_RECORD]}, -} -METADATA = { - 'model': 'otelcol', - 'model_uuid': 'f4d59020-c8e7-4053-8044-a2c1e5591c7f', - 'application': 'opentelemetry-collector-k8s', - 'charm': 'opentelemetry-collector-k8s', - 'unit': 'opentelemetry-collector-k8s/0', -} +from charmlibs.interfaces.otlp._otlp import 
DEFAULT_PROVIDER_RELATION_NAME as RECEIVE +from charmlibs.interfaces.otlp._otlp import DEFAULT_REQUIRER_RELATION_NAME as SEND +from charmlibs.interfaces.otlp._otlp import OtlpProvider, _OtlpRequirerAppData, _RulesModel +from conftest import ( + SINGLE_LOGQL_ALERT, + SINGLE_LOGQL_RECORD, + SINGLE_PROMQL_ALERT, + SINGLE_PROMQL_RECORD, +) + +MODEL = Model('foo-model', uuid='f4d59020-c8e7-4053-8044-a2c1e5591c7f') def _decompress(rules: str | None) -> dict[str, Any]: @@ -81,14 +31,14 @@ def _decompress(rules: str | None) -> dict[str, Any]: return json.loads(LZMABase64.decompress(rules)) -def test_new_rule_is_ignored_by_databag_model() -> None: +def test_new_rule_is_ignored_by_databag_model(): # GIVEN the requirer offers a new rule type # * the provider does not support this new rule type # WHEN validating the requirer databag model, which the provider uses to access rules # THEN the validation succeeds - requirer_databag = OtlpRequirerAppData.model_validate({ + requirer_databag = _OtlpRequirerAppData.model_validate({ 'rules': {'promql': {}, 'new_rule': {}}, - 'metadata': METADATA, + 'metadata': {}, }) assert requirer_databag assert isinstance(requirer_databag.rules, _RulesModel) @@ -96,35 +46,24 @@ def test_new_rule_is_ignored_by_databag_model() -> None: assert 'new_rule' not in requirer_databag.rules.model_dump() -def test_missing_rule_type_defaults() -> None: - # GIVEN the requirer offers a new rule type - # * the provider does not support this new rule type - # WHEN validating the requirer databag model, which the provider uses to access rules +def test_missing_rule_type_defaults(): + # GIVEN no rules or metadata is provided + # WHEN validating the requirer databag model # THEN the validation succeeds - requirer_databag = OtlpRequirerAppData.model_validate({'rules': {}, 'metadata': METADATA}) + requirer_databag = _OtlpRequirerAppData.model_validate({'rules': {}, 'metadata': {}}) assert requirer_databag assert isinstance(requirer_databag.rules, _RulesModel) - # AND 
the new rule type is ignored - assert 'promql' in requirer_databag.rules.model_dump() - assert 'logql' in requirer_databag.rules.model_dump() + # AND the rule model is created + assert requirer_databag.rules.model_dump().keys() == _RulesModel.model_fields.keys() -def test_rules_compression(otlp_dual_ctx: testing.Context[ops.CharmBase]) -> None: - # GIVEN receive-otlp and send-otlp relations - databag: dict[str, str] = { - 'rules': json.dumps(ALL_RULES, sort_keys=True), - 'metadata': json.dumps(METADATA), - } - receiver = Relation('receive-otlp', remote_app_data=databag) - sender = Relation('send-otlp', remote_app_data={'endpoints': '[]'}) - state = State(relations=[receiver, sender], leader=True, model=MODEL) +def test_rules_compression(otlp_requirer_ctx: testing.Context[ops.CharmBase]): + # GIVEN a send-otlp relation + state = State(relations=[Relation(SEND)], leader=True) # WHEN any event executes the reconciler - state_out = otlp_dual_ctx.run(otlp_dual_ctx.on.update_status(), state=state) - + state_out = otlp_requirer_ctx.run(otlp_requirer_ctx.on.update_status(), state=state) for relation in list(state_out.relations): - if relation.endpoint != 'send-otlp': - continue rules = relation.local_app_data.get('rules', None) assert rules is not None @@ -134,86 +73,107 @@ def test_rules_compression(otlp_dual_ctx: testing.Context[ops.CharmBase]) -> Non decompressed = _decompress(json.loads(rules)) assert decompressed assert isinstance(decompressed, dict) - assert set(ALL_RULES.keys()).issubset(decompressed.keys()) + assert set(_RulesModel.model_fields.keys()).issubset(decompressed.keys()) -@pytest.mark.parametrize( - 'forwarding_enabled, rules, expected_group_counts', - [ - # format , databag_groups, generic_groups, total - # logql , (2) , (0) , (2) - # promql , (2) , (1) , (3) - ( - True, - { - 'logql': {'groups': [LOGQL_ALERT, LOGQL_RECORD]}, - 'promql': {'groups': [PROMQL_ALERT, PROMQL_RECORD]}, - }, - {'logql': 2, 'promql': 3}, - ), - # format , databag_groups, 
generic_groups, total - # logql , (0) , (0) , (0) - # promql , (2) , (1) , (3) - ( - True, - {'logql': {}, 'promql': {'groups': [PROMQL_ALERT, PROMQL_RECORD]}}, - {'logql': 0, 'promql': 3}, - ), - # format , databag_groups, generic_groups, total - # logql , (2) , (0) , (2) - # promql , (0) , (1) , (1) - ( - True, - {'logql': {'groups': [LOGQL_ALERT, LOGQL_RECORD]}, 'promql': {}}, - {'logql': 2, 'promql': 1}, - ), - ], -) -@pytest.mark.parametrize( - 'metadata', - [METADATA, {}], - ids=['with_metadata', 'without_metadata'], -) -def test_forwarding_otlp_rule_counts( - otlp_dual_ctx: testing.Context[ops.CharmBase], - forwarding_enabled: bool, - rules: dict[str, Any], - expected_group_counts: dict[str, int], - metadata: dict[str, Any], -) -> None: - # GIVEN forwarding of rules is enabled - # * a receive-otlp with rules in the databag - # * two send-otlp relations - databag = {'rules': json.dumps(rules), 'metadata': json.dumps(metadata)} - receiver = Relation('receive-otlp', remote_app_data=databag) - sender_1 = Relation('send-otlp', remote_app_data={'endpoints': '[]'}) - sender_2 = Relation('send-otlp', remote_app_data={'endpoints': '[]'}) - state = State( - relations=[receiver, sender_1, sender_2], - leader=True, - model=MODEL, - config={'forward_alert_rules': forwarding_enabled}, - ) +def test_generic_rule_injection(otlp_requirer_ctx: testing.Context[ops.CharmBase]): + # GIVEN a send-otlp relation + state = State(relations=[Relation(SEND)], leader=True, model=MODEL) # WHEN any event executes the reconciler - state_out = otlp_dual_ctx.run(otlp_dual_ctx.on.update_status(), state=state) - + state_out = otlp_requirer_ctx.run(otlp_requirer_ctx.on.update_status(), state=state) for relation in list(state_out.relations): - if relation.endpoint != 'send-otlp': - continue - + # AND the rules in the databag are decompressed decompressed = _decompress(relation.local_app_data.get('rules')) assert decompressed - requirer_databag: OtlpRequirerAppData = 
OtlpRequirerAppData.model_validate({ - 'rules': decompressed, - 'metadata': {}, - }) - - # THEN all expected rules exist in the databag - # * databag_groups are included/forwarded - assert ( - len(requirer_databag.rules.logql.get('groups', [])) == expected_group_counts['logql'] - ) - assert ( - len(requirer_databag.rules.promql.get('groups', [])) == expected_group_counts['promql'] - ) + logql_groups = decompressed.get('logql', {}).get('groups', []) + promql_groups = decompressed.get('promql', {}).get('groups', []) + assert logql_groups + assert promql_groups + + # THEN the generic promql rule is in the databag + assert any('AggregatorHostHealth' in g.get('name') for g in promql_groups) + + +def test_metadata(otlp_requirer_ctx: testing.Context[ops.CharmBase]): + # GIVEN a send-otlp relation + state = State(relations=[Relation(SEND)], leader=True, model=MODEL) + + # WHEN any event executes the reconciler + state_out = otlp_requirer_ctx.run(otlp_requirer_ctx.on.update_status(), state=state) + for relation in list(state_out.relations): + # THEN the requirer adds its own metadata to the databag + assert json.loads(relation.local_app_data['metadata']) == { + 'model': 'foo-model', + 'model_uuid': 'f4d59020-c8e7-4053-8044-a2c1e5591c7f', + 'application': 'otlp-requirer', + 'unit': 'otlp-requirer/0', + 'charm_name': 'otlp-requirer', + } + + +@pytest.mark.parametrize( + 'metadata', + [ + {}, + { + 'model': 'foo-model', + 'model_uuid': 'f4d59020-c8e7-4053-8044-a2c1e5591c7f', + 'application': 'otlp-requirer', + 'charm_name': 'otlp-requirer', + 'unit': 'otlp-requirer/0', + }, + ], +) +def test_provider_rules( + otlp_provider_ctx: testing.Context[ops.CharmBase], metadata: dict[str, Any] +): + # GIVEN a requirer offers unlabeled rules (of various types) in the databag + rules = { + 'logql': { + 'groups': [ + {'name': 'test_logql_alert', 'rules': [SINGLE_LOGQL_ALERT]}, + {'name': 'test_logql_record', 'rules': [SINGLE_LOGQL_RECORD]}, + ] + }, + 'promql': { + 'groups': [ + {'name': 
'test_promql_alert', 'rules': [SINGLE_PROMQL_ALERT]}, + {'name': 'test_promql_record', 'rules': [SINGLE_PROMQL_RECORD]}, + ] + }, + } + receiver = Relation( + RECEIVE, remote_app_data={'rules': json.dumps(rules), 'metadata': json.dumps(metadata)} + ) + state = State(leader=True, relations=[receiver], model=MODEL) + with otlp_provider_ctx(otlp_provider_ctx.on.update_status(), state=state) as mgr: + # WHEN the provider aggregates the rules from the databag + charm_any = cast('Any', mgr.charm) + logql = OtlpProvider(charm_any, RECEIVE).rules('logql') + promql = OtlpProvider(charm_any, RECEIVE).rules('promql') + assert logql + assert promql + for result in [logql, promql]: + app = metadata['application'] if metadata else 'otlp-provider' + charm = metadata['charm_name'] if metadata else 'otlp-provider' + + # THEN the identifier is present + identifier = 'foo-model_f4d59020_' + app + assert identifier in result + groups = result[identifier].get('groups', []) + assert groups + for group in groups: + for rule in group.get('rules', []): + # AND the rules are labeled with the provider's topology + assert rule['labels']['juju_model'] == 'foo-model' + assert ( + rule['labels']['juju_model_uuid'] == 'f4d59020-c8e7-4053-8044-a2c1e5591c7f' + ) + assert rule['labels']['juju_application'] == app + assert rule['labels']['juju_charm'] == charm + + # AND the expressions are labeled + assert 'juju_model="foo-model"' in rule['expr'] + assert 'juju_model_uuid="f4d59020-c8e7-4053-8044-a2c1e5591c7f"' in rule['expr'] + assert f'juju_application="{app}"' in rule['expr'] From 471297d028b473ae92cca0728106c0f349bcf044 Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Thu, 19 Mar 2026 13:23:57 -0400 Subject: [PATCH 02/10] feat: Rules interface --- .gitignore | 1 - .../src/charmlibs/interfaces/otlp/__init__.py | 2 +- .../src/charmlibs/interfaces/otlp/_otlp.py | 96 ++++++++++++------- interfaces/otlp/tests/unit/conftest.py | 36 +++---- interfaces/otlp/tests/unit/helpers.py | 18 ---- 5 files 
changed, 75 insertions(+), 78 deletions(-) diff --git a/.gitignore b/.gitignore index dcb59288a..2d5d14888 100644 --- a/.gitignore +++ b/.gitignore @@ -25,7 +25,6 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST -cos-tool* # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py index 6a67e8aa5..b239ddb16 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py @@ -69,7 +69,7 @@ def _on_ingress_ready(self, event): subset of protocols and telemetries, which can be configured at instantiation:: from charmlibs.interfaces.otlp import OtlpRequirer - +TODO: Update these examples class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py index 100267a3c..48ace26d1 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py @@ -24,11 +24,12 @@ import logging from collections import OrderedDict from collections.abc import Sequence +from dataclasses import dataclass, field from pathlib import Path from typing import Any, Final, Literal from cosl.juju_topology import JujuTopology -from cosl.rules import AlertRules, InjectResult, generic_alert_groups +from cosl.rules import InjectResult, Rules, generic_alert_groups from cosl.types import OfficialRuleFileFormat from cosl.utils import LZMABase64 from ops import CharmBase @@ -49,6 +50,47 @@ logger = logging.getLogger(__name__) +@dataclass +class RuleStore: + """An API for users to provide rules of different types to the OtlpRequirer.""" + + topology: JujuTopology + logql: Rules = 
field(init=False) + promql: Rules = field(init=False) + + def __post_init__(self): + self.logql = Rules(query_type='logql', topology=self.topology) + self.promql = Rules(query_type='promql', topology=self.topology) + + def add_logql( + self, + rule_dict: dict[str, Any], + *, + group_name: str | None = None, + group_name_prefix: str | None = None, + ) -> 'RuleStore': + self.logql.add(rule_dict, group_name=group_name, group_name_prefix=group_name_prefix) + return self + + def add_logql_path(self, dir_path: str | Path, *, recursive: bool = False) -> 'RuleStore': + self.logql.add_path(dir_path, recursive=recursive) + return self + + def add_promql( + self, + rule_dict: dict[str, Any], + *, + group_name: str | None = None, + group_name_prefix: str | None = None, + ) -> 'RuleStore': + self.promql.add(rule_dict, group_name=group_name, group_name_prefix=group_name_prefix) + return self + + def add_promql_path(self, dir_path: str | Path, *, recursive: bool = False) -> 'RuleStore': + self.promql.add_path(dir_path, recursive=recursive) + return self + + class _RulesModel(BaseModel): """Rules of various formats (query languages) to support in the relation databag.""" @@ -132,10 +174,8 @@ class OtlpRequirer: endpoints. telemetries: The telemetries to filter for in the provider's OTLP endpoints. - loki_rules_path: The path to Loki alerting and recording rules provided - by this charm. - prometheus_rules_path: The path to Prometheus alerting and recording - rules provided by this charm. + rules: Rules of different types e.g., logql or promql, that the + requirer will publish for the provider. 
""" def __init__( @@ -145,10 +185,10 @@ def __init__( protocols: Sequence[Literal['http', 'grpc']] | None = None, telemetries: Sequence[Literal['logs', 'metrics', 'traces']] | None = None, *, - loki_rules_path: str | Path = DEFAULT_LOKI_RULES_RELATIVE_PATH, - prometheus_rules_path: str | Path = DEFAULT_PROM_RULES_RELATIVE_PATH, + rules: RuleStore | None = None, ): self._charm = charm + self._topology = JujuTopology.from_charm(charm) self._relation_name = relation_name self._protocols: list[Literal['http', 'grpc']] = ( list(protocols) if protocols is not None else [] @@ -156,9 +196,7 @@ def __init__( self._telemetries: list[Literal['logs', 'metrics', 'traces']] = ( list(telemetries) if telemetries is not None else [] ) - self._topology = JujuTopology.from_charm(charm) - self._loki_rules_path: str | Path = loki_rules_path - self._prom_rules_path: str | Path = prometheus_rules_path + self._rules = rules if rules is not None else RuleStore(self._topology) def _filter_endpoints(self, endpoints: list[_OtlpEndpoint]) -> list[_OtlpEndpoint]: """Filter out unsupported OtlpEndpoints. @@ -200,42 +238,28 @@ def _favor_modern_endpoints(self, endpoints: list[_OtlpEndpoint]) -> _OtlpEndpoi def publish(self): """Triggers programmatically the update of the relation data. - The rule files exist in separate directories, distinguished by format - (logql|promql), each including alerting and recording rule types. The - charm uses these paths as aggregation points for rules, acting as their - source of truth. For each type of rule, the charm may aggregate rules - from: - - - rules bundled in the charm's source code - - any rules provided by related charms - - Generic, injected rules (not specific to any charm) are always - published. Besides these generic rules, the inclusion of bundled rules - and rules from related charms is the responsibility of the charm using - the library. 
Including bundled rules and rules from related charms is - achieved by copying these rules to the respective paths within the - charm's filesystem and providing those paths to the OtlpRequirer - constructor. + These rule sources are included when publishing: + - Any rules provided at the instantiation of this class. + - Generic (not specific to any charm) PromQL rules. """ if not self._charm.unit.is_leader(): # Only the leader unit can write to app data. return - # Define the rule types - loki_rules = AlertRules(query_type='logql', topology=self._topology) - prom_rules = AlertRules(query_type='promql', topology=self._topology) + rules = {} + # Loki rules + rules['logql'] = self._rules.logql.as_dict() - # Add rules - prom_rules.add( + # Prometheus rules + self._rules.add_promql( copy.deepcopy(generic_alert_groups.aggregator_rules), group_name_prefix=self._topology.identifier, ) - loki_rules.add_path(self._loki_rules_path, recursive=True) - prom_rules.add_path(self._prom_rules_path, recursive=True) + rules['promql'] = self._rules.promql.as_dict() # Publish to databag databag = _OtlpRequirerAppData.model_validate({ - 'rules': {'logql': loki_rules.as_dict(), 'promql': prom_rules.as_dict()}, + 'rules': rules, 'metadata': self._topology.as_dict(), }) for relation in self._charm.model.relations[self._relation_name]: @@ -328,8 +352,8 @@ def rules(self, query_type: Literal['logql', 'promql']) -> dict[str, dict[str, A following the OfficialRuleFileFormat from cos-lib. 
""" rules_map: dict[str, dict[str, Any]] = {} - # Instantiate AlertRules with topology to ensure that rules always have an identifier - rules_obj = AlertRules(query_type, self._topology) + # Instantiate Rules with topology to ensure that rules always have an identifier + rules_obj = Rules(query_type, self._topology) for relation in self._charm.model.relations[self._relation_name]: if not relation.data[relation.app]: # The databags haven't initialized yet, continue diff --git a/interfaces/otlp/tests/unit/conftest.py b/interfaces/otlp/tests/unit/conftest.py index 211cad1a2..efec06c4e 100644 --- a/interfaces/otlp/tests/unit/conftest.py +++ b/interfaces/otlp/tests/unit/conftest.py @@ -18,17 +18,18 @@ import logging import socket +from copy import deepcopy from typing import Final, Literal from unittest.mock import patch import ops import pytest +from cosl.juju_topology import JujuTopology from ops import testing from ops.charm import CharmBase -from charmlibs.interfaces.otlp import OtlpProvider, OtlpRequirer -from charmlibs.interfaces.otlp._otlp import DEFAULT_REQUIRER_RELATION_NAME as SEND -from helpers import add_alerts, patch_cos_tool_path +from charmlibs.interfaces.otlp import OtlpProvider, OtlpRequirer, RuleStore +from helpers import patch_cos_tool_path logger = logging.getLogger(__name__) @@ -81,30 +82,21 @@ class OtlpRequirerCharm(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) - self.charm_root = self.charm_dir.absolute() - self.loki_rules_path = self.charm_root.joinpath(*LOKI_RULES_DEST_PATH.split('/')) - self.prometheus_rules_path = self.charm_root.joinpath(*METRICS_RULES_DEST_PATH.split('/')) self.framework.observe(self.on.update_status, self._publish_rules) - def _add_rules_to_disk(self): + def _publish_rules(self, _: ops.EventBase) -> None: with patch_cos_tool_path(): - add_alerts( - alerts={'test_identifier': OFFICIAL_LOGQL_RULES}, dest_path=self.loki_rules_path - ) - add_alerts( - alerts={'test_identifier': 
OFFICIAL_PROMQL_RULES}, - dest_path=self.prometheus_rules_path, + rules = ( + RuleStore(JujuTopology.from_charm(self)) + .add_logql(deepcopy(SINGLE_LOGQL_ALERT), group_name='test_logql_alert') + .add_logql(deepcopy(SINGLE_LOGQL_RECORD), group_name='test_logql_record') + .add_promql(deepcopy(SINGLE_PROMQL_ALERT), group_name='test_promql_alert') + .add_promql(deepcopy(SINGLE_PROMQL_RECORD), group_name='test_promql_record') + .add_logql(deepcopy(OFFICIAL_LOGQL_RULES)) + .add_promql(deepcopy(OFFICIAL_PROMQL_RULES)) ) - - def _publish_rules(self, _: ops.EventBase) -> None: - self._add_rules_to_disk() OtlpRequirer( - self, - SEND, - ALL_PROTOCOLS, - ALL_TELEMETRIES, - loki_rules_path=self.loki_rules_path, - prometheus_rules_path=self.prometheus_rules_path, + self, protocols=ALL_PROTOCOLS, telemetries=ALL_TELEMETRIES, rules=rules ).publish() diff --git a/interfaces/otlp/tests/unit/helpers.py b/interfaces/otlp/tests/unit/helpers.py index 01b92b68d..3229b841f 100644 --- a/interfaces/otlp/tests/unit/helpers.py +++ b/interfaces/otlp/tests/unit/helpers.py @@ -16,11 +16,9 @@ from collections.abc import Iterator from contextlib import contextmanager from pathlib import Path -from typing import Any from unittest.mock import patch import requests -import yaml from cosl import CosTool as _CosTool logger = logging.getLogger(__name__) @@ -50,19 +48,3 @@ def patch_cos_tool_path() -> Iterator[None]: with patch.object(target=_CosTool, attribute='_path', new=str(cos_path)): yield - - -def add_alerts(alerts: dict[str, dict[str, Any]], dest_path: Path) -> None: - """Save the alerts to files in the specified destination folder. - - For K8s charms, alerts are saved in the charm container. 
- - Args: - alerts: Dictionary of alerts to save to disk - dest_path: Path to the folder where alerts will be saved - """ - dest_path.mkdir(parents=True, exist_ok=True) - for topology_identifier, rule in alerts.items(): - rule_file = dest_path.joinpath(f'juju_{topology_identifier}.rules') - rule_file.write_text(yaml.safe_dump(rule)) - logger.debug('updated alert rules file: %s', rule_file.as_posix()) From a7682456a03fbaee3257eff49b638fbf74858f3c Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Thu, 19 Mar 2026 15:27:40 -0400 Subject: [PATCH 03/10] chore: Remove the dual_context fixture for scenario tests --- .../otlp/src/charmlibs/interfaces/otlp/_otlp.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py index 48ace26d1..253ebd82f 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py @@ -246,20 +246,17 @@ def publish(self): # Only the leader unit can write to app data. 
return - rules = {} - # Loki rules - rules['logql'] = self._rules.logql.as_dict() - - # Prometheus rules self._rules.add_promql( copy.deepcopy(generic_alert_groups.aggregator_rules), group_name_prefix=self._topology.identifier, ) - rules['promql'] = self._rules.promql.as_dict() # Publish to databag databag = _OtlpRequirerAppData.model_validate({ - 'rules': rules, + 'rules': { + 'logql': self._rules.logql.as_dict(), + 'promql': self._rules.promql.as_dict(), + }, 'metadata': self._topology.as_dict(), }) for relation in self._charm.model.relations[self._relation_name]: From 51e2e80ddd7df0bdc3774f58ae946c172260d548 Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Thu, 19 Mar 2026 18:03:07 -0400 Subject: [PATCH 04/10] chore: cleanup --- interfaces/otlp/CHANGELOG.md | 5 ++ interfaces/otlp/README.md | 26 ++++++--- .../src/charmlibs/interfaces/otlp/__init__.py | 54 ++++++++++++------- .../src/charmlibs/interfaces/otlp/_otlp.py | 2 +- interfaces/otlp/tests/unit/test_rules.py | 9 ++-- 5 files changed, 65 insertions(+), 31 deletions(-) diff --git a/interfaces/otlp/CHANGELOG.md b/interfaces/otlp/CHANGELOG.md index 9099ebd09..c236b2e83 100644 --- a/interfaces/otlp/CHANGELOG.md +++ b/interfaces/otlp/CHANGELOG.md @@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [0.1.0] ### Added + - Initial release of charmlibs.interfaces.otlp - `OtlpRequirer` for consuming OTLP endpoints from a provider relation - `OtlpProvider` for publishing OTLP endpoints to requirer relations @@ -20,4 +21,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Generic aggregator rules automatically included in every requirer's published rule set - Python 3.10+ compatibility +## [0.2.0] + +### Updated +- Replace the requirer's rule path interface with an interface accepting an object containing rules diff --git a/interfaces/otlp/README.md b/interfaces/otlp/README.md index 33a15d7a2..026296404 100644 --- 
a/interfaces/otlp/README.md +++ b/interfaces/otlp/README.md @@ -6,8 +6,9 @@ OTLP integration library for Juju charms, providing OTLP endpoint information fo ## Features -- **Provider/Requirer pattern**: Enables charms to share OTLP endpoint information and rules +- **Provider/Requirer pattern**: Enables charms to share OTLP endpoint information and rules content - **Define endpoint support**: Providers and requirers define what OTLP protocols and telemetries they support. +- **Rules interface**: Add rules to a 'RuleStore' object and provide that to the requirer for publishing. - **Automatic topology injection**: Inject Juju topology labels into rule expressions and labels with metadata if the labels are not already labeled. ## Getting started @@ -26,9 +27,10 @@ from charmlibs.interfaces.otlp import OtlpProvider class MyOtlpServer(CharmBase): def __init__(self, *args): super().__init__(*args) - self.framework.observe(self.on.ingress_ready, self._on_ingress_ready) + self.framework.observe(self.on.ingress_ready, self._publish_endpoints) + self.framework.observe(self.on.update_status, self._access_rules) - def _on_ingress_ready(self, event): + def _publish_endpoints(self, event): OtlpProvider(self).add_endpoint( protocol="grpc", endpoint="https://my-app.ingress:4317", @@ -39,9 +41,9 @@ class MyOtlpServer(CharmBase): telemetries=["traces"], ).publish() - # optionally, get the alerting and recording rules - promql_rules = OtlpProvider(self).rules("promql") - logql_rules = OtlpProvider(self).rules("logql") + def _access_rules(self, event): + OtlpProvider(self).rules("promql") + OtlpProvider(self).rules("logql") ``` ### Requirer Side @@ -52,6 +54,7 @@ from charmlibs.interfaces.otlp import OtlpRequirer class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) + self.framework.observe(self.on.update_status, self._access_endpoints) self.framework.observe(self.on.update_status, self._publish_rules) def _publish_rules(self, _: 
ops.EventBase) -> None: @@ -59,9 +62,16 @@ class MyOtlpSender(CharmBase): self, protocols=["grpc", "http"], telemetries=["logs", "metrics", "traces"], - loki_rules_path="./src/loki_alert_rules", - prometheus_rules_path="./src/prometheus_alert_rules", ).publish() + + def _access_endpoints(self, _: ops.EventBase) -> None: + rules = ( + RuleStore(JujuTopology.from_charm(self)) + .add_logql(SINGLE_LOGQL_ALERT, group_name='test_logql_alert') + .add_promql(SINGLE_PROMQL_RECORD, group_name='test_promql_record') + .add_logql(OFFICIAL_LOGQL_RULES) + ) + OtlpRequirer(self, rules=rules).publish() ``` ## Documentation diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py index b239ddb16..68971bee1 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py @@ -20,7 +20,7 @@ `requirements `_ of the project. -This library provides a way for charms to share OTLP endpoint information and associated Loki and +This library provides a way for charms to share OTLP endpoint information, and associated Loki and Prometheus rules. This library requires that the charm's workload already supports sending/receiving OTLP data and focuses on communicating those endpoints. @@ -38,9 +38,9 @@ class MyOtlpServer(CharmBase): def __init__(self, *args): super().__init__(*args) - self.framework.observe(self.on.ingress_ready, self._on_ingress_ready) + self.framework.observe(self.on.ingress_ready, self._publish_endpoints) - def _on_ingress_ready(self, event): + def _publish_endpoints(self, event): OtlpProvider(self).add_endpoint( protocol="grpc", endpoint="https://my-app.ingress:4317", @@ -58,9 +58,16 @@ def _on_ingress_ready(self, event): The OtlpProvider also consumes rules from related OtlpRequirer charms, which can be retrieved with the ``rules()`` method:: - # snip ... 
- promql_rules = OtlpProvider(self).rules("promql") - logql_rules = OtlpProvider(self).rules("logql") + from charmlibs.interfaces.otlp import OtlpProvider + + class MyOtlpServer(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.update_status, self._access_rules) + + def _access_rules(self, event): + OtlpProvider(self).rules("promql") + OtlpProvider(self).rules("logql") Requirer Side (Charms requiring OTLP endpoints) ----------------------------------------------- @@ -69,7 +76,7 @@ def _on_ingress_ready(self, event): subset of protocols and telemetries, which can be configured at instantiation:: from charmlibs.interfaces.otlp import OtlpRequirer -TODO: Update these examples + class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -80,8 +87,6 @@ def _publish_rules(self, _: ops.EventBase) -> None: self, protocols=["grpc", "http"], telemetries=["logs", "metrics", "traces"], - loki_rules_path="./src/loki_alert_rules", - prometheus_rules_path="./src/prometheus_alert_rules", ).publish() Given the defined, supported protocols and telemetries, the OtlpRequirer will filter out @@ -91,14 +96,24 @@ def _publish_rules(self, _: ops.EventBase) -> None: That means an endpoint supporting the `gRPC` protocol will be selected over one supporting `HTTP`. Unknown protocols will receive the lowest priority. -The OtlpRequirer also publishes rules to related OtlpProvider charms with the ``publish()`` -method:: +The OtlpRequirer also publishes user-defined and generic (applied to all charms) rules to related +OtlpProvider charms with the ``publish()`` method:: - # snip ... - OtlpRequirer(...).publish() + from charmlibs.interfaces.otlp import OtlpRequirer -It is the charm's responsibility to manage the rules in the ``loki_rules_path`` and -``prometheus_rules_path`` directories, which will be forwarded to the related OtlpProvider charms. 
+ class MyOtlpSender(CharmBase): + def __init__(self, framework: ops.Framework): + super().__init__(framework) + self.framework.observe(self.on.update_status, self._access_endpoints) + + def _access_endpoints(self, _: ops.EventBase) -> None: + rules = ( + RuleStore(JujuTopology.from_charm(self)) + .add_logql(SINGLE_LOGQL_ALERT, group_name='test_logql_alert') + .add_promql(SINGLE_PROMQL_RECORD, group_name='test_promql_record') + .add_logql(OFFICIAL_LOGQL_RULES) + ) + OtlpRequirer(self, rules=rules).publish() Relation Data Format ==================== @@ -119,14 +134,17 @@ def _publish_rules(self, _: ops.EventBase) -> None: }, ] -The OtlpRequirer offers compressed rules in the relation databag under the ``rules`` key. The -charm's metadata is included under the ``metadata`` key for the provider to know the source of the -rules:: +The OtlpRequirer offers compressed rules in the relation databag under the ``rules`` key, which +have this structure when decompressed:: "rules": { "promql": {...}, "logql": {...}, } + +The charm's metadata is included under the ``metadata`` key for the provider to know the source of +the rules:: + "metadata": { "model": "my-model", "model_uuid": "f4d59020-c8e7-4053-8044-a2c1e5591c7f", diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py index 253ebd82f..2a155579d 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py @@ -333,7 +333,7 @@ def publish(self) -> None: for relation in self._charm.model.relations[self._relation_name]: relation.save(databag, self._charm.app) - def rules(self, query_type: Literal['logql', 'promql']) -> dict[str, dict[str, Any]]: + def rules(self, query_type: Literal['logql', 'promql']) -> dict[str, OfficialRuleFileFormat]: """Fetch rules for all relations of the desired query and rule types. 
This method returns all rules of the desired query and rule types diff --git a/interfaces/otlp/tests/unit/test_rules.py b/interfaces/otlp/tests/unit/test_rules.py index 60199e5d2..5a8a2e638 100644 --- a/interfaces/otlp/tests/unit/test_rules.py +++ b/interfaces/otlp/tests/unit/test_rules.py @@ -166,12 +166,13 @@ def test_provider_rules( for group in groups: for rule in group.get('rules', []): # AND the rules are labeled with the provider's topology - assert rule['labels']['juju_model'] == 'foo-model' + assert rule.get('labels', {}).get('juju_model') == 'foo-model' assert ( - rule['labels']['juju_model_uuid'] == 'f4d59020-c8e7-4053-8044-a2c1e5591c7f' + rule.get('labels', {}).get('juju_model_uuid') + == 'f4d59020-c8e7-4053-8044-a2c1e5591c7f' ) - assert rule['labels']['juju_application'] == app - assert rule['labels']['juju_charm'] == charm + assert rule.get('labels', {}).get('juju_application') == app + assert rule.get('labels', {}).get('juju_charm') == charm # AND the expressions are labeled assert 'juju_model="foo-model"' in rule['expr'] From 4239b5bf328a290641cc240290f0be423d3a43d9 Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Mon, 23 Mar 2026 13:58:46 -0400 Subject: [PATCH 05/10] chore: cosl==1.7.0 --- interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py | 2 +- interfaces/otlp/uv.lock | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py index 2a155579d..2a484f5d9 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/_otlp.py @@ -348,7 +348,7 @@ def rules(self, query_type: Literal['logql', 'promql']) -> dict[str, OfficialRul a mapping of relation ID to a dictionary of alert rule groups following the OfficialRuleFileFormat from cos-lib. 
""" - rules_map: dict[str, dict[str, Any]] = {} + rules_map: dict[str, OfficialRuleFileFormat] = {} # Instantiate Rules with topology to ensure that rules always have an identifier rules_obj = Rules(query_type, self._topology) for relation in self._charm.model.relations[self._relation_name]: diff --git a/interfaces/otlp/uv.lock b/interfaces/otlp/uv.lock index 5804e5001..02862b0cf 100644 --- a/interfaces/otlp/uv.lock +++ b/interfaces/otlp/uv.lock @@ -152,7 +152,7 @@ wheels = [ [[package]] name = "cosl" -version = "1.6.1" +version = "1.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ops" }, @@ -161,9 +161,9 @@ dependencies = [ { name = "tenacity" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e5/98/1a1f8aa7965ede9abfeb649b83375baf4e2f523778f90b841281cbe3603a/cosl-1.6.1.tar.gz", hash = "sha256:f96a6a978dfdee4a3b460cc48fa18514663bbc1c3a4f323315e3dbe3e6a2a596", size = 149512, upload-time = "2026-03-09T21:44:46.744Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8b/86/a1756838edef0bb2e82bbb51d1164dda731334dc0494f3c69d006c7f6429/cosl-1.7.0.tar.gz", hash = "sha256:ab6e1f74a9ddcd8f55fe36b7bedb4c0fe983b5b35776094b31a31e48e63808b6", size = 149830, upload-time = "2026-03-23T17:55:25.872Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/8f/3ca0f470fbc7b26ed33b5fe5815e38b6a628b8bb4df961924cae38755c46/cosl-1.6.1-py3-none-any.whl", hash = "sha256:12db85a81317c5b056171642098be91c09e78e04875ed1262b99681dea43b533", size = 37800, upload-time = "2026-03-09T21:44:45.373Z" }, + { url = "https://files.pythonhosted.org/packages/35/3d/30d2a98fa06c72abb79aceb06f8e6a9848e806d25f86f4f327afa9b13a0b/cosl-1.7.0-py3-none-any.whl", hash = "sha256:a3469c228a0c89418a5d0c9c2c72b520b4f9973fab35ac193b1de9adfac8a4fb", size = 37833, upload-time = "2026-03-23T17:55:24.592Z" }, ] [[package]] From f44fdcdb888c2a3262cca9d1649632018ffbd6bc Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Mon, 23 
Mar 2026 15:03:17 -0400 Subject: [PATCH 06/10] chore --- .gitignore | 1 + interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 2d5d14888..dcb59288a 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST +cos-tool* # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py index 68971bee1..f8a9d381d 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py @@ -158,6 +158,7 @@ def _access_endpoints(self, _: ops.EventBase) -> None: OtlpEndpoint, OtlpProvider, OtlpRequirer, + RuleStore, ) from ._version import __version__ as __version__ @@ -167,4 +168,5 @@ def _access_endpoints(self, _: ops.EventBase) -> None: 'OtlpEndpoint', 'OtlpProvider', 'OtlpRequirer', + 'RuleStore', ] From 04f9687030370c2f53b529d3d6e0f435ddfa3aa8 Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Mon, 23 Mar 2026 16:29:26 -0400 Subject: [PATCH 07/10] chore: docs cleanup --- interfaces/otlp/README.md | 16 ++++++++-------- .../src/charmlibs/interfaces/otlp/__init__.py | 12 ++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/interfaces/otlp/README.md b/interfaces/otlp/README.md index 026296404..92a737361 100644 --- a/interfaces/otlp/README.md +++ b/interfaces/otlp/README.md @@ -49,7 +49,7 @@ class MyOtlpServer(CharmBase): ### Requirer Side ```python -from charmlibs.interfaces.otlp import OtlpRequirer +from charmlibs.interfaces.otlp import OtlpEndpoint, OtlpRequirer class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): @@ -58,13 +58,6 @@ class MyOtlpSender(CharmBase): 
self.framework.observe(self.on.update_status, self._publish_rules) def _publish_rules(self, _: ops.EventBase) -> None: - OtlpRequirer( - self, - protocols=["grpc", "http"], - telemetries=["logs", "metrics", "traces"], - ).publish() - - def _access_endpoints(self, _: ops.EventBase) -> None: rules = ( RuleStore(JujuTopology.from_charm(self)) .add_logql(SINGLE_LOGQL_ALERT, group_name='test_logql_alert') @@ -72,6 +65,13 @@ class MyOtlpSender(CharmBase): .add_logql(OFFICIAL_LOGQL_RULES) ) OtlpRequirer(self, rules=rules).publish() + + def _access_endpoints(self, _: ops.EventBase) -> dict[int, OtlpEndpoint]: + OtlpRequirer( + self, + protocols=["grpc", "http"], + telemetries=["logs", "metrics", "traces"], + ).endpoints ``` ## Documentation diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py index f8a9d381d..56b5c88de 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py @@ -75,19 +75,19 @@ def _access_rules(self, event): To consume OTLP endpoints, use the ``OtlpRequirer`` class. 
The OTLP sender may only support a subset of protocols and telemetries, which can be configured at instantiation:: - from charmlibs.interfaces.otlp import OtlpRequirer + from charmlibs.interfaces.otlp import OtlpEndpoint, OtlpRequirer class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) - self.framework.observe(self.on.update_status, self._publish_rules) + self.framework.observe(self.on.update_status, self._access_endpoints) - def _publish_rules(self, _: ops.EventBase) -> None: + def _access_endpoints(self, _: ops.EventBase) -> dict[int, OtlpEndpoint]: OtlpRequirer( self, protocols=["grpc", "http"], telemetries=["logs", "metrics", "traces"], - ).publish() + ).endpoints Given the defined, supported protocols and telemetries, the OtlpRequirer will filter out unsupported endpoints and prune unsupported telemetries. After filtering, requirer selection @@ -104,9 +104,9 @@ def _publish_rules(self, _: ops.EventBase) -> None: class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) - self.framework.observe(self.on.update_status, self._access_endpoints) + self.framework.observe(self.on.update_status, self._publish_rules) - def _access_endpoints(self, _: ops.EventBase) -> None: + def _publish_rules(self, _: ops.EventBase) -> None: rules = ( RuleStore(JujuTopology.from_charm(self)) .add_logql(SINGLE_LOGQL_ALERT, group_name='test_logql_alert') From 877a7be1bfd489a87b4c865f165b373f328e5530 Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Mon, 23 Mar 2026 16:40:48 -0400 Subject: [PATCH 08/10] chore: doc updates --- interfaces/otlp/README.md | 6 +++--- interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/interfaces/otlp/README.md b/interfaces/otlp/README.md index 92a737361..9c8b84cb2 100644 --- a/interfaces/otlp/README.md +++ b/interfaces/otlp/README.md @@ -49,7 +49,7 @@ class MyOtlpServer(CharmBase): 
### Requirer Side ```python -from charmlibs.interfaces.otlp import OtlpEndpoint, OtlpRequirer +from charmlibs.interfaces.otlp import OtlpRequirer class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): @@ -57,7 +57,7 @@ class MyOtlpSender(CharmBase): self.framework.observe(self.on.update_status, self._access_endpoints) self.framework.observe(self.on.update_status, self._publish_rules) - def _publish_rules(self, _: ops.EventBase) -> None: + def _publish_rules(self, _: ops.EventBase): rules = ( RuleStore(JujuTopology.from_charm(self)) .add_logql(SINGLE_LOGQL_ALERT, group_name='test_logql_alert') @@ -66,7 +66,7 @@ class MyOtlpSender(CharmBase): ) OtlpRequirer(self, rules=rules).publish() - def _access_endpoints(self, _: ops.EventBase) -> dict[int, OtlpEndpoint]: + def _access_endpoints(self, _: ops.EventBase): OtlpRequirer( self, protocols=["grpc", "http"], diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py index 56b5c88de..976c07dca 100644 --- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py +++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py @@ -75,14 +75,14 @@ def _access_rules(self, event): To consume OTLP endpoints, use the ``OtlpRequirer`` class. 
The OTLP sender may only support a subset of protocols and telemetries, which can be configured at instantiation:: - from charmlibs.interfaces.otlp import OtlpEndpoint, OtlpRequirer + from charmlibs.interfaces.otlp import OtlpRequirer class MyOtlpSender(CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) self.framework.observe(self.on.update_status, self._access_endpoints) - def _access_endpoints(self, _: ops.EventBase) -> dict[int, OtlpEndpoint]: + def _access_endpoints(self, _: ops.EventBase): OtlpRequirer( self, protocols=["grpc", "http"], @@ -106,7 +106,7 @@ def __init__(self, framework: ops.Framework): super().__init__(framework) self.framework.observe(self.on.update_status, self._publish_rules) - def _publish_rules(self, _: ops.EventBase) -> None: + def _publish_rules(self, _: ops.EventBase): rules = ( RuleStore(JujuTopology.from_charm(self)) .add_logql(SINGLE_LOGQL_ALERT, group_name='test_logql_alert') From da44299e07f0f49b795b533d007c15f979efbf0c Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Tue, 24 Mar 2026 13:56:21 -0400 Subject: [PATCH 09/10] chore: PR review --- interfaces/otlp/tests/unit/test_rules.py | 26 ++++++++++++------------ 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/interfaces/otlp/tests/unit/test_rules.py b/interfaces/otlp/tests/unit/test_rules.py index 5a8a2e638..67b896c21 100644 --- a/interfaces/otlp/tests/unit/test_rules.py +++ b/interfaces/otlp/tests/unit/test_rules.py @@ -22,7 +22,10 @@ SINGLE_PROMQL_RECORD, ) -MODEL = Model('foo-model', uuid='f4d59020-c8e7-4053-8044-a2c1e5591c7f') +MODEL_NAME = 'foo-model' +MODEL_UUID = 'f4d59020-c8e7-4053-8044-a2c1e5591c7f' +MODEL_SHORT_UUID = 'f4d59020' +MODEL = Model(MODEL_NAME, uuid=MODEL_UUID) def _decompress(rules: str | None) -> dict[str, Any]: @@ -104,8 +107,8 @@ def test_metadata(otlp_requirer_ctx: testing.Context[ops.CharmBase]): for relation in list(state_out.relations): # THEN the requirer adds its own metadata to the databag assert 
json.loads(relation.local_app_data['metadata']) == { - 'model': 'foo-model', - 'model_uuid': 'f4d59020-c8e7-4053-8044-a2c1e5591c7f', + 'model': MODEL_NAME, + 'model_uuid': MODEL_UUID, 'application': 'otlp-requirer', 'unit': 'otlp-requirer/0', 'charm_name': 'otlp-requirer', @@ -117,8 +120,8 @@ def test_metadata(otlp_requirer_ctx: testing.Context[ops.CharmBase]): [ {}, { - 'model': 'foo-model', - 'model_uuid': 'f4d59020-c8e7-4053-8044-a2c1e5591c7f', + 'model': MODEL_NAME, + 'model_uuid': MODEL_UUID, 'application': 'otlp-requirer', 'charm_name': 'otlp-requirer', 'unit': 'otlp-requirer/0', @@ -159,22 +162,19 @@ def test_provider_rules( charm = metadata['charm_name'] if metadata else 'otlp-provider' # THEN the identifier is present - identifier = 'foo-model_f4d59020_' + app + identifier = f'{MODEL_NAME}_{MODEL_SHORT_UUID}_{app}' assert identifier in result groups = result[identifier].get('groups', []) assert groups for group in groups: for rule in group.get('rules', []): # AND the rules are labeled with the provider's topology - assert rule.get('labels', {}).get('juju_model') == 'foo-model' - assert ( - rule.get('labels', {}).get('juju_model_uuid') - == 'f4d59020-c8e7-4053-8044-a2c1e5591c7f' - ) + assert rule.get('labels', {}).get('juju_model') == MODEL_NAME + assert rule.get('labels', {}).get('juju_model_uuid') == MODEL_UUID assert rule.get('labels', {}).get('juju_application') == app assert rule.get('labels', {}).get('juju_charm') == charm # AND the expressions are labeled - assert 'juju_model="foo-model"' in rule['expr'] - assert 'juju_model_uuid="f4d59020-c8e7-4053-8044-a2c1e5591c7f"' in rule['expr'] + assert f'juju_model="{MODEL_NAME}"' in rule['expr'] + assert f'juju_model_uuid="{MODEL_UUID}"' in rule['expr'] assert f'juju_application="{app}"' in rule['expr'] From a91840c7121e0cc0b37f036b725b09b3a3129d05 Mon Sep 17 00:00:00 2001 From: Michael Thamm Date: Tue, 24 Mar 2026 14:06:08 -0400 Subject: [PATCH 10/10] Import RuleStore in the docs Co-authored-by: Sina P 
<55766091+sinapah@users.noreply.github.com>
---
 interfaces/otlp/README.md                                 | 2 +-
 interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/interfaces/otlp/README.md b/interfaces/otlp/README.md
index 9c8b84cb2..1bcb3db88 100644
--- a/interfaces/otlp/README.md
+++ b/interfaces/otlp/README.md
@@ -49,7 +49,7 @@ class MyOtlpServer(CharmBase):
 ### Requirer Side
 
 ```python
-from charmlibs.interfaces.otlp import OtlpRequirer
+from charmlibs.interfaces.otlp import OtlpRequirer, RuleStore
 
 class MyOtlpSender(CharmBase):
     def __init__(self, framework: ops.Framework):
diff --git a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py
index 976c07dca..7624dfee6 100644
--- a/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py
+++ b/interfaces/otlp/src/charmlibs/interfaces/otlp/__init__.py
@@ -99,7 +99,7 @@ def _access_endpoints(self, _: ops.EventBase):
 The OtlpRequirer also publishes user-defined and generic (applied to all charms) rules to
 related OtlpProvider charms with the ``publish()`` method::
 
-    from charmlibs.interfaces.otlp import OtlpRequirer
+    from charmlibs.interfaces.otlp import OtlpRequirer, RuleStore
 
 class MyOtlpSender(CharmBase):
     def __init__(self, framework: ops.Framework):