diff --git a/pyproject.toml b/pyproject.toml index edb94ddd..41684a2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ dependencies = [ "numpy>=2.0.0,<3", "aioquic>=1.2.0,<2", "pyyaml>=6.0.0,<7", + "prometheus-client>=0.21.0,<1", ] [project.license] diff --git a/src/lean_spec/__main__.py b/src/lean_spec/__main__.py index a3a1ae92..f5944709 100644 --- a/src/lean_spec/__main__.py +++ b/src/lean_spec/__main__.py @@ -19,6 +19,7 @@ --validator-keys Path to validator keys directory --node-id Node identifier for validator assignment (default: lean_spec_0) --is-aggregator Enable aggregator mode for attestation aggregation (default: false) + --api-port Port for API server and Prometheus /metrics (default: 5052, 0 to disable) """ from __future__ import annotations @@ -32,6 +33,7 @@ from pathlib import Path from typing import Final +from lean_spec.subspecs.api import ApiServerConfig from lean_spec.subspecs.chain.config import ATTESTATION_COMMITTEE_COUNT from lean_spec.subspecs.containers import Block, BlockBody, Checkpoint, State from lean_spec.subspecs.containers.block.types import AggregatedAttestations @@ -39,6 +41,7 @@ from lean_spec.subspecs.containers.validator import SubnetId from lean_spec.subspecs.forkchoice import Store from lean_spec.subspecs.genesis import GenesisConfig +from lean_spec.subspecs.metrics import registry as metrics from lean_spec.subspecs.networking.client import LiveNetworkEventSource from lean_spec.subspecs.networking.enr import ENR from lean_spec.subspecs.networking.gossipsub import GossipTopic @@ -161,6 +164,7 @@ def _init_from_genesis( event_source: LiveNetworkEventSource, validator_registry: ValidatorRegistry | None = None, is_aggregator: bool = False, + api_port: int | None = None, ) -> Node: """ Initialize a node from genesis configuration. @@ -170,6 +174,7 @@ def _init_from_genesis( event_source: Network transport for the node. validator_registry: Optional registry with validator secret keys. 
is_aggregator: Enable aggregator mode for attestation aggregation. + api_port: Port for API server and /metrics. None disables the API. Returns: A fully initialized Node starting from genesis. @@ -192,6 +197,7 @@ def _init_from_genesis( validator_registry=validator_registry, fork_digest=GOSSIP_FORK_DIGEST, is_aggregator=is_aggregator, + api_config=ApiServerConfig(port=api_port) if api_port is not None else None, ) # Create and return the node. @@ -204,6 +210,7 @@ async def _init_from_checkpoint( event_source: LiveNetworkEventSource, validator_registry: ValidatorRegistry | None = None, is_aggregator: bool = False, + api_port: int | None = None, ) -> Node | None: """ Initialize a node from a checkpoint state fetched from a remote node. @@ -231,6 +238,7 @@ async def _init_from_checkpoint( event_source: Network transport for the node. validator_registry: Optional registry with validator secret keys. is_aggregator: Enable aggregator mode for attestation aggregation. + api_port: Port for API server and /metrics. None disables the API. Returns: A fully initialized Node if successful, None if checkpoint sync failed. @@ -298,6 +306,7 @@ async def _init_from_checkpoint( validator_registry=validator_registry, fork_digest=GOSSIP_FORK_DIGEST, is_aggregator=is_aggregator, + api_config=ApiServerConfig(port=api_port) if api_port is not None else None, ) # Create node and inject checkpoint store. @@ -390,6 +399,7 @@ async def run_node( node_id: str = "lean_spec_0", genesis_time_now: bool = False, is_aggregator: bool = False, + api_port: int | None = 5052, ) -> None: """ Run the lean consensus node. @@ -403,7 +413,9 @@ async def run_node( node_id: Node identifier for validator assignment. genesis_time_now: Override genesis time to current time for testing. is_aggregator: Enable aggregator mode for attestation aggregation. + api_port: Port for API server (health, fork_choice, /metrics). None or 0 disables. 
""" + metrics.init(name="leanspec-node", version="0.0.1") logger.info("Loading genesis from %s", genesis_path) genesis = GenesisConfig.from_yaml_file(genesis_path) @@ -506,6 +518,8 @@ async def run_node( # - Starts from block 0 with initial validator set # - Must process every block to reach current head # - Only practical for new or small networks + api_port_int: int | None = api_port if api_port and api_port > 0 else None + node: Node | None if checkpoint_sync_url is not None: node = await _init_from_checkpoint( @@ -514,6 +528,7 @@ async def run_node( event_source=event_source, validator_registry=validator_registry, is_aggregator=is_aggregator, + api_port=api_port_int, ) if node is None: # Checkpoint sync failed. Exit rather than falling back. @@ -527,6 +542,7 @@ async def run_node( event_source=event_source, validator_registry=validator_registry, is_aggregator=is_aggregator, + api_port=api_port_int, ) logger.info("Node initialized, peer_id=%s", event_source.connection_manager.peer_id) @@ -662,6 +678,13 @@ def main() -> None: action="store_true", help="Enable aggregator mode (node performs attestation aggregation)", ) + parser.add_argument( + "--api-port", + type=int, + default=5052, + metavar="PORT", + help="Port for API server and /metrics (default: 5052). Set 0 to disable.", + ) args = parser.parse_args() @@ -680,6 +703,7 @@ def main() -> None: node_id=args.node_id, genesis_time_now=args.genesis_time_now, is_aggregator=args.is_aggregator, + api_port=args.api_port, ) ) except KeyboardInterrupt: diff --git a/src/lean_spec/subspecs/api/endpoints/__init__.py b/src/lean_spec/subspecs/api/endpoints/__init__.py index 05654044..dddd6951 100644 --- a/src/lean_spec/subspecs/api/endpoints/__init__.py +++ b/src/lean_spec/subspecs/api/endpoints/__init__.py @@ -1,10 +1,11 @@ """API endpoint specifications.""" -from . import checkpoints, fork_choice, health, states +from . 
import checkpoints, fork_choice, health, metrics, states __all__ = [ "checkpoints", "fork_choice", "health", + "metrics", "states", ] diff --git a/src/lean_spec/subspecs/api/endpoints/metrics.py b/src/lean_spec/subspecs/api/endpoints/metrics.py new file mode 100644 index 00000000..5bc9cecc --- /dev/null +++ b/src/lean_spec/subspecs/api/endpoints/metrics.py @@ -0,0 +1,19 @@ +"""Metrics endpoint (Prometheus exposition).""" + +from __future__ import annotations + +from aiohttp import web + +from lean_spec.subspecs.metrics.registry import get_metrics_output + +CONTENT_TYPE = "text/plain; version=0.0.4; charset=utf-8" + + +async def handle(_request: web.Request) -> web.Response: + """ + Handle Prometheus metrics scrape request. + + Returns metrics in Prometheus text exposition format. + """ + body = get_metrics_output() + return web.Response(body=body, headers={"Content-Type": CONTENT_TYPE}) diff --git a/src/lean_spec/subspecs/api/routes.py b/src/lean_spec/subspecs/api/routes.py index a06eefa5..840f5584 100644 --- a/src/lean_spec/subspecs/api/routes.py +++ b/src/lean_spec/subspecs/api/routes.py @@ -6,12 +6,13 @@ from aiohttp import web -from .endpoints import checkpoints, fork_choice, health, states +from .endpoints import checkpoints, fork_choice, health, metrics, states ROUTES: dict[str, Callable[[web.Request], Awaitable[web.Response]]] = { "/lean/v0/health": health.handle, "/lean/v0/states/finalized": states.handle_finalized, "/lean/v0/checkpoints/justified": checkpoints.handle_justified, "/lean/v0/fork_choice": fork_choice.handle, + "/metrics": metrics.handle, } """All API routes mapped to their handlers.""" diff --git a/src/lean_spec/subspecs/containers/block/types.py b/src/lean_spec/subspecs/containers/block/types.py index cd513e38..ee2b4124 100644 --- a/src/lean_spec/subspecs/containers/block/types.py +++ b/src/lean_spec/subspecs/containers/block/types.py @@ -2,10 +2,14 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from collections.abc import
Iterator +from typing import TYPE_CHECKING, Any + +from pydantic import GetCoreSchemaHandler +from pydantic_core import CoreSchema, core_schema from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof -from lean_spec.types import Bytes32, SSZList +from lean_spec.types import ZERO_HASH, Bytes32, SSZList from ...chain.config import VALIDATOR_REGISTRY_LIMIT from ..attestation import AggregatedAttestation @@ -13,8 +17,112 @@ if TYPE_CHECKING: from .block import Block -BlockLookup = dict[Bytes32, "Block"] -"""Mapping from block root to Block objects.""" + +class BlockLookup(dict[Bytes32, "Block"]): + """ + Index of all known blocks, keyed by block root. + + The fork choice store uses this mapping to navigate the block tree. + Every block the node has received and validated appears here. + + Blockchains can fork when two valid blocks reference the same parent. + This creates a tree structure rather than a single chain. + Walking this tree is essential for: + + - Determining ancestor relationships between blocks + - Measuring reorganization depth when the head changes + - Resolving which chain is canonical + + Supports Pydantic validation so it can be used in store models. 
+ """ + + @classmethod + def __get_pydantic_core_schema__( + cls, source_type: Any, handler: GetCoreSchemaHandler + ) -> CoreSchema: + """Define Pydantic validation: accept plain dicts and coerce.""" + return core_schema.no_info_plain_validator_function( + cls._validate, + serialization=core_schema.plain_serializer_function_ser_schema(dict), + ) + + @classmethod + def _validate(cls, v: Any) -> BlockLookup: + if isinstance(v, cls): + return v + if isinstance(v, dict): + return cls(v) + raise ValueError(f"expected dict or BlockLookup, got {type(v)}") + + def __or__(self, other: dict[Bytes32, Block]) -> BlockLookup: + """Merge with another dict, preserving the BlockLookup type.""" + return BlockLookup(super().__or__(other)) + + def ancestors(self, root: Bytes32) -> Iterator[Bytes32]: + """ + Walk the chain backward from a block toward genesis. + + Each block points to its parent via parent_root. + This method follows those links, yielding each block root + along the way. The walk stops when it reaches: + + - A block whose parent is the zero hash (genesis boundary) + - A block whose parent is not in the lookup (pruned history) + + Fork choice relies on ancestor walks to compare chains. + Two blocks share a common ancestor if their ancestor sets + overlap. The point where they diverge defines a fork. + + Args: + root: Starting block root. Walk proceeds toward genesis. + + Yields: + Block roots from the starting block back to the oldest + reachable ancestor (inclusive on both ends). + """ + while root in self: + yield root + + # Follow the parent link one step back. + # + # A zero-hash parent means this block sits at the genesis + # boundary. No further ancestors exist. + parent = self[root].parent_root + if parent == ZERO_HASH: + break + root = parent + + def reorg_depth(self, old_head: Bytes32, new_head: Bytes32) -> int: + """ + Count how many blocks the old head must revert to reach the new chain. 
+ + A reorganization (reorg) happens when fork choice switches to a + different chain. The depth measures how many blocks on the old + chain are abandoned. Deeper reorgs are more disruptive because + more transactions and attestations are reverted. + + The algorithm finds the common ancestor by collecting the new + chain's ancestors, then counting old-chain blocks that are not + in that set. + + Args: + old_head: The previous canonical head block root. + new_head: The new canonical head block root. + + Returns: + Number of old-chain blocks between old_head and the common + ancestor (exclusive of the common ancestor itself). + Returns 0 when both heads are the same. + """ + # Collect the full ancestry of the new head. + # + # This set lets us identify the common ancestor efficiently. + ancestors_of_new = set(self.ancestors(new_head)) + + # Count old-chain blocks not shared with the new chain. + # + # Each such block represents one slot of reverted history. + return sum(1 for root in self.ancestors(old_head) if root not in ancestors_of_new) class AggregatedAttestations(SSZList[AggregatedAttestation]): diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index f758410b..da982167 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -6,6 +6,7 @@ __all__ = ["GossipSignatureEntry", "Store"] +import time from collections import defaultdict from typing import NamedTuple @@ -29,6 +30,7 @@ from lean_spec.subspecs.containers.attestation.attestation import SignedAggregatedAttestation from lean_spec.subspecs.containers.block import BlockLookup from lean_spec.subspecs.containers.slot import Slot +from lean_spec.subspecs.metrics import registry as metrics from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import ( AggregatedSignatureProof, @@ -113,7 +115,7 @@ class Store(StrictBaseModel): Fork choice will never revert finalized 
history. """ - blocks: BlockLookup = {} + blocks: BlockLookup = BlockLookup() """ Mapping from block root to Block objects. @@ -220,7 +222,7 @@ def from_anchor( safe_target=anchor_root, latest_justified=state.latest_justified.model_copy(update={"root": anchor_root}), latest_finalized=state.latest_finalized.model_copy(update={"root": anchor_root}), - blocks={anchor_root: anchor_block}, + blocks=BlockLookup({anchor_root: anchor_block}), states={anchor_root: state}, validator_id=validator_id, ) @@ -519,6 +521,9 @@ def on_block( if block_root in self.blocks: return self + t0 = time.perf_counter() + old_head = self.head + # Verify parent chain is available # # The parent state must exist before processing this block. @@ -533,7 +538,11 @@ def on_block( valid_signatures = signed_block_with_attestation.verify_signatures(parent_state, scheme) # Execute state transition function to compute post-block state + state_transition_start = time.perf_counter() post_state = parent_state.state_transition(block, valid_signatures) + metrics.lean_state_transition_time_seconds.observe( + time.perf_counter() - state_transition_start + ) # Propagate any checkpoint advances from the post-state. latest_justified = max( @@ -613,8 +622,32 @@ def on_block( if store.latest_finalized.slot > self.latest_finalized.slot: store = store.prune_stale_attestation_data() + metrics.lean_fork_choice_block_processing_time_seconds.observe(time.perf_counter() - t0) + store._record_metrics(old_head) + return store + def _record_metrics(self, old_head: Bytes32) -> None: + """ + Publish Prometheus metrics reflecting the current store state. + + Called after every block processing round. 
Updates: + + - Head and safe-target slot gauges + - Justified and finalized slot gauges + - Reorg counter and depth histogram (only when blocks were actually reverted) + """ + metrics.lean_head_slot.set(int(self.blocks[self.head].slot)) + metrics.lean_safe_target_slot.set(int(self.blocks[self.safe_target].slot)) + metrics.lean_latest_justified_slot.set(int(self.latest_justified.slot)) + metrics.lean_latest_finalized_slot.set(int(self.latest_finalized.slot)) + + if self.head != old_head: + depth = self.blocks.reorg_depth(old_head, self.head) + if depth > 0: + metrics.lean_fork_choice_reorgs_total.inc() + metrics.lean_fork_choice_reorg_depth.observe(depth) + def extract_attestations_from_aggregated_payloads( self, aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] ) -> dict[ ValidatorIndex, AttestationData]: diff --git a/src/lean_spec/subspecs/metrics/__init__.py b/src/lean_spec/subspecs/metrics/__init__.py new file mode 100644 index 00000000..9877c6ce --- /dev/null +++ b/src/lean_spec/subspecs/metrics/__init__.py @@ -0,0 +1,13 @@ +""" +Prometheus metrics for the lean consensus node. + +Metric names and types follow the leanMetrics spec: +https://github.com/leanEthereum/leanMetrics/blob/main/metrics.md +""" + +from .registry import get_metrics_output, registry + +__all__ = [ + "get_metrics_output", + "registry", +] diff --git a/src/lean_spec/subspecs/metrics/registry.py b/src/lean_spec/subspecs/metrics/registry.py new file mode 100644 index 00000000..d4d551c4 --- /dev/null +++ b/src/lean_spec/subspecs/metrics/registry.py @@ -0,0 +1,326 @@ +""" +Prometheus metric definitions aligned with the leanMetrics spec. + +Names, types, and buckets match +https://github.com/leanEthereum/leanMetrics/blob/main/metrics.md + +This module uses the null object pattern for zero-cost metrics before +initialization. Every metric attribute starts as a silent no-op stub. +After initialization, stubs are replaced with real Prometheus objects.
+ +This design gives consumers a stable API at import time. +No "is metrics enabled?" checks are needed anywhere in the codebase. +Code that records metrics works identically whether the Prometheus +subsystem is active or not. +""" + +from __future__ import annotations + +import time +from typing import TYPE_CHECKING + +from prometheus_client import ( + REGISTRY, + Counter, + Gauge, + Histogram, + generate_latest, +) + +if TYPE_CHECKING: + from prometheus_client import CollectorRegistry + +# Histogram bucket boundaries from the leanMetrics spec. +# +# Each tuple defines the upper bounds for a Prometheus histogram. +# Values are chosen to capture the expected latency distributions +# for each operation category. + +FORK_CHOICE_BLOCK_BUCKETS = (0.005, 0.01, 0.025, 0.05, 0.1, 1, 1.25, 1.5, 2, 4) +"""Seconds. Block processing in fork choice is typically sub-second.""" + +ATTESTATION_VALIDATION_BUCKETS = (0.005, 0.01, 0.025, 0.05, 0.1, 1) +"""Seconds. Attestation validation is fast, most finish under 100ms.""" + +STATE_TRANSITION_BUCKETS = (0.25, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 4) +"""Seconds. State transitions are heavier, spanning sub-second to multi-second.""" + +REORG_DEPTH_BUCKETS = (1, 2, 3, 5, 7, 10, 20, 30, 50, 100) +"""Block count. Reorg depths above 10 are rare and signal network issues.""" + + +class _NoOpMetric: + """ + Null object that absorbs all metric operations without side effects. + + This stub mirrors the subset of the Prometheus metric interface that + consumers actually use: + + - Gauge operations: set, inc + - Histogram operations: observe + - Label selection: labels (returns another no-op for chaining) + + A single shared instance serves all uninitialized metric attributes. + This avoids allocating one stub per metric and keeps memory overhead + near zero. 
+ """ + + def set(self, value: float) -> None: # noqa: ARG002 + """Accept and discard a gauge value.""" + + def inc(self, amount: float = 1) -> None: # noqa: ARG002 + """Accept and discard a counter increment.""" + + def observe(self, amount: float) -> None: # noqa: ARG002 + """Accept and discard a histogram observation.""" + + def labels(self, **kwargs: str) -> _NoOpMetric: # noqa: ARG002 + """ + Return self to support chained label selection. + + Prometheus metrics with labels require a selection step before + recording. Returning self allows the full chain to complete + silently. + """ + return self + + +_NOOP = _NoOpMetric() +"""Shared no-op instance used by all uninitialized metric attributes.""" + + +class MetricsRegistry: + """ + Central holder for all Prometheus metrics in a lean node. + + Attributes start as no-op stubs and become real Prometheus objects + after initialization. This two-phase lifecycle means: + + - Importing the module is always safe and cheap. + - Recording metrics works at any point in the node lifetime. + - No conditional "is metrics ready?" logic pollutes call sites. + + A single module-level instance acts as the singleton. + Consumers import that instance and use qualified attribute access. + """ + + _initialized: bool = False + + # Node info + lean_node_info: Gauge | _NoOpMetric = _NOOP + """Labeled gauge exposing node name and version. 
Always set to 1.""" + lean_node_start_time_seconds: Gauge | _NoOpMetric = _NOOP + """Unix timestamp recorded once at node startup.""" + + # Fork choice + lean_head_slot: Gauge | _NoOpMetric = _NOOP + """Slot of the current chain head selected by fork choice.""" + lean_current_slot: Gauge | _NoOpMetric = _NOOP + """Wall-clock slot derived from genesis time and the slot interval.""" + lean_safe_target_slot: Gauge | _NoOpMetric = _NOOP + """Slot of the highest target that has been deemed safe.""" + lean_fork_choice_block_processing_time_seconds: Histogram | _NoOpMetric = _NOOP + """Latency of integrating a new block into the fork choice store.""" + lean_attestations_valid_total: Counter | _NoOpMetric = _NOOP + """Running count of attestations that passed all validation checks.""" + lean_attestations_invalid_total: Counter | _NoOpMetric = _NOOP + """Running count of attestations rejected during validation.""" + lean_attestation_validation_time_seconds: Histogram | _NoOpMetric = _NOOP + """Latency of a single attestation validation pass.""" + lean_fork_choice_reorgs_total: Counter | _NoOpMetric = _NOOP + """Running count of chain head reorganizations.""" + lean_fork_choice_reorg_depth: Histogram | _NoOpMetric = _NOOP + """Number of blocks rolled back during each reorg event.""" + + # State transition + lean_latest_justified_slot: Gauge | _NoOpMetric = _NOOP + """Slot of the most recently justified checkpoint.""" + lean_latest_finalized_slot: Gauge | _NoOpMetric = _NOOP + """Slot of the most recently finalized checkpoint.""" + lean_state_transition_time_seconds: Histogram | _NoOpMetric = _NOOP + """Latency of applying a full state transition for one slot.""" + + # Validator + lean_validators_count: Gauge | _NoOpMetric = _NOOP + """Number of validator keys managed by this node.""" + + # Network + lean_connected_peers: Gauge | _NoOpMetric = _NOOP + """Current number of active peer connections.""" + + def init( + self, + name: str = "leanspec-node", + version: str = 
"0.0.1", + registry: CollectorRegistry | None = None, + ) -> None: + """ + Replace all no-op stubs with real Prometheus metric objects. + + Call once at node startup. The method is idempotent. + Repeated calls after the first are silently ignored. + This prevents double-registration errors in Prometheus. + + Metric categories created: + + - Node info: identity gauge and start timestamp + - Fork choice: head/current/safe slots, block processing, + attestation validation, reorg tracking + - State transition: justified/finalized slots, transition time + - Validator: managed validator count + - Network: connected peer count + + Args: + name: Human-readable node name exposed in the info gauge. + version: Node version exposed in the info gauge. + registry: Prometheus collector registry. Falls back to the + global default registry when not provided. + """ + # Guard against repeated initialization. + if self._initialized: + return + reg = registry or REGISTRY + + # Node info (leanMetrics: Node Info Metrics) + # + # The info gauge is always 1. Labels carry the identity metadata. 
+ self.lean_node_info = Gauge( + "lean_node_info", + "Node information (always 1).", + ["name", "version"], + registry=reg, + ) + self.lean_node_info.labels(name=name, version=version).set(1) + self.lean_node_start_time_seconds = Gauge( + "lean_node_start_time_seconds", + "Start timestamp.", + registry=reg, + ) + self.lean_node_start_time_seconds.set(time.time()) + + # Fork choice (leanMetrics: Fork-Choice Metrics) + self.lean_head_slot = Gauge( + "lean_head_slot", + "Latest slot of the lean chain.", + registry=reg, + ) + self.lean_current_slot = Gauge( + "lean_current_slot", + "Current slot of the lean chain.", + registry=reg, + ) + self.lean_safe_target_slot = Gauge( + "lean_safe_target_slot", + "Safe target slot.", + registry=reg, + ) + self.lean_fork_choice_block_processing_time_seconds = Histogram( + "lean_fork_choice_block_processing_time_seconds", + "Time taken to process block in fork choice.", + buckets=FORK_CHOICE_BLOCK_BUCKETS, + registry=reg, + ) + self.lean_attestations_valid_total = Counter( + "lean_attestations_valid_total", + "Total number of valid attestations.", + ["source"], + registry=reg, + ) + self.lean_attestations_invalid_total = Counter( + "lean_attestations_invalid_total", + "Total number of invalid attestations.", + ["source"], + registry=reg, + ) + self.lean_attestation_validation_time_seconds = Histogram( + "lean_attestation_validation_time_seconds", + "Time taken to validate attestation.", + buckets=ATTESTATION_VALIDATION_BUCKETS, + registry=reg, + ) + self.lean_fork_choice_reorgs_total = Counter( + "lean_fork_choice_reorgs_total", + "Total number of fork choice reorgs.", + registry=reg, + ) + self.lean_fork_choice_reorg_depth = Histogram( + "lean_fork_choice_reorg_depth", + "Depth of fork choice reorgs (in blocks).", + buckets=REORG_DEPTH_BUCKETS, + registry=reg, + ) + + # State transition (leanMetrics: State Transition Metrics) + self.lean_latest_justified_slot = Gauge( + "lean_latest_justified_slot", + "Latest justified slot.", + 
registry=reg, + ) + self.lean_latest_finalized_slot = Gauge( + "lean_latest_finalized_slot", + "Latest finalized slot.", + registry=reg, + ) + self.lean_state_transition_time_seconds = Histogram( + "lean_state_transition_time_seconds", + "Time to process state transition.", + buckets=STATE_TRANSITION_BUCKETS, + registry=reg, + ) + + # Validator (leanMetrics: Validator Metrics) + self.lean_validators_count = Gauge( + "lean_validators_count", + "Number of validators managed by a node.", + registry=reg, + ) + self.lean_validators_count.set(0) + + # Network (leanMetrics: Network Metrics) + self.lean_connected_peers = Gauge( + "lean_connected_peers", + "Number of connected peers.", + registry=reg, + ) + self.lean_connected_peers.set(0) + + self._initialized = True + + def reset(self) -> None: + """ + Restore all metrics to their initial no-op state. + + Intended exclusively for test teardown. + Production code should never call this. + + Clears all instance overrides so attributes fall back to + the class-level no-op defaults. + """ + self.__dict__.clear() + + +registry = MetricsRegistry() +""" +Module-level singleton shared by all consumers. + +Import this instance and use qualified attribute access +throughout the codebase. +""" + + +def get_metrics_output(registry: CollectorRegistry | None = None) -> bytes: + """ + Serialize all registered metrics into Prometheus text exposition format. + + Typically called by an HTTP handler to serve the ``/metrics`` endpoint. + The output is ready to return as a response body. + + Args: + registry: Prometheus collector registry to export. Falls back to + the global default registry when not provided. + + Returns: + UTF-8 encoded bytes in Prometheus text exposition format. 
+ """ + reg = registry or REGISTRY + return generate_latest(reg) diff --git a/src/lean_spec/subspecs/node/node.py b/src/lean_spec/subspecs/node/node.py index b221a64a..a002f40c 100644 --- a/src/lean_spec/subspecs/node/node.py +++ b/src/lean_spec/subspecs/node/node.py @@ -30,11 +30,13 @@ from lean_spec.subspecs.chain.service import ChainService from lean_spec.subspecs.containers import Block, BlockBody, SignedBlockWithAttestation, State from lean_spec.subspecs.containers.attestation import SignedAttestation +from lean_spec.subspecs.containers.block import BlockLookup from lean_spec.subspecs.containers.block.types import AggregatedAttestations from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.containers.state import Validators from lean_spec.subspecs.containers.validator import ValidatorIndex from lean_spec.subspecs.forkchoice import Store +from lean_spec.subspecs.metrics import registry as metrics from lean_spec.subspecs.networking import NetworkService from lean_spec.subspecs.networking.client.event_source import EventSource from lean_spec.subspecs.ssz.hash import hash_tree_root @@ -394,7 +396,7 @@ def _try_load_from_database( safe_target=head_root, latest_justified=justified, latest_finalized=finalized, - blocks={head_root: head_block}, + blocks=BlockLookup({head_root: head_block}), states={head_root: head_state}, validator_id=validator_id, ) @@ -459,6 +461,8 @@ async def _log_justified_finalized_periodically(self) -> None: Runs every _JUSTIFIED_FINALIZED_LOG_INTERVAL_SEC seconds to aid monitoring and debugging of consensus progress. + Also updates Prometheus gauges for current_slot, connected_peers, + and validators_count (on scrape). 
""" while not self._shutdown.is_set(): await asyncio.sleep(_JUSTIFIED_FINALIZED_LOG_INTERVAL_SEC) @@ -468,6 +472,16 @@ async def _log_justified_finalized_periodically(self) -> None: peers_connected = sum( 1 for p in self.sync_service.peer_manager.get_all_peers() if p.is_connected() ) + metrics.lean_current_slot.set(int(self.clock.current_slot())) + metrics.lean_connected_peers.set(peers_connected) + metrics.lean_head_slot.set(int(store.blocks[store.head].slot)) + metrics.lean_safe_target_slot.set(int(store.blocks[store.safe_target].slot)) + metrics.lean_latest_justified_slot.set(int(store.latest_justified.slot)) + metrics.lean_latest_finalized_slot.set(int(store.latest_finalized.slot)) + count = ( + len(self.validator_service.registry) if self.validator_service is not None else 0 + ) + metrics.lean_validators_count.set(count) j = store.latest_justified f = store.latest_finalized j_root = j.root.hex() if hasattr(j.root, "hex") else str(j.root) diff --git a/src/lean_spec/subspecs/sync/service.py b/src/lean_spec/subspecs/sync/service.py index ba7e3b39..155f0a99 100644 --- a/src/lean_spec/subspecs/sync/service.py +++ b/src/lean_spec/subspecs/sync/service.py @@ -38,6 +38,7 @@ import asyncio import logging +import time from collections.abc import Callable, Coroutine from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any @@ -51,6 +52,7 @@ ) from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.forkchoice.store import Store +from lean_spec.subspecs.metrics import registry as metrics from lean_spec.subspecs.networking.reqresp.message import Status from lean_spec.subspecs.networking.transport.peer_id import PeerId from lean_spec.subspecs.ssz.hash import hash_tree_root @@ -509,11 +511,14 @@ async def on_gossip_attestation( # The store validates the signature and updates branch weights. # Invalid attestations (bad signature, unknown target) are rejected. # Validation failures are logged but don't crash the event loop. 
+ t0 = time.perf_counter() try: self.store = self.store.on_gossip_attestation( signed_attestation=attestation, is_aggregator=is_aggregator_role, ) + metrics.lean_attestation_validation_time_seconds.observe(time.perf_counter() - t0) + metrics.lean_attestations_valid_total.labels(source="gossip").inc() logger.info( "Attestation from peer %s slot=%s validator=%s: validation and signature ok", peer_str, @@ -521,6 +526,7 @@ async def on_gossip_attestation( validator_id, ) except (AssertionError, KeyError) as e: + metrics.lean_attestations_invalid_total.labels(source="gossip").inc() logger.warning( "Attestation from peer %s slot=%s validator=%s: validation or signature failed: %s", peer_str, diff --git a/tests/lean_spec/subspecs/containers/block/__init__.py b/tests/lean_spec/subspecs/containers/block/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/lean_spec/subspecs/containers/block/test_block_lookup.py b/tests/lean_spec/subspecs/containers/block/test_block_lookup.py new file mode 100644 index 00000000..f953bd2f --- /dev/null +++ b/tests/lean_spec/subspecs/containers/block/test_block_lookup.py @@ -0,0 +1,247 @@ +"""Tests for BlockLookup mapping type.""" + +from __future__ import annotations + +import pytest + +from lean_spec.subspecs.containers.block.block import Block, BlockBody +from lean_spec.subspecs.containers.block.types import ( + AggregatedAttestations, + BlockLookup, +) +from lean_spec.subspecs.containers.slot import Slot +from lean_spec.subspecs.containers.validator import ValidatorIndex +from lean_spec.types import ZERO_HASH, Bytes32 + + +def _root(seed: int) -> Bytes32: + """Create a deterministic 32-byte root from a seed.""" + return Bytes32(bytes([seed % 256]) * 32) + + +def _block(parent_root: Bytes32, slot: int = 1) -> Block: + """Create a minimal block with the given parent root.""" + return Block( + slot=Slot(slot), + proposer_index=ValidatorIndex(0), + parent_root=parent_root, + state_root=ZERO_HASH, + 
body=BlockBody(attestations=AggregatedAttestations(data=[])), + ) + + +def _chain(length: int) -> tuple[BlockLookup, list[Bytes32]]: + """Build a linear chain of blocks and return the lookup plus ordered roots. + + The first block's parent is ZERO_HASH (genesis). + Returns roots ordered from genesis-child to tip. + """ + roots: list[Bytes32] = [] + lookup = BlockLookup() + parent = ZERO_HASH + for i in range(1, length + 1): + root = _root(i) + lookup[root] = _block(parent, slot=i) + roots.append(root) + parent = root + return lookup, roots + + +class TestAncestors: + """Tests for BlockLookup.ancestors.""" + + def test_linear_chain_walks_back_to_genesis(self) -> None: + """Ancestors of the tip of a 3-block chain returns all roots tip-first.""" + lookup, roots = _chain(3) + + assert list(lookup.ancestors(roots[-1])) == list(reversed(roots)) + + def test_single_block_pointing_to_genesis(self) -> None: + """A single block whose parent is ZERO_HASH yields just that block's root.""" + root = _root(1) + lookup = BlockLookup({root: _block(ZERO_HASH)}) + + assert list(lookup.ancestors(root)) == [root] + + def test_root_not_in_lookup_returns_empty(self) -> None: + """An unknown root produces no ancestors.""" + lookup = BlockLookup() + + assert list(lookup.ancestors(_root(99))) == [] + + def test_genesis_block_yields_only_itself(self) -> None: + """A block whose parent_root is ZERO_HASH stops immediately after yielding.""" + root = _root(10) + lookup = BlockLookup({root: _block(ZERO_HASH, slot=0)}) + + assert list(lookup.ancestors(root)) == [root] + + def test_two_block_chain(self) -> None: + """A two-block chain returns both roots in reverse order.""" + lookup, roots = _chain(2) + + assert list(lookup.ancestors(roots[-1])) == [roots[1], roots[0]] + + def test_ancestors_from_middle_of_chain(self) -> None: + """Starting from a mid-chain root only walks back from that point.""" + lookup, roots = _chain(4) + + assert list(lookup.ancestors(roots[1])) == [roots[1], roots[0]] + + 
+class TestReorgDepth: + """Tests for BlockLookup.reorg_depth.""" + + def test_same_head_is_zero(self) -> None: + """No reorg when old and new head are the same block.""" + lookup, roots = _chain(3) + head = roots[-1] + + assert lookup.reorg_depth(head, head) == 0 + + def test_simple_fork(self) -> None: + """Two branches diverging from a common ancestor. + + Chain: genesis -> A -> B (old head) + \\-> C -> D (new head) + + Old chain has 1 block (B) past the fork point A. + """ + root_a = _root(1) + root_b = _root(2) + root_c = _root(3) + root_d = _root(4) + + lookup = BlockLookup( + { + root_a: _block(ZERO_HASH, slot=1), + root_b: _block(root_a, slot=2), + root_c: _block(root_a, slot=2), + root_d: _block(root_c, slot=3), + } + ) + + assert lookup.reorg_depth(old_head=root_b, new_head=root_d) == 1 + + def test_deeper_fork(self) -> None: + """Old chain has multiple blocks past the fork point. + + Chain: genesis -> A -> B -> C (old head) + \\-> D (new head) + + Old chain has 2 blocks (B, C) past the fork point A. + """ + root_a = _root(1) + root_b = _root(2) + root_c = _root(3) + root_d = _root(4) + + lookup = BlockLookup( + { + root_a: _block(ZERO_HASH, slot=1), + root_b: _block(root_a, slot=2), + root_c: _block(root_b, slot=3), + root_d: _block(root_a, slot=2), + } + ) + + assert lookup.reorg_depth(old_head=root_c, new_head=root_d) == 2 + + def test_old_head_not_in_lookup(self) -> None: + """Old head absent from lookup produces depth 0.""" + lookup, roots = _chain(2) + + assert lookup.reorg_depth(old_head=_root(99), new_head=roots[-1]) == 0 + + def test_new_head_not_in_lookup(self) -> None: + """New head absent means no common ancestors, so all old ancestors are counted.""" + lookup, roots = _chain(3) + + assert lookup.reorg_depth(old_head=roots[-1], new_head=_root(99)) == 3 + + def test_both_share_genesis_as_common_ancestor(self) -> None: + """Two independent chains from genesis diverge at the very first block. 
+ + Chain: genesis -> A -> B (old head) + genesis -> C -> D (new head) + + Old chain has 2 blocks (A, B) with no overlap with new chain. + """ + root_a = _root(1) + root_b = _root(2) + root_c = _root(3) + root_d = _root(4) + + lookup = BlockLookup( + { + root_a: _block(ZERO_HASH, slot=1), + root_b: _block(root_a, slot=2), + root_c: _block(ZERO_HASH, slot=1), + root_d: _block(root_c, slot=2), + } + ) + + assert lookup.reorg_depth(old_head=root_b, new_head=root_d) == 2 + + +class TestValidation: + """Tests for BlockLookup Pydantic validation.""" + + def test_blocklookup_instance_passes_through(self) -> None: + """An existing BlockLookup is returned as-is.""" + original = BlockLookup() + result = BlockLookup._validate(original) + + assert result is original + + def test_plain_dict_coerced_to_blocklookup(self) -> None: + """A plain dict is wrapped into a BlockLookup.""" + root = _root(1) + block = _block(ZERO_HASH) + result = BlockLookup._validate({root: block}) + + assert isinstance(result, BlockLookup) + assert result == BlockLookup({root: block}) + + def test_invalid_type_raises_valueerror(self) -> None: + """Non-dict, non-BlockLookup input raises ValueError.""" + with pytest.raises(ValueError, match=r"expected dict or BlockLookup"): + BlockLookup._validate("not a dict") + + def test_invalid_type_list_raises_valueerror(self) -> None: + """A list also raises ValueError.""" + with pytest.raises(ValueError, match=r"expected dict or BlockLookup"): + BlockLookup._validate([1, 2, 3]) + + +class TestDictBehavior: + """Tests for standard dict operations on BlockLookup.""" + + def test_len_and_contains(self) -> None: + """BlockLookup supports len() and 'in' checks.""" + lookup, roots = _chain(3) + + assert len(lookup) == 3 + assert roots[0] in lookup + assert _root(99) not in lookup + + def test_iteration_yields_keys(self) -> None: + """Iterating over a BlockLookup yields its keys.""" + lookup, roots = _chain(2) + + assert set(lookup) == set(roots) + + def 
test_getitem_returns_block(self) -> None: + """Subscript access returns the stored Block.""" + root = _root(1) + block = _block(ZERO_HASH) + lookup = BlockLookup({root: block}) + + assert lookup[root] == block + + def test_empty_lookup(self) -> None: + """An empty BlockLookup has length 0 and no keys.""" + lookup = BlockLookup() + + assert len(lookup) == 0 + assert list(lookup) == [] diff --git a/tests/lean_spec/subspecs/forkchoice/test_record_metrics.py b/tests/lean_spec/subspecs/forkchoice/test_record_metrics.py new file mode 100644 index 00000000..8ac2e03a --- /dev/null +++ b/tests/lean_spec/subspecs/forkchoice/test_record_metrics.py @@ -0,0 +1,213 @@ +"""Tests for Store._record_metrics.""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import Any + +import pytest +from prometheus_client import CollectorRegistry, Counter, Gauge + +from lean_spec.subspecs.metrics import registry as metrics +from lean_spec.types import Bytes32 +from tests.lean_spec.helpers import make_checkpoint, make_store + + +@pytest.fixture +def fresh_registry() -> CollectorRegistry: + """Create an isolated Prometheus registry for each test.""" + return CollectorRegistry() + + +@pytest.fixture(autouse=True) +def _reset_metrics() -> Iterator[None]: + """Ensure metrics are uninitialized before and after each test.""" + metrics.reset() + yield + metrics.reset() + + +def _init_metrics(registry: CollectorRegistry) -> None: + """Initialize metrics with the given isolated registry.""" + metrics.init(registry=registry) + + +def _get_gauge_value(gauge: Any) -> float: + """Read the current value of a Prometheus Gauge.""" + assert isinstance(gauge, Gauge) + return gauge._value.get() + + +def _get_counter_value(counter: Any) -> float: + """Read the current value of a Prometheus Counter.""" + assert isinstance(counter, Counter) + return counter._value.get() + + +class TestRecordMetricsUninitialized: + """Tests for _record_metrics when metrics are not 
initialized.""" + + def test_noop_when_metrics_not_initialized(self) -> None: + """No errors are raised when metrics are not initialized.""" + store = make_store(num_validators=3) + # Should not raise even though metric objects are no-op stubs + store._record_metrics(store.head) + + +class TestRecordMetricsNoReorg: + """Tests for _record_metrics when head has not changed.""" + + def test_sets_head_slot_gauge(self, fresh_registry: CollectorRegistry) -> None: + """Head slot gauge is set to the slot of the current head block.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(store.head) + + expected_slot = int(store.blocks[store.head].slot) + assert _get_gauge_value(metrics.lean_head_slot) == expected_slot + + def test_sets_safe_target_slot_gauge(self, fresh_registry: CollectorRegistry) -> None: + """Safe target slot gauge is set to the slot of the safe target block.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(store.head) + + expected_slot = int(store.blocks[store.safe_target].slot) + assert _get_gauge_value(metrics.lean_safe_target_slot) == expected_slot + + def test_sets_latest_justified_slot_gauge(self, fresh_registry: CollectorRegistry) -> None: + """Latest justified slot gauge is set from the store's latest_justified checkpoint.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(store.head) + + expected_slot = int(store.latest_justified.slot) + assert _get_gauge_value(metrics.lean_latest_justified_slot) == expected_slot + + def test_sets_latest_finalized_slot_gauge(self, fresh_registry: CollectorRegistry) -> None: + """Latest finalized slot gauge is set from the store's latest_finalized checkpoint.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(store.head) + + expected_slot = int(store.latest_finalized.slot) + assert 
_get_gauge_value(metrics.lean_latest_finalized_slot) == expected_slot + + def test_does_not_increment_reorg_counter_when_head_unchanged( + self, fresh_registry: CollectorRegistry + ) -> None: + """Reorg counter stays at zero when old_head equals current head.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(store.head) + + assert _get_counter_value(metrics.lean_fork_choice_reorgs_total) == 0 + + def test_all_gauges_set_correctly_for_genesis_store( + self, fresh_registry: CollectorRegistry + ) -> None: + """All four gauges reflect genesis values (slot 0) on a fresh store.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(store.head) + + assert _get_gauge_value(metrics.lean_head_slot) == 0 + assert _get_gauge_value(metrics.lean_safe_target_slot) == 0 + assert _get_gauge_value(metrics.lean_latest_justified_slot) == 0 + assert _get_gauge_value(metrics.lean_latest_finalized_slot) == 0 + + +class TestRecordMetricsWithReorg: + """Tests for _record_metrics when head has changed (reorg detected).""" + + def test_increments_reorg_counter_on_head_change( + self, fresh_registry: CollectorRegistry + ) -> None: + """Reorg counter increments by one when head differs from old_head.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + fake_old_head = Bytes32(b"\xaa" * 32) + + # Add a fake block so reorg_depth can walk the chain + # The old_head won't be in blocks, so reorg_depth returns 0 ancestors + # But the counter should still increment + store._record_metrics(fake_old_head) + + assert _get_counter_value(metrics.lean_fork_choice_reorgs_total) == 1 + + def test_observes_reorg_depth(self, fresh_registry: CollectorRegistry) -> None: + """Reorg depth histogram receives the depth from blocks.reorg_depth.""" + _init_metrics(fresh_registry) + + # Build a store with two branches to produce a real reorg depth + store = make_store(num_validators=3) + + # Passing a 
different old_head triggers reorg path + fake_old_head = Bytes32(b"\xbb" * 32) + store._record_metrics(fake_old_head) + + # fake_old_head is not in blocks, so reorg_depth walks 0 ancestors -> depth 0 + # The histogram should have recorded exactly one observation + histogram = metrics.lean_fork_choice_reorg_depth + assert not isinstance(histogram, type(metrics).__mro__[0]) + assert histogram._sum.get() == 0 # type: ignore[union-attr] + + def test_reorg_counter_increments_multiple_times( + self, fresh_registry: CollectorRegistry + ) -> None: + """Reorg counter accumulates across multiple calls.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(Bytes32(b"\x01" * 32)) + store._record_metrics(Bytes32(b"\x02" * 32)) + store._record_metrics(Bytes32(b"\x03" * 32)) + + assert _get_counter_value(metrics.lean_fork_choice_reorgs_total) == 3 + + def test_gauges_still_set_during_reorg(self, fresh_registry: CollectorRegistry) -> None: + """Gauge values are updated even when a reorg is detected.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store._record_metrics(Bytes32(b"\xff" * 32)) + + assert _get_gauge_value(metrics.lean_head_slot) == int(store.blocks[store.head].slot) + assert _get_gauge_value(metrics.lean_safe_target_slot) == int( + store.blocks[store.safe_target].slot + ) + assert _get_gauge_value(metrics.lean_latest_justified_slot) == int( + store.latest_justified.slot + ) + assert _get_gauge_value(metrics.lean_latest_finalized_slot) == int( + store.latest_finalized.slot + ) + + +class TestRecordMetricsWithNonGenesisSlots: + """Tests for _record_metrics with non-zero checkpoint slots.""" + + def test_gauges_reflect_updated_checkpoint_slots( + self, fresh_registry: CollectorRegistry + ) -> None: + """Gauge values match non-zero justified and finalized slots.""" + _init_metrics(fresh_registry) + store = make_store(num_validators=3) + + store = store.model_copy( + update={ + "latest_justified": 
make_checkpoint(root_seed=1, slot=10), + "latest_finalized": make_checkpoint(root_seed=2, slot=5), + } + ) + + store._record_metrics(store.head) + + assert _get_gauge_value(metrics.lean_latest_justified_slot) == 10 + assert _get_gauge_value(metrics.lean_latest_finalized_slot) == 5 diff --git a/tests/lean_spec/subspecs/forkchoice/test_validator.py b/tests/lean_spec/subspecs/forkchoice/test_validator.py index bda94cd4..85fd708b 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_validator.py +++ b/tests/lean_spec/subspecs/forkchoice/test_validator.py @@ -14,6 +14,7 @@ SignedAttestation, ValidatorIndex, ) +from lean_spec.subspecs.containers.block import BlockLookup from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.forkchoice import GossipSignatureEntry, Store from lean_spec.subspecs.ssz.hash import hash_tree_root @@ -389,7 +390,7 @@ def test_produce_block_missing_parent_state(self) -> None: safe_target=Bytes32(b"nonexistent" + b"\x00" * 21), latest_justified=checkpoint, latest_finalized=checkpoint, - blocks={}, # No blocks + blocks=BlockLookup(), # No blocks states={}, # No states validator_id=TEST_VALIDATOR_ID, ) diff --git a/uv.lock b/uv.lock index 84fc8429..30b571c5 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.12" [manifest] @@ -880,6 +880,7 @@ dependencies = [ { name = "httpx" }, { name = "lean-multisig-py" }, { name = "numpy" }, + { name = "prometheus-client" }, { name = "pydantic" }, { name = "pyyaml" }, { name = "typing-extensions" }, @@ -940,6 +941,7 @@ requires-dist = [ { name = "httpx", specifier = ">=0.28.0,<1" }, { name = "lean-multisig-py", git = "https://github.com/anshalshukla/leanMultisig-py?branch=devnet2" }, { name = "numpy", specifier = ">=2.0.0,<3" }, + { name = "prometheus-client", specifier = ">=0.21.0,<1" }, { name = "pydantic", specifier = ">=2.12.0,<3" }, { name = "pyyaml", specifier = ">=6.0.0,<7" }, { name = "typing-extensions", specifier = 
">=4.4" }, @@ -1514,6 +1516,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] +[[package]] +name = "prometheus-client" +version = "0.24.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/58/a794d23feb6b00fc0c72787d7e87d872a6730dd9ed7c7b3e954637d8f280/prometheus_client-0.24.1.tar.gz", hash = "sha256:7e0ced7fbbd40f7b84962d5d2ab6f17ef88a72504dcf7c0b40737b43b2a461f9", size = 85616, upload-time = "2026-01-14T15:26:26.965Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/c3/24a2f845e3917201628ecaba4f18bab4d18a337834c1df2a159ee9d22a42/prometheus_client-0.24.1-py3-none-any.whl", hash = "sha256:150db128af71a5c2482b36e588fc8a6b95e498750da4b17065947c16070f4055", size = 64057, upload-time = "2026-01-14T15:26:24.42Z" }, +] + [[package]] name = "prompt-toolkit" version = "3.0.52"