feat(memory): implement metrics and observability (#100)

Add comprehensive metrics collector for memory system with:
- Counter metrics: operations, retrievals, cache hits/misses, consolidations,
  episodes recorded, patterns/anomalies/insights detected
- Gauge metrics: item counts, memory size, cache size, procedure success rates,
  active sessions, pending consolidations
- Histogram metrics: working memory latency, retrieval latency, consolidation
  duration, embedding latency
- Prometheus format export
- Summary and cache stats helpers

31 tests covering all metric types, singleton pattern, and edge cases.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-05 11:00:53 +01:00
parent 997cfaa03a
commit 57680c3772
4 changed files with 1029 additions and 0 deletions
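For orientation, a minimal usage sketch of the collector API exercised by the tests below. The module path, function names, and keyword arguments are taken from the imports and calls in the test file; the surrounding script is illustrative only and not part of this commit.

import asyncio

from app.services.memory.metrics.collector import (
    get_memory_metrics,
    record_memory_operation,
    record_retrieval,
)


async def main() -> None:
    # The convenience helpers update the process-wide singleton collector.
    await record_memory_operation(
        operation="get",
        memory_type="working",
        scope="session",
        success=True,
        latency_ms=5.0,
    )
    await record_retrieval(
        memory_type="episodic",
        strategy="similarity",
        results_count=10,
        latency_ms=50.0,
    )

    metrics = await get_memory_metrics()
    print(await metrics.get_summary())            # totals, rates, latencies
    print(await metrics.get_prometheus_format())  # Prometheus text exposition


asyncio.run(main())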


@@ -0,0 +1,2 @@
# tests/unit/services/memory/metrics/__init__.py
"""Tests for Memory Metrics."""


@@ -0,0 +1,470 @@
# tests/unit/services/memory/metrics/test_collector.py
"""Tests for Memory Metrics Collector."""

import pytest

from app.services.memory.metrics.collector import (
    MemoryMetrics,
    MetricType,
    MetricValue,
    get_memory_metrics,
    record_memory_operation,
    record_retrieval,
    reset_memory_metrics,
)


@pytest.fixture
def metrics() -> MemoryMetrics:
    """Create a fresh metrics instance for each test."""
    return MemoryMetrics()


@pytest.fixture(autouse=True)
def reset_singleton() -> None:
    """Reset singleton before each test."""
    reset_memory_metrics()

class TestMemoryMetrics:
    """Tests for MemoryMetrics class."""

    @pytest.mark.asyncio
    async def test_inc_operations(self, metrics: MemoryMetrics) -> None:
        """Should increment operation counters."""
        await metrics.inc_operations("get", "working", "session", True)
        await metrics.inc_operations("get", "working", "session", True)
        await metrics.inc_operations("set", "working", "session", True)
        summary = await metrics.get_summary()
        assert summary["total_operations"] == 3
        assert summary["successful_operations"] == 3

    @pytest.mark.asyncio
    async def test_inc_operations_failure(self, metrics: MemoryMetrics) -> None:
        """Should track failed operations."""
        await metrics.inc_operations("get", "working", None, True)
        await metrics.inc_operations("get", "working", None, False)
        summary = await metrics.get_summary()
        assert summary["total_operations"] == 2
        assert summary["successful_operations"] == 1
        assert summary["operation_success_rate"] == 0.5

    @pytest.mark.asyncio
    async def test_inc_retrieval(self, metrics: MemoryMetrics) -> None:
        """Should increment retrieval counters."""
        await metrics.inc_retrieval("episodic", "similarity", 5)
        await metrics.inc_retrieval("episodic", "temporal", 3)
        await metrics.inc_retrieval("semantic", "similarity", 10)
        summary = await metrics.get_summary()
        assert summary["total_retrievals"] == 3

    @pytest.mark.asyncio
    async def test_cache_hit_miss(self, metrics: MemoryMetrics) -> None:
        """Should track cache hits and misses."""
        await metrics.inc_cache_hit("hot")
        await metrics.inc_cache_hit("hot")
        await metrics.inc_cache_hit("hot")
        await metrics.inc_cache_miss("hot")
        summary = await metrics.get_summary()
        assert summary["cache_hit_rate"] == 0.75

    @pytest.mark.asyncio
    async def test_cache_stats(self, metrics: MemoryMetrics) -> None:
        """Should provide detailed cache stats."""
        await metrics.inc_cache_hit("hot")
        await metrics.inc_cache_hit("hot")
        await metrics.inc_cache_miss("hot")
        await metrics.inc_cache_hit("embedding")
        await metrics.inc_cache_miss("embedding")
        await metrics.inc_cache_miss("embedding")
        stats = await metrics.get_cache_stats()
        assert stats["hot"]["hits"] == 2
        assert stats["hot"]["misses"] == 1
        assert stats["hot"]["hit_rate"] == pytest.approx(0.6667, rel=0.01)
        assert stats["embedding"]["hits"] == 1
        assert stats["embedding"]["misses"] == 2
        assert stats["embedding"]["hit_rate"] == pytest.approx(0.3333, rel=0.01)

    @pytest.mark.asyncio
    async def test_inc_consolidation(self, metrics: MemoryMetrics) -> None:
        """Should increment consolidation counter."""
        await metrics.inc_consolidation("working_to_episodic", True)
        await metrics.inc_consolidation("episodic_to_semantic", True)
        await metrics.inc_consolidation("prune", False)
        summary = await metrics.get_summary()
        assert summary["total_consolidations"] == 3

    @pytest.mark.asyncio
    async def test_inc_episodes_recorded(self, metrics: MemoryMetrics) -> None:
        """Should track episodes by outcome."""
        await metrics.inc_episodes_recorded("success")
        await metrics.inc_episodes_recorded("success")
        await metrics.inc_episodes_recorded("failure")
        summary = await metrics.get_summary()
        assert summary["total_episodes_recorded"] == 3

    @pytest.mark.asyncio
    async def test_inc_patterns_insights_anomalies(
        self, metrics: MemoryMetrics
    ) -> None:
        """Should track reflection metrics."""
        await metrics.inc_patterns_detected("recurring_success")
        await metrics.inc_patterns_detected("action_sequence")
        await metrics.inc_insights_generated("optimization")
        await metrics.inc_anomalies_detected("unusual_duration")
        summary = await metrics.get_summary()
        assert summary["patterns_detected"] == 2
        assert summary["insights_generated"] == 1
        assert summary["anomalies_detected"] == 1

    @pytest.mark.asyncio
    async def test_set_memory_items_count(self, metrics: MemoryMetrics) -> None:
        """Should set memory item count gauge."""
        await metrics.set_memory_items_count("episodic", "project", 100)
        await metrics.set_memory_items_count("semantic", "project", 50)
        all_metrics = await metrics.get_all_metrics()
        gauge_metrics = [
            m for m in all_metrics if m.name == "memory_items_count"
        ]
        assert len(gauge_metrics) == 2

    @pytest.mark.asyncio
    async def test_set_memory_size_bytes(self, metrics: MemoryMetrics) -> None:
        """Should set memory size gauge."""
        await metrics.set_memory_size_bytes("working", "session", 1024)
        all_metrics = await metrics.get_all_metrics()
        size_metrics = [m for m in all_metrics if m.name == "memory_size_bytes"]
        assert len(size_metrics) == 1
        assert size_metrics[0].value == 1024.0

    @pytest.mark.asyncio
    async def test_set_procedure_success_rate(self, metrics: MemoryMetrics) -> None:
        """Should set procedure success rate gauge."""
        await metrics.set_procedure_success_rate("code_review", 0.85)
        all_metrics = await metrics.get_all_metrics()
        rate_metrics = [
            m for m in all_metrics if m.name == "memory_procedure_success_rate"
        ]
        assert len(rate_metrics) == 1
        assert rate_metrics[0].value == 0.85

    @pytest.mark.asyncio
    async def test_set_active_sessions(self, metrics: MemoryMetrics) -> None:
        """Should set active sessions gauge."""
        await metrics.set_active_sessions(5)
        summary = await metrics.get_summary()
        assert summary["active_sessions"] == 5

    @pytest.mark.asyncio
    async def test_observe_working_latency(self, metrics: MemoryMetrics) -> None:
        """Should record working memory latency histogram."""
        await metrics.observe_working_latency(0.005)  # 5ms
        await metrics.observe_working_latency(0.003)  # 3ms
        await metrics.observe_working_latency(0.010)  # 10ms
        all_metrics = await metrics.get_all_metrics()
        count_metric = next(
            (m for m in all_metrics if m.name == "memory_working_latency_seconds_count"),
            None,
        )
        sum_metric = next(
            (m for m in all_metrics if m.name == "memory_working_latency_seconds_sum"),
            None,
        )
        assert count_metric is not None
        assert count_metric.value == 3
        assert sum_metric is not None
        assert sum_metric.value == pytest.approx(0.018, rel=0.01)

    @pytest.mark.asyncio
    async def test_observe_retrieval_latency(self, metrics: MemoryMetrics) -> None:
        """Should record retrieval latency histogram."""
        await metrics.observe_retrieval_latency(0.050)  # 50ms
        await metrics.observe_retrieval_latency(0.075)  # 75ms
        summary = await metrics.get_summary()
        assert summary["avg_retrieval_latency_ms"] == pytest.approx(62.5, rel=0.01)

    @pytest.mark.asyncio
    async def test_observe_consolidation_duration(
        self, metrics: MemoryMetrics
    ) -> None:
        """Should record consolidation duration histogram."""
        await metrics.observe_consolidation_duration(5.0)
        await metrics.observe_consolidation_duration(10.0)
        all_metrics = await metrics.get_all_metrics()
        count_metric = next(
            (
                m
                for m in all_metrics
                if m.name == "memory_consolidation_duration_seconds_count"
            ),
            None,
        )
        assert count_metric is not None
        assert count_metric.value == 2

    @pytest.mark.asyncio
    async def test_get_all_metrics(self, metrics: MemoryMetrics) -> None:
        """Should return all metrics as MetricValue objects."""
        await metrics.inc_operations("get", "working", None, True)
        await metrics.set_active_sessions(3)
        await metrics.observe_retrieval_latency(0.05)
        all_metrics = await metrics.get_all_metrics()
        assert len(all_metrics) >= 3
        # Check we have different metric types
        counter_metrics = [m for m in all_metrics if m.metric_type == MetricType.COUNTER]
        gauge_metrics = [m for m in all_metrics if m.metric_type == MetricType.GAUGE]
        assert len(counter_metrics) >= 1
        assert len(gauge_metrics) >= 1

    @pytest.mark.asyncio
    async def test_get_prometheus_format(self, metrics: MemoryMetrics) -> None:
        """Should export metrics in Prometheus format."""
        await metrics.inc_operations("get", "working", "session", True)
        await metrics.set_active_sessions(5)
        prometheus_output = await metrics.get_prometheus_format()
        assert "# TYPE memory_operations_total counter" in prometheus_output
        assert "memory_operations_total{" in prometheus_output
        assert "# TYPE memory_active_sessions gauge" in prometheus_output
        assert "memory_active_sessions 5" in prometheus_output

    @pytest.mark.asyncio
    async def test_get_summary(self, metrics: MemoryMetrics) -> None:
        """Should return summary dictionary."""
        await metrics.inc_operations("get", "working", None, True)
        await metrics.inc_retrieval("episodic", "similarity", 5)
        await metrics.inc_cache_hit("hot")
        await metrics.inc_consolidation("prune", True)
        summary = await metrics.get_summary()
        assert "total_operations" in summary
        assert "total_retrievals" in summary
        assert "cache_hit_rate" in summary
        assert "total_consolidations" in summary
        assert "operation_success_rate" in summary

    @pytest.mark.asyncio
    async def test_reset(self, metrics: MemoryMetrics) -> None:
        """Should reset all metrics."""
        await metrics.inc_operations("get", "working", None, True)
        await metrics.set_active_sessions(5)
        await metrics.observe_retrieval_latency(0.05)
        await metrics.reset()
        summary = await metrics.get_summary()
        assert summary["total_operations"] == 0
        assert summary["active_sessions"] == 0

class TestMetricValue:
    """Tests for MetricValue dataclass."""

    def test_creates_metric_value(self) -> None:
        """Should create metric value with defaults."""
        metric = MetricValue(
            name="test_metric",
            metric_type=MetricType.COUNTER,
            value=42.0,
        )
        assert metric.name == "test_metric"
        assert metric.metric_type == MetricType.COUNTER
        assert metric.value == 42.0
        assert metric.labels == {}
        assert metric.timestamp is not None

    def test_creates_metric_value_with_labels(self) -> None:
        """Should create metric value with labels."""
        metric = MetricValue(
            name="test_metric",
            metric_type=MetricType.GAUGE,
            value=100.0,
            labels={"scope": "project", "type": "episodic"},
        )
        assert metric.labels == {"scope": "project", "type": "episodic"}

class TestSingleton:
    """Tests for singleton pattern."""

    @pytest.mark.asyncio
    async def test_get_memory_metrics_singleton(self) -> None:
        """Should return same instance."""
        metrics1 = await get_memory_metrics()
        metrics2 = await get_memory_metrics()
        assert metrics1 is metrics2

    @pytest.mark.asyncio
    async def test_reset_singleton(self) -> None:
        """Should reset singleton instance."""
        metrics1 = await get_memory_metrics()
        await metrics1.inc_operations("get", "working", None, True)
        reset_memory_metrics()
        metrics2 = await get_memory_metrics()
        summary = await metrics2.get_summary()
        assert metrics1 is not metrics2
        assert summary["total_operations"] == 0

class TestConvenienceFunctions:
    """Tests for convenience functions."""

    @pytest.mark.asyncio
    async def test_record_memory_operation(self) -> None:
        """Should record memory operation."""
        await record_memory_operation(
            operation="get",
            memory_type="working",
            scope="session",
            success=True,
            latency_ms=5.0,
        )
        metrics = await get_memory_metrics()
        summary = await metrics.get_summary()
        assert summary["total_operations"] == 1

    @pytest.mark.asyncio
    async def test_record_retrieval(self) -> None:
        """Should record retrieval operation."""
        await record_retrieval(
            memory_type="episodic",
            strategy="similarity",
            results_count=10,
            latency_ms=50.0,
        )
        metrics = await get_memory_metrics()
        summary = await metrics.get_summary()
        assert summary["total_retrievals"] == 1
        assert summary["avg_retrieval_latency_ms"] == pytest.approx(50.0, rel=0.01)

class TestHistogramBuckets:
    """Tests for histogram bucket behavior."""

    @pytest.mark.asyncio
    async def test_histogram_buckets_populated(self, metrics: MemoryMetrics) -> None:
        """Should populate histogram buckets correctly."""
        # Add values to different buckets
        await metrics.observe_retrieval_latency(0.005)  # <= 0.01
        await metrics.observe_retrieval_latency(0.030)  # <= 0.05
        await metrics.observe_retrieval_latency(0.080)  # <= 0.1
        await metrics.observe_retrieval_latency(0.500)  # <= 0.5
        await metrics.observe_retrieval_latency(2.000)  # <= 2.5
        prometheus_output = await metrics.get_prometheus_format()
        # Check that histogram buckets are in output
        assert "memory_retrieval_latency_seconds_bucket" in prometheus_output
        assert 'le="0.01"' in prometheus_output
        assert 'le="+Inf"' in prometheus_output

    @pytest.mark.asyncio
    async def test_histogram_count_and_sum(self, metrics: MemoryMetrics) -> None:
        """Should track histogram count and sum."""
        await metrics.observe_retrieval_latency(0.1)
        await metrics.observe_retrieval_latency(0.2)
        await metrics.observe_retrieval_latency(0.3)
        prometheus_output = await metrics.get_prometheus_format()
        assert "memory_retrieval_latency_seconds_count 3" in prometheus_output
        assert "memory_retrieval_latency_seconds_sum 0.6" in prometheus_output

class TestLabelParsing:
    """Tests for label parsing."""

    @pytest.mark.asyncio
    async def test_parse_labels_in_output(self, metrics: MemoryMetrics) -> None:
        """Should correctly parse labels in output."""
        await metrics.inc_operations("get", "episodic", "project", True)
        all_metrics = await metrics.get_all_metrics()
        op_metric = next(
            (m for m in all_metrics if m.name == "memory_operations_total"), None
        )
        assert op_metric is not None
        assert op_metric.labels["operation"] == "get"
        assert op_metric.labels["memory_type"] == "episodic"
        assert op_metric.labels["scope"] == "project"
        assert op_metric.labels["success"] == "true"

class TestEdgeCases:
    """Tests for edge cases."""

    @pytest.mark.asyncio
    async def test_empty_metrics(self, metrics: MemoryMetrics) -> None:
        """Should handle empty metrics gracefully."""
        summary = await metrics.get_summary()
        assert summary["total_operations"] == 0
        assert summary["operation_success_rate"] == 1.0  # Default when no ops
        assert summary["cache_hit_rate"] == 0.0
        assert summary["avg_retrieval_latency_ms"] == 0.0

    @pytest.mark.asyncio
    async def test_concurrent_operations(self, metrics: MemoryMetrics) -> None:
        """Should handle concurrent operations safely."""
        import asyncio

        async def increment_ops() -> None:
            for _ in range(100):
                await metrics.inc_operations("get", "working", None, True)

        # Run multiple concurrent tasks
        await asyncio.gather(
            increment_ops(),
            increment_ops(),
            increment_ops(),
        )
        summary = await metrics.get_summary()
        assert summary["total_operations"] == 300

    @pytest.mark.asyncio
    async def test_prometheus_format_empty(self, metrics: MemoryMetrics) -> None:
        """Should return valid format with no metrics."""
        prometheus_output = await metrics.get_prometheus_format()
        # Should just have histogram bucket definitions
        assert "# TYPE memory_retrieval_latency_seconds histogram" in prometheus_output
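The bucket assertions in TestHistogramBuckets follow standard Prometheus cumulative-histogram semantics: every bucket whose upper bound is at least the observed value is incremented, the le="+Inf" bucket always equals the _count series, and _sum accumulates the raw observations. A small self-contained sketch of that bookkeeping follows; the bucket bounds other than 0.01 and +Inf are assumptions, since the tests only pin those two.

from math import inf

# Assumed bucket bounds for illustration; the tests assert only le="0.01" and le="+Inf".
BOUNDS = [0.01, 0.05, 0.1, 0.5, 2.5, inf]


class LatencyHistogram:
    """Cumulative-bucket bookkeeping in the style the Prometheus exposition format expects."""

    def __init__(self) -> None:
        self.buckets = {le: 0 for le in BOUNDS}  # cumulative count per upper bound
        self.count = 0    # exported as *_count
        self.total = 0.0  # exported as *_sum

    def observe(self, value: float) -> None:
        for le in BOUNDS:
            if value <= le:
                self.buckets[le] += 1  # cumulative: higher bounds include lower ones
        self.count += 1
        self.total += value


h = LatencyHistogram()
for v in (0.005, 0.030, 0.080, 0.500, 2.000):  # values from test_histogram_buckets_populated
    h.observe(v)
assert h.buckets[inf] == h.count == 5
assert h.buckets[0.01] == 1  # only the 5 ms observation falls at or below 0.01 s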