style(memory): apply ruff formatting and linting fixes
Auto-fixed linting errors and formatting issues:

- Removed unused imports (F401): pytest, Any, AnalysisType, MemoryType, OutcomeType
- Removed unused variable (F841): hooks variable in test
- Applied consistent formatting across memory service and test files

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
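The diff below shows what these autofixes look like in practice. A minimal sketch of how to reproduce them (the exact commands, flags, and paths are not recorded in this commit, so treat this invocation as an assumption):

    # Hypothetical reproduction — run from the repository root.
    ruff check --fix .   # auto-fixes lint violations such as F401 (unused import) and F841 (unused local variable)
    ruff format .        # normalizes wrapping to the configured line length (ruff's default is 88)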
@@ -133,9 +133,7 @@ class TestMemoryContextSource:
         )
 
         assert result.by_type["working"] == 2
-        assert all(
-            c.memory_subtype == MemorySubtype.WORKING for c in result.contexts
-        )
+        assert all(c.memory_subtype == MemorySubtype.WORKING for c in result.contexts)
 
     @patch("app.services.memory.integration.context_source.EpisodicMemory")
     async def test_fetch_episodic_memory(
@@ -252,11 +250,10 @@ class TestMemoryContextSource:
         context_source: MemoryContextSource,
     ) -> None:
         """Results should be sorted by relevance score."""
-        with patch.object(
-            context_source, "_fetch_episodic"
-        ) as mock_ep, patch.object(
-            context_source, "_fetch_semantic"
-        ) as mock_sem:
+        with (
+            patch.object(context_source, "_fetch_episodic") as mock_ep,
+            patch.object(context_source, "_fetch_semantic") as mock_sem,
+        ):
             # Create contexts with different relevance scores
             from app.services.context.types.memory import MemoryContext
 
@@ -105,6 +105,7 @@ class TestLifecycleHooks:
 
     def test_register_spawn_hook(self, lifecycle_hooks: LifecycleHooks) -> None:
         """Should register spawn hook."""
+
         async def my_hook(event: LifecycleEvent) -> None:
             pass
 
@@ -115,7 +116,7 @@ class TestLifecycleHooks:
 
     def test_register_all_hooks(self, lifecycle_hooks: LifecycleHooks) -> None:
         """Should register hooks for all event types."""
-        hooks = [
+        [
             lifecycle_hooks.on_spawn(AsyncMock()),
             lifecycle_hooks.on_pause(AsyncMock()),
             lifecycle_hooks.on_resume(AsyncMock()),
@@ -2,7 +2,6 @@
 """Tests for MemoryToolService."""
 
 from datetime import UTC, datetime
-from typing import Any
 from unittest.mock import AsyncMock, MagicMock, patch
 from uuid import UUID, uuid4
 
@@ -14,11 +13,6 @@ from app.services.memory.mcp.service import (
     ToolResult,
     get_memory_tool_service,
 )
-from app.services.memory.mcp.tools import (
-    AnalysisType,
-    MemoryType,
-    OutcomeType,
-)
 from app.services.memory.types import Outcome
 
 pytestmark = pytest.mark.asyncio(loop_scope="function")
@@ -192,7 +186,9 @@ class TestMemoryToolService:
         context: ToolContext,
     ) -> None:
         """Remember should store in episodic memory."""
-        with patch("app.services.memory.mcp.service.EpisodicMemory") as mock_episodic_cls:
+        with patch(
+            "app.services.memory.mcp.service.EpisodicMemory"
+        ) as mock_episodic_cls:
             # Setup mock
             mock_episode = MagicMock()
             mock_episode.id = uuid4()
@@ -260,7 +256,9 @@ class TestMemoryToolService:
         context: ToolContext,
     ) -> None:
         """Remember should store facts in semantic memory."""
-        with patch("app.services.memory.mcp.service.SemanticMemory") as mock_semantic_cls:
+        with patch(
+            "app.services.memory.mcp.service.SemanticMemory"
+        ) as mock_semantic_cls:
             mock_fact = MagicMock()
             mock_fact.id = uuid4()
 
@@ -311,7 +309,9 @@ class TestMemoryToolService:
         context: ToolContext,
     ) -> None:
         """Remember should store procedures in procedural memory."""
-        with patch("app.services.memory.mcp.service.ProceduralMemory") as mock_procedural_cls:
+        with patch(
+            "app.services.memory.mcp.service.ProceduralMemory"
+        ) as mock_procedural_cls:
             mock_procedure = MagicMock()
             mock_procedure.id = uuid4()
 
@@ -530,15 +530,21 @@ class TestMemoryToolService:
             mock_working_cls.for_session = AsyncMock(return_value=mock_working)
 
             mock_episodic = AsyncMock()
-            mock_episodic.get_recent = AsyncMock(return_value=[MagicMock() for _ in range(10)])
+            mock_episodic.get_recent = AsyncMock(
+                return_value=[MagicMock() for _ in range(10)]
+            )
             mock_episodic_cls.create = AsyncMock(return_value=mock_episodic)
 
             mock_semantic = AsyncMock()
-            mock_semantic.search_facts = AsyncMock(return_value=[MagicMock() for _ in range(5)])
+            mock_semantic.search_facts = AsyncMock(
+                return_value=[MagicMock() for _ in range(5)]
+            )
             mock_semantic_cls.create = AsyncMock(return_value=mock_semantic)
 
             mock_procedural = AsyncMock()
-            mock_procedural.find_matching = AsyncMock(return_value=[MagicMock() for _ in range(3)])
+            mock_procedural.find_matching = AsyncMock(
+                return_value=[MagicMock() for _ in range(3)]
+            )
             mock_procedural_cls.create = AsyncMock(return_value=mock_procedural)
 
             result = await service.execute_tool(
@@ -603,8 +609,12 @@ class TestMemoryToolService:
     ) -> None:
         """Record outcome should store outcome and update procedure."""
         with (
-            patch("app.services.memory.mcp.service.EpisodicMemory") as mock_episodic_cls,
-            patch("app.services.memory.mcp.service.ProceduralMemory") as mock_procedural_cls,
+            patch(
+                "app.services.memory.mcp.service.EpisodicMemory"
+            ) as mock_episodic_cls,
+            patch(
+                "app.services.memory.mcp.service.ProceduralMemory"
+            ) as mock_procedural_cls,
         ):
             mock_episode = MagicMock()
             mock_episode.id = uuid4()
 
@@ -358,10 +358,12 @@ class TestMemoryToolDefinition:
         )
 
         # Valid args
-        validated = tool.validate_args({
-            "memory_type": "working",
-            "content": "Test content",
-        })
+        validated = tool.validate_args(
+            {
+                "memory_type": "working",
+                "content": "Test content",
+            }
+        )
         assert isinstance(validated, RememberArgs)
 
         # Invalid args
@@ -417,4 +419,6 @@ class TestToolDefinitions:
         """All tool schemas should have properties defined."""
         for name, tool in MEMORY_TOOL_DEFINITIONS.items():
             schema = tool.to_mcp_format()
-            assert "properties" in schema["inputSchema"], f"Tool {name} missing properties"
+            assert "properties" in schema["inputSchema"], (
+                f"Tool {name} missing properties"
+            )
 
@@ -134,9 +134,7 @@ class TestMemoryMetrics:
         await metrics.set_memory_items_count("semantic", "project", 50)
 
         all_metrics = await metrics.get_all_metrics()
-        gauge_metrics = [
-            m for m in all_metrics if m.name == "memory_items_count"
-        ]
+        gauge_metrics = [m for m in all_metrics if m.name == "memory_items_count"]
 
         assert len(gauge_metrics) == 2
 
@@ -181,7 +179,11 @@ class TestMemoryMetrics:
 
         all_metrics = await metrics.get_all_metrics()
         count_metric = next(
-            (m for m in all_metrics if m.name == "memory_working_latency_seconds_count"),
+            (
+                m
+                for m in all_metrics
+                if m.name == "memory_working_latency_seconds_count"
+            ),
             None,
         )
         sum_metric = next(
@@ -204,9 +206,7 @@ class TestMemoryMetrics:
         assert summary["avg_retrieval_latency_ms"] == pytest.approx(62.5, rel=0.01)
 
     @pytest.mark.asyncio
-    async def test_observe_consolidation_duration(
-        self, metrics: MemoryMetrics
-    ) -> None:
+    async def test_observe_consolidation_duration(self, metrics: MemoryMetrics) -> None:
         """Should record consolidation duration histogram."""
         await metrics.observe_consolidation_duration(5.0)
         await metrics.observe_consolidation_duration(10.0)
@@ -236,7 +236,9 @@ class TestMemoryMetrics:
         assert len(all_metrics) >= 3
 
         # Check we have different metric types
-        counter_metrics = [m for m in all_metrics if m.metric_type == MetricType.COUNTER]
+        counter_metrics = [
+            m for m in all_metrics if m.metric_type == MetricType.COUNTER
+        ]
         gauge_metrics = [m for m in all_metrics if m.metric_type == MetricType.GAUGE]
 
         assert len(counter_metrics) >= 1
 
@@ -153,8 +153,7 @@ class TestPatternDetection:
 
         # Should find recurring success pattern for 'build' task
         success_patterns = [
-            p for p in patterns
-            if p.pattern_type == PatternType.RECURRING_SUCCESS
+            p for p in patterns if p.pattern_type == PatternType.RECURRING_SUCCESS
         ]
         assert len(success_patterns) >= 1
         assert any(p.name.find("build") >= 0 for p in success_patterns)
@@ -193,8 +192,7 @@ class TestPatternDetection:
         patterns = await reflection.analyze_patterns(project_id, time_range)
 
         failure_patterns = [
-            p for p in patterns
-            if p.pattern_type == PatternType.RECURRING_FAILURE
+            p for p in patterns if p.pattern_type == PatternType.RECURRING_FAILURE
         ]
         assert len(failure_patterns) >= 1
 
@@ -229,8 +227,7 @@ class TestPatternDetection:
         patterns = await reflection.analyze_patterns(project_id, time_range)
 
         action_patterns = [
-            p for p in patterns
-            if p.pattern_type == PatternType.ACTION_SEQUENCE
+            p for p in patterns if p.pattern_type == PatternType.ACTION_SEQUENCE
        ]
         assert len(action_patterns) >= 1
 
@@ -438,8 +435,7 @@ class TestAnomalyDetection:
         anomalies = await reflection.detect_anomalies(project_id, baseline_days=30)
 
         duration_anomalies = [
-            a for a in anomalies
-            if a.anomaly_type == AnomalyType.UNUSUAL_DURATION
+            a for a in anomalies if a.anomaly_type == AnomalyType.UNUSUAL_DURATION
         ]
         assert len(duration_anomalies) >= 1
 
@@ -475,8 +471,7 @@ class TestAnomalyDetection:
         anomalies = await reflection.detect_anomalies(project_id, baseline_days=30)
 
         outcome_anomalies = [
-            a for a in anomalies
-            if a.anomaly_type == AnomalyType.UNEXPECTED_OUTCOME
+            a for a in anomalies if a.anomaly_type == AnomalyType.UNEXPECTED_OUTCOME
         ]
         assert len(outcome_anomalies) >= 1
 
@@ -510,8 +505,7 @@ class TestAnomalyDetection:
         anomalies = await reflection.detect_anomalies(project_id, baseline_days=30)
 
         token_anomalies = [
-            a for a in anomalies
-            if a.anomaly_type == AnomalyType.UNUSUAL_TOKEN_USAGE
+            a for a in anomalies if a.anomaly_type == AnomalyType.UNUSUAL_TOKEN_USAGE
         ]
         assert len(token_anomalies) >= 1
 
@@ -650,9 +644,7 @@ class TestInsightGeneration:
 
         insights = await reflection.generate_insights(project_id)
 
-        trend_insights = [
-            i for i in insights if i.insight_type == InsightType.TREND
-        ]
+        trend_insights = [i for i in insights if i.insight_type == InsightType.TREND]
         assert len(trend_insights) >= 1
 
     async def test_insights_sorted_by_priority(
@@ -662,10 +654,7 @@ class TestInsightGeneration:
         """Should sort insights by priority."""
         project_id = uuid4()
 
-        episodes = [
-            create_mock_episode(outcome=Outcome.SUCCESS)
-            for _ in range(10)
-        ]
+        episodes = [create_mock_episode(outcome=Outcome.SUCCESS) for _ in range(10)]
 
         mock_episodic = MagicMock()
         mock_episodic.get_recent = AsyncMock(return_value=episodes)