feat(memory): add semantic memory implementation (Issue #91)

Implements semantic memory with fact storage, retrieval, and verification:

Core functionality (a brief usage sketch follows this list):
- SemanticMemory class for fact storage/retrieval
- Fact storage as subject-predicate-object triples
- Duplicate detection with reinforcement
- Semantic search with text-based fallback
- Entity-based retrieval
- Confidence scoring and decay
- Conflict resolution
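
A rough usage sketch of the above. Method names and the module path are illustrative assumptions, not necessarily the final SemanticMemory API; only the subject-predicate-object + confidence shape comes from this change (the example values mirror the test fixtures):

```python
# Hypothetical usage -- store_fact()/search_facts()/get_facts_for_entity() are
# placeholder names, and the import path is assumed from the package layout.
from app.services.memory.semantic.memory import SemanticMemory


async def remember_and_recall(db_session):
    memory = SemanticMemory(session=db_session)

    # Facts are stored as subject-predicate-object triples with a confidence
    # score; storing the same triple again reinforces it instead of duplicating.
    await memory.store_fact(
        subject="FastAPI",
        predicate="uses",
        obj="Starlette",
        confidence=0.8,
    )

    # Semantic search with a text-based fallback, plus entity-based retrieval.
    by_meaning = await memory.search_facts(query="What does FastAPI build on?")
    by_entity = await memory.get_facts_for_entity("FastAPI")
    return by_meaning, by_entity
```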

Supporting modules (a toy extraction pattern is sketched after this list):
- FactExtractor: Pattern-based fact extraction from episodes
- FactVerifier: Contradiction detection and reliability scoring
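
The extraction idea in miniature. This is a toy pattern for illustration only, not FactExtractor's actual rules or function names:

```python
import re

# Toy version of pattern-based triple extraction (illustrative only).
USES_PATTERN = re.compile(r"(?P<subject>\w+) uses (?P<object>[\w ]+)")


def extract_uses_facts(text: str) -> list[tuple[str, str, str]]:
    """Return (subject, predicate, object) triples for 'X uses Y' statements."""
    return [
        (m.group("subject"), "uses", m.group("object").strip())
        for m in USES_PATTERN.finditer(text)
    ]


# extract_uses_facts("FastAPI uses Starlette under the hood.")
# -> [('FastAPI', 'uses', 'Starlette under the hood')]
```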

Test coverage:
- 47 unit tests covering all modules
- extraction.py: 99% coverage
- verification.py: 95% coverage
- memory.py: 78% coverage

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-05 02:23:06 +01:00
parent 3554efe66a
commit e946787a61
8 changed files with 2447 additions and 1 deletion

@@ -0,0 +1,298 @@
# tests/unit/services/memory/semantic/test_verification.py
"""Unit tests for fact verification."""

from datetime import UTC, datetime
from unittest.mock import AsyncMock, MagicMock
from uuid import uuid4

import pytest

from app.services.memory.semantic.verification import (
    FactConflict,
    FactVerifier,
    VerificationResult,
)


def create_mock_fact_model(
    subject="FastAPI",
    predicate="uses",
    obj="Starlette",
    confidence=0.8,
    project_id=None,
):
    """Create a mock fact model for testing."""
    mock = MagicMock()
    mock.id = uuid4()
    mock.project_id = project_id
    mock.subject = subject
    mock.predicate = predicate
    mock.object = obj
    mock.confidence = confidence
    mock.source_episode_ids = []
    mock.first_learned = datetime.now(UTC)
    mock.last_reinforced = datetime.now(UTC)
    mock.reinforcement_count = 1
    mock.embedding = None
    mock.created_at = datetime.now(UTC)
    mock.updated_at = datetime.now(UTC)
    return mock


class TestFactConflict:
    """Tests for FactConflict dataclass."""

    def test_to_dict(self) -> None:
        """Test converting conflict to dictionary."""
        conflict = FactConflict(
            fact_a_id=uuid4(),
            fact_b_id=uuid4(),
            conflict_type="contradiction",
            description="Test conflict",
            suggested_resolution="Keep higher confidence",
        )

        result = conflict.to_dict()

        assert "fact_a_id" in result
        assert "fact_b_id" in result
        assert result["conflict_type"] == "contradiction"
        assert result["description"] == "Test conflict"


class TestVerificationResult:
    """Tests for VerificationResult dataclass."""

    def test_default_values(self) -> None:
        """Test default values."""
        result = VerificationResult(is_valid=True)

        assert result.is_valid is True
        assert result.confidence_adjustment == 0.0
        assert result.conflicts == []
        assert result.supporting_facts == []
        assert result.messages == []


class TestFactVerifier:
    """Tests for FactVerifier class."""

    @pytest.fixture
    def mock_session(self) -> AsyncMock:
        """Create a mock database session."""
        session = AsyncMock()
        mock_result = MagicMock()
        mock_result.scalars.return_value.all.return_value = []
        session.execute.return_value = mock_result
        return session

    @pytest.fixture
    def verifier(self, mock_session: AsyncMock) -> FactVerifier:
        """Create a fact verifier."""
        return FactVerifier(session=mock_session)

    @pytest.mark.asyncio
    async def test_verify_fact_valid(
        self,
        verifier: FactVerifier,
    ) -> None:
        """Test verifying a valid fact with no conflicts."""
        result = await verifier.verify_fact(
            subject="Python",
            predicate="is_a",
            obj="programming language",
        )

        assert result.is_valid is True
        assert len(result.conflicts) == 0

    @pytest.mark.asyncio
    async def test_verify_fact_with_support(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test verifying a fact with supporting facts."""
        # Mock finding supporting facts
        supporting = [create_mock_fact_model()]

        # First query: contradictions (empty)
        contradiction_result = MagicMock()
        contradiction_result.scalars.return_value.all.return_value = []

        # Second query: supporting facts
        support_result = MagicMock()
        support_result.scalars.return_value.all.return_value = supporting

        mock_session.execute.side_effect = [contradiction_result, support_result]

        result = await verifier.verify_fact(
            subject="Python",
            predicate="uses",
            obj="dynamic typing",
        )

        assert result.is_valid is True
        assert len(result.supporting_facts) >= 1
        assert result.confidence_adjustment > 0

    @pytest.mark.asyncio
    async def test_verify_fact_with_contradiction(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test verifying a fact with contradictions."""
        # Mock finding contradicting fact
        contradicting = create_mock_fact_model(
            subject="Python",
            predicate="does_not_use",
            obj="static typing",
        )
        contradiction_result = MagicMock()
        contradiction_result.scalars.return_value.all.return_value = [contradicting]

        support_result = MagicMock()
        support_result.scalars.return_value.all.return_value = []

        mock_session.execute.side_effect = [contradiction_result, support_result]

        result = await verifier.verify_fact(
            subject="Python",
            predicate="uses",
            obj="static typing",
        )

        assert result.is_valid is False
        assert len(result.conflicts) >= 1
        assert result.confidence_adjustment < 0

    def test_get_opposite_predicates(
        self,
        verifier: FactVerifier,
    ) -> None:
        """Test getting opposite predicates."""
        opposites = verifier._get_opposite_predicates("uses")

        assert "does_not_use" in opposites

    def test_get_opposite_predicates_unknown(
        self,
        verifier: FactVerifier,
    ) -> None:
        """Test getting opposites for unknown predicate."""
        opposites = verifier._get_opposite_predicates("unknown_predicate")

        assert opposites == []

    @pytest.mark.asyncio
    async def test_find_all_conflicts_empty(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test finding all conflicts in empty fact base."""
        mock_result = MagicMock()
        mock_result.scalars.return_value.all.return_value = []
        mock_session.execute.return_value = mock_result

        conflicts = await verifier.find_all_conflicts()

        assert conflicts == []

    @pytest.mark.asyncio
    async def test_find_all_conflicts_no_conflicts(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test finding conflicts when there are none."""
        # Two facts with different subjects
        fact1 = create_mock_fact_model(subject="Python", predicate="uses")
        fact2 = create_mock_fact_model(subject="JavaScript", predicate="uses")
        mock_result = MagicMock()
        mock_result.scalars.return_value.all.return_value = [fact1, fact2]
        mock_session.execute.return_value = mock_result

        conflicts = await verifier.find_all_conflicts()

        assert conflicts == []

    @pytest.mark.asyncio
    async def test_find_all_conflicts_with_contradiction(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test finding contradicting facts."""
        # Two contradicting facts
        fact1 = create_mock_fact_model(
            subject="Python",
            predicate="best_practice",
            obj="Use type hints",
        )
        fact2 = create_mock_fact_model(
            subject="Python",
            predicate="anti_pattern",
            obj="Use type hints",
        )
        mock_result = MagicMock()
        mock_result.scalars.return_value.all.return_value = [fact1, fact2]
        mock_session.execute.return_value = mock_result

        conflicts = await verifier.find_all_conflicts()

        assert len(conflicts) == 1
        assert conflicts[0].conflict_type == "contradiction"

    @pytest.mark.asyncio
    async def test_get_fact_reliability_score_not_found(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test reliability score for non-existent fact."""
        mock_result = MagicMock()
        mock_result.scalar_one_or_none.return_value = None
        mock_session.execute.return_value = mock_result

        score = await verifier.get_fact_reliability_score(uuid4())

        assert score == 0.0

    @pytest.mark.asyncio
    async def test_get_fact_reliability_score(
        self,
        verifier: FactVerifier,
        mock_session: AsyncMock,
    ) -> None:
        """Test calculating reliability score."""
        fact = create_mock_fact_model(confidence=0.8)
        fact.reinforcement_count = 5

        # Query 1: Get fact
        fact_result = MagicMock()
        fact_result.scalar_one_or_none.return_value = fact

        # Query 2: Supporting facts
        support_result = MagicMock()
        support_result.scalars.return_value.all.return_value = []

        # Query 3: Contradictions
        conflict_result = MagicMock()
        conflict_result.scalars.return_value.all.return_value = []

        mock_session.execute.side_effect = [
            fact_result,
            support_result,
            conflict_result,
        ]

        score = await verifier.get_fact_reliability_score(fact.id)

        # Score should be >= confidence (0.8) due to reinforcement bonus
        assert score >= 0.8
        assert score <= 1.0
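
For orientation, the shape of the two value objects exercised above can be read off the assertions. The sketch below is inferred from these tests; field defaults and the to_dict serialization details are assumptions, not code copied from verification.py:

```python
# Inferred sketch -- not the actual app/services/memory/semantic/verification.py.
from dataclasses import dataclass, field
from typing import Any
from uuid import UUID


@dataclass
class FactConflict:
    """A detected conflict between two stored facts."""

    fact_a_id: UUID
    fact_b_id: UUID
    conflict_type: str  # e.g. "contradiction"
    description: str
    suggested_resolution: str

    def to_dict(self) -> dict[str, Any]:
        # UUIDs serialized as strings here; the real implementation may differ.
        return {
            "fact_a_id": str(self.fact_a_id),
            "fact_b_id": str(self.fact_b_id),
            "conflict_type": self.conflict_type,
            "description": self.description,
            "suggested_resolution": self.suggested_resolution,
        }


@dataclass
class VerificationResult:
    """Outcome of verifying a candidate fact against the existing fact base."""

    is_valid: bool
    confidence_adjustment: float = 0.0
    conflicts: list[FactConflict] = field(default_factory=list)
    supporting_facts: list[Any] = field(default_factory=list)
    messages: list[str] = field(default_factory=list)
```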