Files
syndarix/backend/tests/services/context/test_adapters.py
Felipe Cardoso 7828d35e06 feat(context): implement model adapters for Claude and OpenAI (#83)
Phase 5 of Context Management Engine - Model Adapters:

- Add ModelAdapter abstract base class with model matching
- Add DefaultAdapter for unknown models (plain text)
- Add ClaudeAdapter with XML-based formatting:
  - <system_instructions> for system context
  - <reference_documents>/<document> for knowledge
  - <conversation_history>/<message> for chat
  - <tool_results>/<tool_result> for tool outputs
  - XML escaping for special characters
- Add OpenAIAdapter with markdown formatting:
  - ## headers for sections
  - ### Source headers for documents
  - **ROLE** bold labels for conversation
  - Code blocks for tool outputs
- Add get_adapter() factory function for model selection

Tests: 33 new tests, 256 total context tests passing

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-04 02:36:32 +01:00

522 lines
18 KiB
Python

"""Tests for model adapters."""
import pytest
from app.services.context.adapters import (
ClaudeAdapter,
DefaultAdapter,
ModelAdapter,
OpenAIAdapter,
get_adapter,
)
from app.services.context.types import (
ContextType,
ConversationContext,
KnowledgeContext,
MessageRole,
SystemContext,
TaskContext,
ToolContext,
)
class TestGetAdapter:
    """Tests for the get_adapter() factory function."""

    def test_claude_models(self) -> None:
        """Claude-family identifiers all resolve to ClaudeAdapter."""
        claude_names = (
            "claude-3-sonnet",
            "claude-3-opus",
            "claude-3-haiku",
            "claude-2",
            "anthropic/claude-3-sonnet",
        )
        for name in claude_names:
            assert isinstance(get_adapter(name), ClaudeAdapter)

    def test_openai_models(self) -> None:
        """OpenAI-family identifiers (gpt-*, o*-series) resolve to OpenAIAdapter."""
        openai_names = (
            "gpt-4",
            "gpt-4-turbo",
            "gpt-3.5-turbo",
            "openai/gpt-4",
            "o1-mini",
            "o3-mini",
        )
        for name in openai_names:
            assert isinstance(get_adapter(name), OpenAIAdapter)

    def test_unknown_models(self) -> None:
        """Unrecognized model names fall back to DefaultAdapter."""
        for name in ("llama-2", "mistral-7b", "custom-model"):
            assert isinstance(get_adapter(name), DefaultAdapter)
class TestModelAdapterBase:
    """Tests for behavior inherited from the ModelAdapter base class."""

    def test_get_type_order(self) -> None:
        """Default ordering runs system → task → knowledge → conversation → tool."""
        expected = [
            ContextType.SYSTEM,
            ContextType.TASK,
            ContextType.KNOWLEDGE,
            ContextType.CONVERSATION,
            ContextType.TOOL,
        ]
        assert DefaultAdapter().get_type_order() == expected

    def test_group_by_type(self) -> None:
        """Contexts are bucketed by type; types with no entries get no key."""
        grouped = DefaultAdapter().group_by_type(
            [
                SystemContext(content="System", source="system"),
                TaskContext(content="Task", source="task"),
                KnowledgeContext(content="Knowledge", source="docs"),
                SystemContext(content="System 2", source="system"),
            ]
        )
        assert len(grouped[ContextType.SYSTEM]) == 2
        assert len(grouped[ContextType.TASK]) == 1
        assert len(grouped[ContextType.KNOWLEDGE]) == 1
        assert ContextType.CONVERSATION not in grouped

    def test_matches_model_default(self) -> None:
        """DefaultAdapter is the catch-all: it accepts any model name."""
        for name in ("anything", "claude-3", "gpt-4"):
            assert DefaultAdapter.matches_model(name)
class TestDefaultAdapter:
    """Tests for DefaultAdapter's plain-text formatting."""

    def test_format_empty(self) -> None:
        """Formatting an empty context list yields an empty string."""
        assert DefaultAdapter().format([]) == ""

    def test_format_system(self) -> None:
        """System content appears verbatim in the output."""
        rendered = DefaultAdapter().format(
            [SystemContext(content="You are helpful.", source="system")]
        )
        assert "You are helpful." in rendered

    def test_format_task(self) -> None:
        """Task content is introduced by a plain 'Task:' label."""
        rendered = DefaultAdapter().format(
            [TaskContext(content="Write a function.", source="task")]
        )
        for fragment in ("Task:", "Write a function."):
            assert fragment in rendered

    def test_format_knowledge(self) -> None:
        """Knowledge content is introduced by a 'Reference Information:' label."""
        rendered = DefaultAdapter().format(
            [KnowledgeContext(content="Documentation here.", source="docs")]
        )
        for fragment in ("Reference Information:", "Documentation here."):
            assert fragment in rendered

    def test_format_conversation(self) -> None:
        """Conversation content is introduced by a 'Previous Conversation:' label."""
        rendered = DefaultAdapter().format(
            [
                ConversationContext(
                    content="Hello!",
                    source="chat",
                    role=MessageRole.USER,
                )
            ]
        )
        for fragment in ("Previous Conversation:", "Hello!"):
            assert fragment in rendered

    def test_format_tool(self) -> None:
        """Tool output is introduced by a 'Tool Results:' label."""
        rendered = DefaultAdapter().format(
            [
                ToolContext(
                    content="Result: success",
                    source="tool",
                    metadata={"tool_name": "search"},
                )
            ]
        )
        for fragment in ("Tool Results:", "Result: success"):
            assert fragment in rendered
class TestClaudeAdapter:
    """Tests for ClaudeAdapter's XML-based prompt formatting."""

    def test_matches_model(self) -> None:
        """Claude/Anthropic identifiers match; other vendors do not."""
        for name in ("claude-3-sonnet", "claude-3-opus", "anthropic/claude-3-haiku"):
            assert ClaudeAdapter.matches_model(name)
        for name in ("gpt-4", "llama-2"):
            assert not ClaudeAdapter.matches_model(name)

    def test_format_empty(self) -> None:
        """Formatting an empty context list yields an empty string."""
        assert ClaudeAdapter().format([]) == ""

    def test_format_system_uses_xml(self) -> None:
        """System content is wrapped in <system_instructions> tags."""
        rendered = ClaudeAdapter().format(
            [SystemContext(content="You are helpful.", source="system")]
        )
        for fragment in (
            "<system_instructions>",
            "</system_instructions>",
            "You are helpful.",
        ):
            assert fragment in rendered

    def test_format_task_uses_xml(self) -> None:
        """Task content is wrapped in <current_task> tags."""
        rendered = ClaudeAdapter().format(
            [TaskContext(content="Write a function.", source="task")]
        )
        for fragment in ("<current_task>", "</current_task>", "Write a function."):
            assert fragment in rendered

    def test_format_knowledge_uses_document_tags(self) -> None:
        """Knowledge items become <document> entries inside <reference_documents>."""
        rendered = ClaudeAdapter().format(
            [
                KnowledgeContext(
                    content="Documentation here.",
                    source="docs/api.md",
                    relevance_score=0.9,
                )
            ]
        )
        for fragment in (
            "<reference_documents>",
            "</reference_documents>",
            '<document source="docs/api.md"',
            "</document>",
            "Documentation here.",
        ):
            assert fragment in rendered

    def test_format_knowledge_with_score(self) -> None:
        """A relevance score in metadata surfaces as a relevance attribute."""
        rendered = ClaudeAdapter().format(
            [
                KnowledgeContext(
                    content="Doc content.",
                    source="docs/api.md",
                    metadata={"relevance_score": 0.95},
                )
            ]
        )
        assert 'relevance="0.95"' in rendered

    def test_format_conversation_uses_message_tags(self) -> None:
        """Each turn becomes a role-attributed <message> in <conversation_history>."""
        turns = [
            ConversationContext(
                content="Hello!",
                source="chat",
                role=MessageRole.USER,
                metadata={"role": "user"},
            ),
            ConversationContext(
                content="Hi there!",
                source="chat",
                role=MessageRole.ASSISTANT,
                metadata={"role": "assistant"},
            ),
        ]
        rendered = ClaudeAdapter().format(turns)
        for fragment in (
            "<conversation_history>",
            "</conversation_history>",
            '<message role="user">',
            '<message role="assistant">',
            "Hello!",
            "Hi there!",
        ):
            assert fragment in rendered

    def test_format_tool_uses_tool_result_tags(self) -> None:
        """Tool output becomes a named <tool_result> inside <tool_results>."""
        rendered = ClaudeAdapter().format(
            [
                ToolContext(
                    content='{"status": "ok"}',
                    source="tool",
                    metadata={"tool_name": "search", "status": "success"},
                )
            ]
        )
        for fragment in (
            "<tool_results>",
            "</tool_results>",
            '<tool_result name="search"',
            'status="success"',
            "</tool_result>",
        ):
            assert fragment in rendered

    def test_format_multiple_types_in_order(self) -> None:
        """Sections are emitted in canonical order regardless of input order."""
        rendered = ClaudeAdapter().format(
            [
                KnowledgeContext(content="Knowledge", source="docs"),
                SystemContext(content="System", source="system"),
                TaskContext(content="Task", source="task"),
            ]
        )
        # Locate each section's opening tag and compare positions.
        system_at = rendered.find("<system_instructions>")
        task_at = rendered.find("<current_task>")
        knowledge_at = rendered.find("<reference_documents>")
        assert system_at < task_at < knowledge_at

    def test_escape_xml_in_source(self) -> None:
        """Quotes and ampersands in source paths are XML-escaped."""
        rendered = ClaudeAdapter().format(
            [
                KnowledgeContext(
                    content="Doc content.",
                    source='path/with"quotes&stuff.md',
                )
            ]
        )
        assert "&quot;" in rendered
        assert "&amp;" in rendered
class TestOpenAIAdapter:
    """Tests for OpenAIAdapter's markdown-based prompt formatting."""

    def test_matches_model(self) -> None:
        """gpt-* and o*-series identifiers match; other vendors do not."""
        matching = (
            "gpt-4",
            "gpt-4-turbo",
            "gpt-3.5-turbo",
            "openai/gpt-4",
            "o1-preview",
            "o3-mini",
        )
        for name in matching:
            assert OpenAIAdapter.matches_model(name)
        for name in ("claude-3", "llama-2"):
            assert not OpenAIAdapter.matches_model(name)

    def test_format_empty(self) -> None:
        """Formatting an empty context list yields an empty string."""
        assert OpenAIAdapter().format([]) == ""

    def test_format_system_plain(self) -> None:
        """System content is emitted bare, without any markdown header."""
        rendered = OpenAIAdapter().format(
            [SystemContext(content="You are helpful.", source="system")]
        )
        assert "You are helpful." in rendered
        assert "##" not in rendered  # no markdown headers for system-only input

    def test_format_task_uses_markdown(self) -> None:
        """Task content sits under a '## Current Task' header."""
        rendered = OpenAIAdapter().format(
            [TaskContext(content="Write a function.", source="task")]
        )
        for fragment in ("## Current Task", "Write a function."):
            assert fragment in rendered

    def test_format_knowledge_uses_markdown(self) -> None:
        """Knowledge gets a section header plus a per-document source header."""
        rendered = OpenAIAdapter().format(
            [
                KnowledgeContext(
                    content="Documentation here.",
                    source="docs/api.md",
                    relevance_score=0.9,
                )
            ]
        )
        for fragment in (
            "## Reference Documents",
            "### Source: docs/api.md",
            "Documentation here.",
        ):
            assert fragment in rendered

    def test_format_knowledge_with_score(self) -> None:
        """A relevance score in metadata is shown as '(relevance: ...)'."""
        rendered = OpenAIAdapter().format(
            [
                KnowledgeContext(
                    content="Doc content.",
                    source="docs/api.md",
                    metadata={"relevance_score": 0.95},
                )
            ]
        )
        assert "(relevance: 0.95)" in rendered

    def test_format_conversation_uses_bold_roles(self) -> None:
        """Conversation turns are labeled with bold uppercase role names."""
        turns = [
            ConversationContext(
                content="Hello!",
                source="chat",
                role=MessageRole.USER,
                metadata={"role": "user"},
            ),
            ConversationContext(
                content="Hi there!",
                source="chat",
                role=MessageRole.ASSISTANT,
                metadata={"role": "assistant"},
            ),
        ]
        rendered = OpenAIAdapter().format(turns)
        for fragment in ("**USER**:", "**ASSISTANT**:", "Hello!", "Hi there!"):
            assert fragment in rendered

    def test_format_tool_uses_code_blocks(self) -> None:
        """Tool output is fenced in a code block under a per-tool header."""
        rendered = OpenAIAdapter().format(
            [
                ToolContext(
                    content='{"status": "ok"}',
                    source="tool",
                    metadata={"tool_name": "search", "status": "success"},
                )
            ]
        )
        for fragment in (
            "## Recent Tool Results",
            "### Tool: search (success)",
            "```",  # fenced code block
            '{"status": "ok"}',
        ):
            assert fragment in rendered

    def test_format_multiple_types_in_order(self) -> None:
        """System (headerless) precedes task, which precedes knowledge."""
        rendered = OpenAIAdapter().format(
            [
                KnowledgeContext(content="Knowledge", source="docs"),
                SystemContext(content="System", source="system"),
                TaskContext(content="Task", source="task"),
            ]
        )
        system_at = rendered.find("System")
        task_at = rendered.find("## Current Task")
        knowledge_at = rendered.find("## Reference Documents")
        assert system_at < task_at < knowledge_at
class TestAdapterIntegration:
    """End-to-end formatting tests using a realistic mix of context types."""

    @staticmethod
    def _sample_contexts() -> list:
        """Build one context of each type, mirroring a real agent session."""
        return [
            SystemContext(
                content="You are an expert Python developer.",
                source="system",
            ),
            TaskContext(
                content="Implement user authentication.",
                source="task:AUTH-123",
            ),
            KnowledgeContext(
                content="JWT tokens provide stateless authentication...",
                source="docs/auth/jwt.md",
                relevance_score=0.9,
            ),
            ConversationContext(
                content="Can you help me implement JWT auth?",
                source="chat",
                role=MessageRole.USER,
                metadata={"role": "user"},
            ),
            ToolContext(
                content='{"file": "auth.py", "status": "created"}',
                source="tool",
                metadata={"tool_name": "file_create"},
            ),
        ]

    def test_full_context_formatting_claude(self) -> None:
        """Claude output contains every XML section and all payload text."""
        rendered = ClaudeAdapter().format(self._sample_contexts())
        # Every section wrapper must be present.
        for tag in (
            "<system_instructions>",
            "<current_task>",
            "<reference_documents>",
            "<conversation_history>",
            "<tool_results>",
        ):
            assert tag in rendered
        # Payload text from each context must survive formatting.
        for snippet in (
            "expert Python developer",
            "user authentication",
            "JWT tokens",
            "help me implement",
            "file_create",
        ):
            assert snippet in rendered

    def test_full_context_formatting_openai(self) -> None:
        """OpenAI output contains every markdown section and all payload text."""
        rendered = OpenAIAdapter().format(self._sample_contexts())
        # Every section marker must be present.
        for marker in (
            "## Current Task",
            "## Reference Documents",
            "## Recent Tool Results",
            "**USER**:",
        ):
            assert marker in rendered
        # Payload text from each context must survive formatting.
        for snippet in (
            "expert Python developer",
            "user authentication",
            "JWT tokens",
            "help me implement",
            "file_create",
        ):
            assert snippet in rendered