feat(context): implement model adapters for Claude and OpenAI (#83)

Phase 5 of Context Management Engine - Model Adapters:

- Add ModelAdapter abstract base class with model matching
- Add DefaultAdapter for unknown models (plain text)
- Add ClaudeAdapter with XML-based formatting:
  - <system_instructions> for system context
  - <reference_documents>/<document> for knowledge
  - <conversation_history>/<message> for chat
  - <tool_results>/<tool_result> for tool outputs
  - XML escaping for special characters in attribute values
- Add OpenAIAdapter with markdown formatting:
  - ## headers for sections
  - ### Source headers for documents
  - **ROLE** bold labels for conversation
  - Code blocks for tool outputs
- Add get_adapter() factory function for model selection

Tests: 33 new tests, 256 total context tests passing
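
Usage, as a minimal sketch (import paths and constructor signatures taken from the new test module):

from app.services.context.adapters import get_adapter
from app.services.context.types import SystemContext, TaskContext

contexts = [
    SystemContext(content="You are helpful.", source="system"),
    TaskContext(content="Write a function.", source="task"),
]
adapter = get_adapter("claude-3-opus")  # resolves to ClaudeAdapter
print(adapter.format(contexts))
# <system_instructions>
# You are helpful.
# </system_instructions>
#
# <current_task>
# Write a function.
# </current_task>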

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-04 02:36:32 +01:00
parent 6b07e62f00
commit 7828d35e06
6 changed files with 1083 additions and 1 deletion


@@ -76,6 +76,15 @@ from .compression import (
TruncationStrategy,
)
# Adapters
from .adapters import (
ClaudeAdapter,
DefaultAdapter,
get_adapter,
ModelAdapter,
OpenAIAdapter,
)
# Prioritization
from .prioritization import (
ContextRanker,
@@ -110,6 +119,12 @@ from .types import (
)
__all__ = [
# Adapters
"ClaudeAdapter",
"DefaultAdapter",
"get_adapter",
"ModelAdapter",
"OpenAIAdapter",
# Assembly
"ContextPipeline",
"PipelineMetrics",


@@ -1,5 +1,35 @@
"""
Model Adapters Module.
Provides model-specific context formatting adapters.
"""
from .base import DefaultAdapter, ModelAdapter
from .claude import ClaudeAdapter
from .openai import OpenAIAdapter
def get_adapter(model: str) -> ModelAdapter:
"""
Get the appropriate adapter for a model.
Args:
model: Model name
Returns:
Adapter instance for the model
"""
if ClaudeAdapter.matches_model(model):
return ClaudeAdapter()
elif OpenAIAdapter.matches_model(model):
return OpenAIAdapter()
return DefaultAdapter()
__all__ = [
"ClaudeAdapter",
"DefaultAdapter",
"get_adapter",
"ModelAdapter",
"OpenAIAdapter",
]
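
Dispatch precedence, sketched: ClaudeAdapter is tried first, then OpenAIAdapter, with DefaultAdapter as the unconditional fallback (it matches every model name).

get_adapter("anthropic/claude-3-sonnet")  # -> ClaudeAdapter ("claude", "anthropic")
get_adapter("openai/gpt-4")               # -> OpenAIAdapter ("gpt", "openai", "o1", "o3")
get_adapter("llama-2")                    # -> DefaultAdapter (no pattern matches)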


@@ -0,0 +1,178 @@
"""
Base Model Adapter.
Abstract base class for model-specific context formatting.
"""
from abc import ABC, abstractmethod
from typing import Any
from ..types import BaseContext, ContextType
class ModelAdapter(ABC):
"""
Abstract base adapter for model-specific context formatting.
Each adapter knows how to format contexts for optimal
understanding by a specific LLM family (Claude, OpenAI, etc.).
"""
# Model name patterns this adapter handles
MODEL_PATTERNS: list[str] = []
@classmethod
def matches_model(cls, model: str) -> bool:
"""
Check if this adapter handles the given model.
Args:
model: Model name to check
Returns:
True if this adapter handles the model
"""
model_lower = model.lower()
return any(pattern in model_lower for pattern in cls.MODEL_PATTERNS)
@abstractmethod
def format(
self,
contexts: list[BaseContext],
**kwargs: Any,
) -> str:
"""
Format contexts for the target model.
Args:
contexts: List of contexts to format
**kwargs: Additional formatting options
Returns:
Formatted context string
"""
...
@abstractmethod
def format_type(
self,
contexts: list[BaseContext],
context_type: ContextType,
**kwargs: Any,
) -> str:
"""
Format contexts of a specific type.
Args:
contexts: List of contexts of the same type
context_type: The type of contexts
**kwargs: Additional formatting options
Returns:
Formatted string for this context type
"""
...
def get_type_order(self) -> list[ContextType]:
"""
Get the preferred order of context types.
Returns:
List of context types in preferred order
"""
return [
ContextType.SYSTEM,
ContextType.TASK,
ContextType.KNOWLEDGE,
ContextType.CONVERSATION,
ContextType.TOOL,
]
def group_by_type(
self, contexts: list[BaseContext]
) -> dict[ContextType, list[BaseContext]]:
"""
Group contexts by their type.
Args:
contexts: List of contexts to group
Returns:
Dictionary mapping context type to list of contexts
"""
by_type: dict[ContextType, list[BaseContext]] = {}
for context in contexts:
ct = context.get_type()
if ct not in by_type:
by_type[ct] = []
by_type[ct].append(context)
return by_type
def get_separator(self) -> str:
"""
Get the separator between context sections.
Returns:
Separator string
"""
return "\n\n"
class DefaultAdapter(ModelAdapter):
"""
Default adapter for unknown models.
Uses simple plain-text formatting with minimal structure.
"""
MODEL_PATTERNS: list[str] = [] # Fallback adapter
@classmethod
def matches_model(cls, model: str) -> bool:
"""Always returns True as fallback."""
return True
def format(
self,
contexts: list[BaseContext],
**kwargs: Any,
) -> str:
"""Format contexts as plain text."""
if not contexts:
return ""
by_type = self.group_by_type(contexts)
parts: list[str] = []
for ct in self.get_type_order():
if ct in by_type:
formatted = self.format_type(by_type[ct], ct, **kwargs)
if formatted:
parts.append(formatted)
return self.get_separator().join(parts)
def format_type(
self,
contexts: list[BaseContext],
context_type: ContextType,
**kwargs: Any,
) -> str:
"""Format contexts of a type as plain text."""
if not contexts:
return ""
content = "\n\n".join(c.content for c in contexts)
if context_type == ContextType.SYSTEM:
return content
elif context_type == ContextType.TASK:
return f"Task:\n{content}"
elif context_type == ContextType.KNOWLEDGE:
return f"Reference Information:\n{content}"
elif context_type == ContextType.CONVERSATION:
return f"Previous Conversation:\n{content}"
elif context_type == ContextType.TOOL:
return f"Tool Results:\n{content}"
return content
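
Supporting another model family means subclassing ModelAdapter, declaring its name patterns, and implementing the two abstract methods; the grouping and ordering helpers come for free. A hypothetical sketch (GeminiAdapter and its patterns are illustrative, not part of this commit):

class GeminiAdapter(ModelAdapter):
    """Hypothetical adapter reusing the base grouping/ordering helpers."""
    MODEL_PATTERNS: list[str] = ["gemini", "google"]
    def format(self, contexts: list[BaseContext], **kwargs: Any) -> str:
        by_type = self.group_by_type(contexts)
        parts: list[str] = []
        for ct in self.get_type_order():
            if ct in by_type:
                formatted = self.format_type(by_type[ct], ct, **kwargs)
                if formatted:
                    parts.append(formatted)
        return self.get_separator().join(parts)
    def format_type(
        self,
        contexts: list[BaseContext],
        context_type: ContextType,
        **kwargs: Any,
    ) -> str:
        if not contexts:
            return ""
        content = "\n\n".join(c.content for c in contexts)
        # Every ContextType member has a .name, whatever its value
        return f"[{context_type.name}]\n{content}"

Note that get_adapter() would also need a branch for the new class; the factory hard-codes the known families rather than iterating a registry.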


@@ -0,0 +1,178 @@
"""
Claude Model Adapter.
Provides Claude-specific context formatting using XML tags
which Claude models understand natively.
"""
from typing import Any
from ..types import BaseContext, ContextType
from .base import ModelAdapter
class ClaudeAdapter(ModelAdapter):
"""
Claude-specific context formatting adapter.
Claude models have native understanding of XML structure,
so we use XML tags for clear delineation of context types.
Features:
- XML tags for each context type
- Document structure for knowledge contexts
- Role-based message formatting for conversations
- Tool result wrapping with tool names
"""
MODEL_PATTERNS: list[str] = ["claude", "anthropic"]
def format(
self,
contexts: list[BaseContext],
**kwargs: Any,
) -> str:
"""
Format contexts for Claude models.
Uses XML tags for structured content that Claude
understands natively.
Args:
contexts: List of contexts to format
**kwargs: Additional formatting options
Returns:
XML-structured context string
"""
if not contexts:
return ""
by_type = self.group_by_type(contexts)
parts: list[str] = []
for ct in self.get_type_order():
if ct in by_type:
formatted = self.format_type(by_type[ct], ct, **kwargs)
if formatted:
parts.append(formatted)
return self.get_separator().join(parts)
def format_type(
self,
contexts: list[BaseContext],
context_type: ContextType,
**kwargs: Any,
) -> str:
"""
Format contexts of a specific type for Claude.
Args:
contexts: List of contexts of the same type
context_type: The type of contexts
**kwargs: Additional formatting options
Returns:
XML-formatted string for this context type
"""
if not contexts:
return ""
if context_type == ContextType.SYSTEM:
return self._format_system(contexts)
elif context_type == ContextType.TASK:
return self._format_task(contexts)
elif context_type == ContextType.KNOWLEDGE:
return self._format_knowledge(contexts)
elif context_type == ContextType.CONVERSATION:
return self._format_conversation(contexts)
elif context_type == ContextType.TOOL:
return self._format_tool(contexts)
return "\n".join(c.content for c in contexts)
def _format_system(self, contexts: list[BaseContext]) -> str:
"""Format system contexts."""
content = "\n\n".join(c.content for c in contexts)
return f"<system_instructions>\n{content}\n</system_instructions>"
def _format_task(self, contexts: list[BaseContext]) -> str:
"""Format task contexts."""
content = "\n\n".join(c.content for c in contexts)
return f"<current_task>\n{content}\n</current_task>"
def _format_knowledge(self, contexts: list[BaseContext]) -> str:
"""
Format knowledge contexts as structured documents.
Each knowledge context becomes a document with source attribution.
"""
parts = ["<reference_documents>"]
for ctx in contexts:
source = self._escape_xml(ctx.source)
content = ctx.content
score = ctx.metadata.get("score", ctx.metadata.get("relevance_score", ""))
if score:
parts.append(f'<document source="{source}" relevance="{score}">')
else:
parts.append(f'<document source="{source}">')
parts.append(content)
parts.append("</document>")
parts.append("</reference_documents>")
return "\n".join(parts)
def _format_conversation(self, contexts: list[BaseContext]) -> str:
"""
Format conversation contexts as message history.
Uses role-based message tags for clear turn delineation.
"""
parts = ["<conversation_history>"]
for ctx in contexts:
role = ctx.metadata.get("role", "user")
parts.append(f'<message role="{role}">')
parts.append(ctx.content)
parts.append("</message>")
parts.append("</conversation_history>")
return "\n".join(parts)
def _format_tool(self, contexts: list[BaseContext]) -> str:
"""
Format tool contexts as tool results.
Each tool result is wrapped with the tool name.
"""
parts = ["<tool_results>"]
for ctx in contexts:
tool_name = ctx.metadata.get("tool_name", "unknown")
status = ctx.metadata.get("status", "")
if status:
parts.append(f'<tool_result name="{tool_name}" status="{status}">')
else:
parts.append(f'<tool_result name="{tool_name}">')
parts.append(ctx.content)
parts.append("</tool_result>")
parts.append("</tool_results>")
return "\n".join(parts)
@staticmethod
def _escape_xml(text: str) -> str:
"""Escape XML special characters in attribute values."""
return (
text.replace("&", "&amp;")
.replace("<", "&lt;")
.replace(">", "&gt;")
.replace('"', "&quot;")
.replace("'", "&apos;")
)
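
For reference, the output shape for a single scored document, traced through _format_knowledge (constructor usage as in the tests; the score is read from metadata):

contexts = [
    KnowledgeContext(
        content="JWT tokens provide stateless authentication.",
        source="docs/auth/jwt.md",
        metadata={"relevance_score": 0.9},
    ),
]
print(ClaudeAdapter().format(contexts))
# <reference_documents>
# <document source="docs/auth/jwt.md" relevance="0.9">
# JWT tokens provide stateless authentication.
# </document>
# </reference_documents>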


@@ -0,0 +1,160 @@
"""
OpenAI Model Adapter.
Provides OpenAI-specific context formatting using markdown
which GPT models understand well.
"""
from typing import Any
from ..types import BaseContext, ContextType
from .base import ModelAdapter
class OpenAIAdapter(ModelAdapter):
"""
OpenAI-specific context formatting adapter.
GPT models work well with markdown formatting,
so we use headers and structured markdown for clarity.
Features:
- Markdown headers for each context type
- Source headers (###) for each document
- Bold role labels for conversations
- Code blocks for tool outputs
"""
MODEL_PATTERNS: list[str] = ["gpt", "openai", "o1", "o3"]
def format(
self,
contexts: list[BaseContext],
**kwargs: Any,
) -> str:
"""
Format contexts for OpenAI models.
Uses markdown formatting for structured content.
Args:
contexts: List of contexts to format
**kwargs: Additional formatting options
Returns:
Markdown-structured context string
"""
if not contexts:
return ""
by_type = self.group_by_type(contexts)
parts: list[str] = []
for ct in self.get_type_order():
if ct in by_type:
formatted = self.format_type(by_type[ct], ct, **kwargs)
if formatted:
parts.append(formatted)
return self.get_separator().join(parts)
def format_type(
self,
contexts: list[BaseContext],
context_type: ContextType,
**kwargs: Any,
) -> str:
"""
Format contexts of a specific type for OpenAI.
Args:
contexts: List of contexts of the same type
context_type: The type of contexts
**kwargs: Additional formatting options
Returns:
Markdown-formatted string for this context type
"""
if not contexts:
return ""
if context_type == ContextType.SYSTEM:
return self._format_system(contexts)
elif context_type == ContextType.TASK:
return self._format_task(contexts)
elif context_type == ContextType.KNOWLEDGE:
return self._format_knowledge(contexts)
elif context_type == ContextType.CONVERSATION:
return self._format_conversation(contexts)
elif context_type == ContextType.TOOL:
return self._format_tool(contexts)
return "\n".join(c.content for c in contexts)
def _format_system(self, contexts: list[BaseContext]) -> str:
"""Format system contexts."""
content = "\n\n".join(c.content for c in contexts)
return content
def _format_task(self, contexts: list[BaseContext]) -> str:
"""Format task contexts."""
content = "\n\n".join(c.content for c in contexts)
return f"## Current Task\n\n{content}"
def _format_knowledge(self, contexts: list[BaseContext]) -> str:
"""
Format knowledge contexts as structured documents.
Each knowledge context becomes a section with source attribution.
"""
parts = ["## Reference Documents\n"]
for ctx in contexts:
source = ctx.source
score = ctx.metadata.get("score", ctx.metadata.get("relevance_score", ""))
if score:
parts.append(f"### Source: {source} (relevance: {score})\n")
else:
parts.append(f"### Source: {source}\n")
parts.append(ctx.content)
parts.append("")
return "\n".join(parts)
def _format_conversation(self, contexts: list[BaseContext]) -> str:
"""
Format conversation contexts as message history.
Uses bold role labels for clear turn delineation.
"""
parts = []
for ctx in contexts:
role = ctx.metadata.get("role", "user").upper()
parts.append(f"**{role}**: {ctx.content}")
return "\n\n".join(parts)
def _format_tool(self, contexts: list[BaseContext]) -> str:
"""
Format tool contexts as tool results.
Each tool result is in a code block with the tool name.
"""
parts = ["## Recent Tool Results\n"]
for ctx in contexts:
tool_name = ctx.metadata.get("tool_name", "unknown")
status = ctx.metadata.get("status", "")
if status:
parts.append(f"### Tool: {tool_name} ({status})\n")
else:
parts.append(f"### Tool: {tool_name}\n")
parts.append(f"```\n{ctx.content}\n```")
parts.append("")
return "\n".join(parts)


@@ -0,0 +1,521 @@
"""Tests for model adapters."""
import pytest
from app.services.context.adapters import (
ClaudeAdapter,
DefaultAdapter,
ModelAdapter,
OpenAIAdapter,
get_adapter,
)
from app.services.context.types import (
ContextType,
ConversationContext,
KnowledgeContext,
MessageRole,
SystemContext,
TaskContext,
ToolContext,
)
class TestGetAdapter:
"""Tests for get_adapter function."""
def test_claude_models(self) -> None:
"""Test that Claude models get ClaudeAdapter."""
assert isinstance(get_adapter("claude-3-sonnet"), ClaudeAdapter)
assert isinstance(get_adapter("claude-3-opus"), ClaudeAdapter)
assert isinstance(get_adapter("claude-3-haiku"), ClaudeAdapter)
assert isinstance(get_adapter("claude-2"), ClaudeAdapter)
assert isinstance(get_adapter("anthropic/claude-3-sonnet"), ClaudeAdapter)
def test_openai_models(self) -> None:
"""Test that OpenAI models get OpenAIAdapter."""
assert isinstance(get_adapter("gpt-4"), OpenAIAdapter)
assert isinstance(get_adapter("gpt-4-turbo"), OpenAIAdapter)
assert isinstance(get_adapter("gpt-3.5-turbo"), OpenAIAdapter)
assert isinstance(get_adapter("openai/gpt-4"), OpenAIAdapter)
assert isinstance(get_adapter("o1-mini"), OpenAIAdapter)
assert isinstance(get_adapter("o3-mini"), OpenAIAdapter)
def test_unknown_models(self) -> None:
"""Test that unknown models get DefaultAdapter."""
assert isinstance(get_adapter("llama-2"), DefaultAdapter)
assert isinstance(get_adapter("mistral-7b"), DefaultAdapter)
assert isinstance(get_adapter("custom-model"), DefaultAdapter)
class TestModelAdapterBase:
"""Tests for ModelAdapter base class."""
def test_get_type_order(self) -> None:
"""Test default type ordering."""
adapter = DefaultAdapter()
order = adapter.get_type_order()
assert order == [
ContextType.SYSTEM,
ContextType.TASK,
ContextType.KNOWLEDGE,
ContextType.CONVERSATION,
ContextType.TOOL,
]
def test_group_by_type(self) -> None:
"""Test grouping contexts by type."""
adapter = DefaultAdapter()
contexts = [
SystemContext(content="System", source="system"),
TaskContext(content="Task", source="task"),
KnowledgeContext(content="Knowledge", source="docs"),
SystemContext(content="System 2", source="system"),
]
grouped = adapter.group_by_type(contexts)
assert len(grouped[ContextType.SYSTEM]) == 2
assert len(grouped[ContextType.TASK]) == 1
assert len(grouped[ContextType.KNOWLEDGE]) == 1
assert ContextType.CONVERSATION not in grouped
def test_matches_model_default(self) -> None:
"""Test that DefaultAdapter matches all models."""
assert DefaultAdapter.matches_model("anything")
assert DefaultAdapter.matches_model("claude-3")
assert DefaultAdapter.matches_model("gpt-4")
class TestDefaultAdapter:
"""Tests for DefaultAdapter."""
def test_format_empty(self) -> None:
"""Test formatting empty context list."""
adapter = DefaultAdapter()
result = adapter.format([])
assert result == ""
def test_format_system(self) -> None:
"""Test formatting system context."""
adapter = DefaultAdapter()
contexts = [
SystemContext(content="You are helpful.", source="system"),
]
result = adapter.format(contexts)
assert "You are helpful." in result
def test_format_task(self) -> None:
"""Test formatting task context."""
adapter = DefaultAdapter()
contexts = [
TaskContext(content="Write a function.", source="task"),
]
result = adapter.format(contexts)
assert "Task:" in result
assert "Write a function." in result
def test_format_knowledge(self) -> None:
"""Test formatting knowledge context."""
adapter = DefaultAdapter()
contexts = [
KnowledgeContext(content="Documentation here.", source="docs"),
]
result = adapter.format(contexts)
assert "Reference Information:" in result
assert "Documentation here." in result
def test_format_conversation(self) -> None:
"""Test formatting conversation context."""
adapter = DefaultAdapter()
contexts = [
ConversationContext(
content="Hello!",
source="chat",
role=MessageRole.USER,
),
]
result = adapter.format(contexts)
assert "Previous Conversation:" in result
assert "Hello!" in result
def test_format_tool(self) -> None:
"""Test formatting tool context."""
adapter = DefaultAdapter()
contexts = [
ToolContext(
content="Result: success",
source="tool",
metadata={"tool_name": "search"},
),
]
result = adapter.format(contexts)
assert "Tool Results:" in result
assert "Result: success" in result
class TestClaudeAdapter:
"""Tests for ClaudeAdapter."""
def test_matches_model(self) -> None:
"""Test model matching."""
assert ClaudeAdapter.matches_model("claude-3-sonnet")
assert ClaudeAdapter.matches_model("claude-3-opus")
assert ClaudeAdapter.matches_model("anthropic/claude-3-haiku")
assert not ClaudeAdapter.matches_model("gpt-4")
assert not ClaudeAdapter.matches_model("llama-2")
def test_format_empty(self) -> None:
"""Test formatting empty context list."""
adapter = ClaudeAdapter()
result = adapter.format([])
assert result == ""
def test_format_system_uses_xml(self) -> None:
"""Test that system context uses XML tags."""
adapter = ClaudeAdapter()
contexts = [
SystemContext(content="You are helpful.", source="system"),
]
result = adapter.format(contexts)
assert "<system_instructions>" in result
assert "</system_instructions>" in result
assert "You are helpful." in result
def test_format_task_uses_xml(self) -> None:
"""Test that task context uses XML tags."""
adapter = ClaudeAdapter()
contexts = [
TaskContext(content="Write a function.", source="task"),
]
result = adapter.format(contexts)
assert "<current_task>" in result
assert "</current_task>" in result
assert "Write a function." in result
def test_format_knowledge_uses_document_tags(self) -> None:
"""Test that knowledge uses document XML tags."""
adapter = ClaudeAdapter()
contexts = [
KnowledgeContext(
content="Documentation here.",
source="docs/api.md",
relevance_score=0.9,
),
]
result = adapter.format(contexts)
assert "<reference_documents>" in result
assert "</reference_documents>" in result
assert '<document source="docs/api.md"' in result
assert "</document>" in result
assert "Documentation here." in result
def test_format_knowledge_with_score(self) -> None:
"""Test that knowledge includes relevance score."""
adapter = ClaudeAdapter()
contexts = [
KnowledgeContext(
content="Doc content.",
source="docs/api.md",
metadata={"relevance_score": 0.95},
),
]
result = adapter.format(contexts)
assert 'relevance="0.95"' in result
def test_format_conversation_uses_message_tags(self) -> None:
"""Test that conversation uses message XML tags."""
adapter = ClaudeAdapter()
contexts = [
ConversationContext(
content="Hello!",
source="chat",
role=MessageRole.USER,
metadata={"role": "user"},
),
ConversationContext(
content="Hi there!",
source="chat",
role=MessageRole.ASSISTANT,
metadata={"role": "assistant"},
),
]
result = adapter.format(contexts)
assert "<conversation_history>" in result
assert "</conversation_history>" in result
assert '<message role="user">' in result
assert '<message role="assistant">' in result
assert "Hello!" in result
assert "Hi there!" in result
def test_format_tool_uses_tool_result_tags(self) -> None:
"""Test that tool results use tool_result XML tags."""
adapter = ClaudeAdapter()
contexts = [
ToolContext(
content='{"status": "ok"}',
source="tool",
metadata={"tool_name": "search", "status": "success"},
),
]
result = adapter.format(contexts)
assert "<tool_results>" in result
assert "</tool_results>" in result
assert '<tool_result name="search"' in result
assert 'status="success"' in result
assert "</tool_result>" in result
def test_format_multiple_types_in_order(self) -> None:
"""Test that multiple types are formatted in correct order."""
adapter = ClaudeAdapter()
contexts = [
KnowledgeContext(content="Knowledge", source="docs"),
SystemContext(content="System", source="system"),
TaskContext(content="Task", source="task"),
]
result = adapter.format(contexts)
# Find positions
system_pos = result.find("<system_instructions>")
task_pos = result.find("<current_task>")
knowledge_pos = result.find("<reference_documents>")
# Verify order
assert system_pos < task_pos < knowledge_pos
def test_escape_xml_in_source(self) -> None:
"""Test that XML special chars are escaped in source."""
adapter = ClaudeAdapter()
contexts = [
KnowledgeContext(
content="Doc content.",
source='path/with"quotes&stuff.md',
),
]
result = adapter.format(contexts)
assert "&quot;" in result
assert "&amp;" in result
class TestOpenAIAdapter:
"""Tests for OpenAIAdapter."""
def test_matches_model(self) -> None:
"""Test model matching."""
assert OpenAIAdapter.matches_model("gpt-4")
assert OpenAIAdapter.matches_model("gpt-4-turbo")
assert OpenAIAdapter.matches_model("gpt-3.5-turbo")
assert OpenAIAdapter.matches_model("openai/gpt-4")
assert OpenAIAdapter.matches_model("o1-preview")
assert OpenAIAdapter.matches_model("o3-mini")
assert not OpenAIAdapter.matches_model("claude-3")
assert not OpenAIAdapter.matches_model("llama-2")
def test_format_empty(self) -> None:
"""Test formatting empty context list."""
adapter = OpenAIAdapter()
result = adapter.format([])
assert result == ""
def test_format_system_plain(self) -> None:
"""Test that system content is plain."""
adapter = OpenAIAdapter()
contexts = [
SystemContext(content="You are helpful.", source="system"),
]
result = adapter.format(contexts)
# System content should be plain without headers
assert "You are helpful." in result
assert "##" not in result # No markdown headers for system
def test_format_task_uses_markdown(self) -> None:
"""Test that task uses markdown headers."""
adapter = OpenAIAdapter()
contexts = [
TaskContext(content="Write a function.", source="task"),
]
result = adapter.format(contexts)
assert "## Current Task" in result
assert "Write a function." in result
def test_format_knowledge_uses_markdown(self) -> None:
"""Test that knowledge uses markdown with source headers."""
adapter = OpenAIAdapter()
contexts = [
KnowledgeContext(
content="Documentation here.",
source="docs/api.md",
relevance_score=0.9,
),
]
result = adapter.format(contexts)
assert "## Reference Documents" in result
assert "### Source: docs/api.md" in result
assert "Documentation here." in result
def test_format_knowledge_with_score(self) -> None:
"""Test that knowledge includes relevance score."""
adapter = OpenAIAdapter()
contexts = [
KnowledgeContext(
content="Doc content.",
source="docs/api.md",
metadata={"relevance_score": 0.95},
),
]
result = adapter.format(contexts)
assert "(relevance: 0.95)" in result
def test_format_conversation_uses_bold_roles(self) -> None:
"""Test that conversation uses bold role labels."""
adapter = OpenAIAdapter()
contexts = [
ConversationContext(
content="Hello!",
source="chat",
role=MessageRole.USER,
metadata={"role": "user"},
),
ConversationContext(
content="Hi there!",
source="chat",
role=MessageRole.ASSISTANT,
metadata={"role": "assistant"},
),
]
result = adapter.format(contexts)
assert "**USER**:" in result
assert "**ASSISTANT**:" in result
assert "Hello!" in result
assert "Hi there!" in result
def test_format_tool_uses_code_blocks(self) -> None:
"""Test that tool results use code blocks."""
adapter = OpenAIAdapter()
contexts = [
ToolContext(
content='{"status": "ok"}',
source="tool",
metadata={"tool_name": "search", "status": "success"},
),
]
result = adapter.format(contexts)
assert "## Recent Tool Results" in result
assert "### Tool: search (success)" in result
assert "```" in result # Code block
assert '{"status": "ok"}' in result
def test_format_multiple_types_in_order(self) -> None:
"""Test that multiple types are formatted in correct order."""
adapter = OpenAIAdapter()
contexts = [
KnowledgeContext(content="Knowledge", source="docs"),
SystemContext(content="System", source="system"),
TaskContext(content="Task", source="task"),
]
result = adapter.format(contexts)
# System comes first (no header), then task, then knowledge
system_pos = result.find("System")
task_pos = result.find("## Current Task")
knowledge_pos = result.find("## Reference Documents")
assert system_pos < task_pos < knowledge_pos
class TestAdapterIntegration:
"""Integration tests for adapters."""
def test_full_context_formatting_claude(self) -> None:
"""Test formatting a full set of contexts for Claude."""
adapter = ClaudeAdapter()
contexts = [
SystemContext(
content="You are an expert Python developer.",
source="system",
),
TaskContext(
content="Implement user authentication.",
source="task:AUTH-123",
),
KnowledgeContext(
content="JWT tokens provide stateless authentication...",
source="docs/auth/jwt.md",
relevance_score=0.9,
),
ConversationContext(
content="Can you help me implement JWT auth?",
source="chat",
role=MessageRole.USER,
metadata={"role": "user"},
),
ToolContext(
content='{"file": "auth.py", "status": "created"}',
source="tool",
metadata={"tool_name": "file_create"},
),
]
result = adapter.format(contexts)
# Verify all sections present
assert "<system_instructions>" in result
assert "<current_task>" in result
assert "<reference_documents>" in result
assert "<conversation_history>" in result
assert "<tool_results>" in result
# Verify content
assert "expert Python developer" in result
assert "user authentication" in result
assert "JWT tokens" in result
assert "help me implement" in result
assert "file_create" in result
def test_full_context_formatting_openai(self) -> None:
"""Test formatting a full set of contexts for OpenAI."""
adapter = OpenAIAdapter()
contexts = [
SystemContext(
content="You are an expert Python developer.",
source="system",
),
TaskContext(
content="Implement user authentication.",
source="task:AUTH-123",
),
KnowledgeContext(
content="JWT tokens provide stateless authentication...",
source="docs/auth/jwt.md",
relevance_score=0.9,
),
ConversationContext(
content="Can you help me implement JWT auth?",
source="chat",
role=MessageRole.USER,
metadata={"role": "user"},
),
ToolContext(
content='{"file": "auth.py", "status": "created"}',
source="tool",
metadata={"tool_name": "file_create"},
),
]
result = adapter.format(contexts)
# Verify all sections present
assert "## Current Task" in result
assert "## Reference Documents" in result
assert "## Recent Tool Results" in result
assert "**USER**:" in result
# Verify content
assert "expert Python developer" in result
assert "user authentication" in result
assert "JWT tokens" in result
assert "help me implement" in result
assert "file_create" in result