feat(context): implement model adapters for Claude and OpenAI (#83)
Phase 5 of Context Management Engine - Model Adapters:

- Add ModelAdapter abstract base class with model matching
- Add DefaultAdapter for unknown models (plain text)
- Add ClaudeAdapter with XML-based formatting:
  - <system_instructions> for system context
  - <reference_documents>/<document> for knowledge
  - <conversation_history>/<message> for chat
  - <tool_results>/<tool_result> for tool outputs
  - XML escaping for special characters
- Add OpenAIAdapter with markdown formatting:
  - ## headers for sections
  - ### Source headers for documents
  - **ROLE** bold labels for conversation
  - Code blocks for tool outputs
- Add get_adapter() factory function for model selection

Tests: 33 new tests, 256 total context tests passing

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
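As a quick orientation, a hypothetical sketch of the adapter selection described above; the import path and exact get_adapter() signature are assumptions inferred from this commit message, not shown in the diff below:

```python
# Hypothetical sketch: import path and signature assumed from the commit message.
from app.services.context.adapters import get_adapter

# get_adapter() is described as matching the model name against each adapter's
# MODEL_PATTERNS and falling back to DefaultAdapter for unknown models.
assert type(get_adapter("claude-sonnet-4")).__name__ == "ClaudeAdapter"
assert type(get_adapter("gpt-4o")).__name__ == "OpenAIAdapter"
assert type(get_adapter("some-local-model")).__name__ == "DefaultAdapter"
```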
backend/app/services/context/adapters/openai.py (new file, 160 lines)
@@ -0,0 +1,160 @@
"""
OpenAI Model Adapter.

Provides OpenAI-specific context formatting using markdown,
which GPT models understand well.
"""

from typing import Any

from ..types import BaseContext, ContextType
from .base import ModelAdapter


class OpenAIAdapter(ModelAdapter):
    """
    OpenAI-specific context formatting adapter.

    GPT models work well with markdown formatting,
    so we use headers and structured markdown for clarity.

    Features:
    - Markdown headers for each context type
    - Source attribution headers for documents
    - Bold role labels for conversations
    - Code blocks for tool outputs
    """

    MODEL_PATTERNS: list[str] = ["gpt", "openai", "o1", "o3"]

    def format(
        self,
        contexts: list[BaseContext],
        **kwargs: Any,
    ) -> str:
        """
        Format contexts for OpenAI models.

        Uses markdown formatting for structured content.

        Args:
            contexts: List of contexts to format
            **kwargs: Additional formatting options

        Returns:
            Markdown-structured context string
        """
        if not contexts:
            return ""

        by_type = self.group_by_type(contexts)
        parts: list[str] = []

        for ct in self.get_type_order():
            if ct in by_type:
                formatted = self.format_type(by_type[ct], ct, **kwargs)
                if formatted:
                    parts.append(formatted)

        return self.get_separator().join(parts)

    def format_type(
        self,
        contexts: list[BaseContext],
        context_type: ContextType,
        **kwargs: Any,
    ) -> str:
        """
        Format contexts of a specific type for OpenAI.

        Args:
            contexts: List of contexts of the same type
            context_type: The type of contexts
            **kwargs: Additional formatting options

        Returns:
            Markdown-formatted string for this context type
        """
        if not contexts:
            return ""

        if context_type == ContextType.SYSTEM:
            return self._format_system(contexts)
        elif context_type == ContextType.TASK:
            return self._format_task(contexts)
        elif context_type == ContextType.KNOWLEDGE:
            return self._format_knowledge(contexts)
        elif context_type == ContextType.CONVERSATION:
            return self._format_conversation(contexts)
        elif context_type == ContextType.TOOL:
            return self._format_tool(contexts)

        return "\n".join(c.content for c in contexts)

    def _format_system(self, contexts: list[BaseContext]) -> str:
        """Format system contexts."""
        content = "\n\n".join(c.content for c in contexts)
        return content

    def _format_task(self, contexts: list[BaseContext]) -> str:
        """Format task contexts."""
        content = "\n\n".join(c.content for c in contexts)
        return f"## Current Task\n\n{content}"

    def _format_knowledge(self, contexts: list[BaseContext]) -> str:
        """
        Format knowledge contexts as structured documents.

        Each knowledge context becomes a section with source attribution.
        """
        parts = ["## Reference Documents\n"]

        for ctx in contexts:
            source = ctx.source
            score = ctx.metadata.get("score", ctx.metadata.get("relevance_score", ""))

            if score:
                parts.append(f"### Source: {source} (relevance: {score})\n")
            else:
                parts.append(f"### Source: {source}\n")

            parts.append(ctx.content)
            parts.append("")

        return "\n".join(parts)

    def _format_conversation(self, contexts: list[BaseContext]) -> str:
        """
        Format conversation contexts as message history.

        Uses bold role labels for clear turn delineation.
        """
        parts = []

        for ctx in contexts:
            role = ctx.metadata.get("role", "user").upper()
            parts.append(f"**{role}**: {ctx.content}")

        return "\n\n".join(parts)

    def _format_tool(self, contexts: list[BaseContext]) -> str:
        """
        Format tool contexts as tool results.

        Each tool result is in a code block with the tool name.
        """
        parts = ["## Recent Tool Results\n"]

        for ctx in contexts:
            tool_name = ctx.metadata.get("tool_name", "unknown")
            status = ctx.metadata.get("status", "")

            if status:
                parts.append(f"### Tool: {tool_name} ({status})\n")
            else:
                parts.append(f"### Tool: {tool_name}\n")

            parts.append(f"```\n{ctx.content}\n```")
            parts.append("")

        return "\n".join(parts)
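For orientation, a rough sketch of the markdown shape this adapter produces. The BaseContext constructor arguments are assumptions; the diff only shows that contexts expose .content, .metadata, and .source, and that section ordering comes from get_type_order():

```python
# Sketch only: BaseContext field names other than content/metadata/source are assumed.
adapter = OpenAIAdapter()

contexts = [
    BaseContext(
        context_type=ContextType.KNOWLEDGE,
        content="Refunds are issued within 14 days.",
        source="policy.md",
        metadata={"score": 0.91},
    ),
    BaseContext(
        context_type=ContextType.CONVERSATION,
        content="How long do refunds take?",
        metadata={"role": "user"},
    ),
]

print(adapter.format(contexts))
# Approximate output (the separator between sections comes from get_separator()):
#
#   ## Reference Documents
#
#   ### Source: policy.md (relevance: 0.91)
#
#   Refunds are issued within 14 days.
#
#   **USER**: How long do refunds take?
```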