chore(context): apply linter fixes and sort imports (#86)
Phase 8 of Context Management Engine - Final Cleanup:
- Sort __all__ exports alphabetically
- Sort imports per isort conventions
- Fix minor linting issues

Final test results:
- 311 context management tests passing
- 2507 total backend tests passing
- 85% code coverage

Context Management Engine is complete with all 8 phases:
1. Foundation: Types, Config, Exceptions
2. Token Budget Management
3. Context Scoring & Ranking
4. Context Assembly Pipeline
5. Model Adapters (Claude, OpenAI)
6. Caching Layer (Redis + in-memory)
7. Main Engine & Integration
8. Testing & Documentation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
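The reordering in the diff below appears to follow Python's default case-sensitive ordering, which places class names ahead of lowercase function names. A minimal, repo-independent illustration of why get_adapter ends up last among the adapter exports:

    # Illustration only (not code from this repo): Python's default sort is
    # case-sensitive, so uppercase class names precede lowercase functions.
    exports = ["ClaudeAdapter", "DefaultAdapter", "get_adapter", "ModelAdapter", "OpenAIAdapter"]
    print(sorted(exports))
    # ['ClaudeAdapter', 'DefaultAdapter', 'ModelAdapter', 'OpenAIAdapter', 'get_adapter']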
@@ -35,12 +35,36 @@ Usage:
"""

# Budget Management
# Adapters
from .adapters import (
    ClaudeAdapter,
    DefaultAdapter,
    ModelAdapter,
    OpenAIAdapter,
    get_adapter,
)

# Assembly
from .assembly import (
    ContextPipeline,
    PipelineMetrics,
)
from .budget import (
    BudgetAllocator,
    TokenBudget,
    TokenCalculator,
)

# Cache
from .cache import ContextCache

# Compression
from .compression import (
    ContextCompressor,
    TruncationResult,
    TruncationStrategy,
)

# Configuration
from .config import (
    ContextSettings,
@@ -49,6 +73,9 @@ from .config import (
    reset_context_settings,
)

# Engine
from .engine import ContextEngine, create_context_engine

# Exceptions
from .exceptions import (
    AssemblyTimeoutError,
@@ -63,34 +90,6 @@ from .exceptions import (
    TokenCountError,
)

# Assembly
from .assembly import (
    ContextPipeline,
    PipelineMetrics,
)

# Compression
from .compression import (
    ContextCompressor,
    TruncationResult,
    TruncationStrategy,
)

# Adapters
from .adapters import (
    ClaudeAdapter,
    DefaultAdapter,
    get_adapter,
    ModelAdapter,
    OpenAIAdapter,
)

# Cache
from .cache import ContextCache

# Engine
from .engine import ContextEngine, create_context_engine

# Prioritization
from .prioritization import (
    ContextRanker,

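With every submodule re-exported from the package root, consumers can import the whole public surface from one place. A hypothetical consumer-side sketch (the package path and the create_context_engine arguments are assumptions; only get_adapter's signature is visible in this diff):

    # Hypothetical usage; import path and constructor arguments are assumed.
    from app.services.context import create_context_engine, get_adapter  # path not confirmed by the diff

    engine = create_context_engine()            # factory re-exported by the package __init__
    adapter = get_adapter("claude-3-5-sonnet")  # model string is illustrative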
@@ -29,7 +29,7 @@ def get_adapter(model: str) -> ModelAdapter:
__all__ = [
    "ClaudeAdapter",
    "DefaultAdapter",
    "get_adapter",
    "ModelAdapter",
    "OpenAIAdapter",
    "get_adapter",
]

@@ -8,7 +8,6 @@ Provides a high-level API for assembling optimized context for LLM requests.
import logging
from typing import TYPE_CHECKING, Any

from .adapters import get_adapter
from .assembly import ContextPipeline
from .budget import BudgetAllocator, TokenBudget, TokenCalculator
from .cache import ContextCache

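This hunk pulls the budget primitives (BudgetAllocator, TokenBudget, TokenCalculator) into the engine module. As a standalone sketch of the proportional token-budgeting idea, not the repo's actual implementation:

    # Standalone sketch of proportional token budgeting; the real
    # BudgetAllocator/TokenBudget API is not visible in this diff.
    def allocate(total_tokens: int, weights: dict[str, float]) -> dict[str, int]:
        total_weight = sum(weights.values()) or 1.0
        return {name: round(total_tokens * w / total_weight) for name, w in weights.items()}

    print(allocate(8000, {"system": 0.2, "knowledge": 0.5, "history": 0.3}))
    # {'system': 1600, 'knowledge': 4000, 'history': 2400}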
@@ -10,10 +10,10 @@ from typing import TYPE_CHECKING, Any

from ..budget import TokenBudget, TokenCalculator
from ..scoring.composite import CompositeScorer, ScoredContext
from ..types import BaseContext, ContextType
from ..types import BaseContext

if TYPE_CHECKING:
    from app.services.mcp.client_manager import MCPClientManager
    pass

logger = logging.getLogger(__name__)

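The pass sitting next to the guarded MCPClientManager import suggests the body of the if TYPE_CHECKING: block changed in this hunk. Whichever line was added or removed, a guard whose only import goes away still needs a statement to stay syntactically valid; a minimal standalone illustration:

    # An emptied TYPE_CHECKING guard still needs a statement; "pass" keeps the
    # module importable until a type-only import is needed again.
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        pass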
@@ -10,7 +10,7 @@ from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
from ..types import BaseContext

if TYPE_CHECKING:
    from app.services.mcp.client_manager import MCPClientManager
    pass


@runtime_checkable

@@ -6,15 +6,14 @@ Combines multiple scoring strategies with configurable weights.

import asyncio
import logging
from dataclasses import dataclass, field
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any

from .base import BaseScorer
from ..config import ContextSettings, get_context_settings
from ..types import BaseContext
from .priority import PriorityScorer
from .recency import RecencyScorer
from .relevance import RelevanceScorer
from ..config import ContextSettings, get_context_settings
from ..types import BaseContext

if TYPE_CHECKING:
    from app.services.mcp.client_manager import MCPClientManager

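Per this hunk's docstring context, the module combines multiple scoring strategies with configurable weights. A self-contained sketch of that idea (not the actual CompositeScorer interface, and the weights below are made up):

    # Weighted combination of per-strategy scores; illustrative only.
    def combine(scores: dict[str, float], weights: dict[str, float]) -> float:
        total_weight = sum(weights.values()) or 1.0
        return sum(scores[name] * weight for name, weight in weights.items()) / total_weight

    print(combine({"relevance": 0.9, "recency": 0.4, "priority": 1.0},
                  {"relevance": 0.5, "recency": 0.3, "priority": 0.2}))
    # prints roughly 0.77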
@@ -6,8 +6,8 @@ Scores context based on assigned priority levels.

from typing import Any

from ..types import BaseContext, ContextType
from .base import BaseScorer
from ..types import BaseContext, ContextPriority, ContextType


class PriorityScorer(BaseScorer):

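The removed ContextPriority import points at explicit priority levels. A hypothetical mapping from level to normalized score (the enum values and the formula are stand-ins, not taken from this repo):

    # Hypothetical stand-in for ContextPriority and a normalized score.
    from enum import Enum

    class ContextPriority(Enum):
        CRITICAL = 4
        HIGH = 3
        MEDIUM = 2
        LOW = 1

    def priority_score(priority: ContextPriority) -> float:
        return priority.value / max(level.value for level in ContextPriority)

    print(priority_score(ContextPriority.HIGH))  # 0.75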
@@ -6,11 +6,11 @@ More recent content gets higher scores.
"""

import math
from datetime import UTC, datetime, timedelta
from datetime import UTC, datetime
from typing import Any

from .base import BaseScorer
from ..types import BaseContext, ContextType
from .base import BaseScorer


class RecencyScorer(BaseScorer):

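The math and datetime imports match the docstring context ("More recent content gets higher scores"). An illustrative half-life decay, not necessarily the formula RecencyScorer applies:

    # Illustrative half-life decay: fresh content scores ~1.0 and the score
    # halves every half_life_hours. Not taken from this repo.
    import math
    from datetime import UTC, datetime, timedelta

    def recency_score(timestamp: datetime, half_life_hours: float = 24.0) -> float:
        age_hours = (datetime.now(UTC) - timestamp).total_seconds() / 3600
        return math.exp(-math.log(2) * max(age_hours, 0.0) / half_life_hours)

    print(recency_score(datetime.now(UTC) - timedelta(hours=24)))  # ~0.5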
@@ -9,8 +9,8 @@ import logging
import re
from typing import TYPE_CHECKING, Any

from ..types import BaseContext, KnowledgeContext
from .base import BaseScorer
from ..types import BaseContext, ContextType, KnowledgeContext

if TYPE_CHECKING:
    from app.services.mcp.client_manager import MCPClientManager

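The re import alongside RelevanceScorer and KnowledgeContext suggests some form of text matching. A naive token-overlap sketch, not the repo's actual relevance algorithm:

    # Naive token-overlap relevance; illustrative only.
    import re

    def _tokens(text: str) -> set[str]:
        return set(re.findall(r"[a-z0-9]+", text.lower()))

    def relevance_score(query: str, content: str) -> float:
        query_tokens = _tokens(query)
        if not query_tokens:
            return 0.0
        return len(query_tokens & _tokens(content)) / len(query_tokens)

    print(relevance_score("token budget allocation", "Allocates the token budget"))  # ~0.67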