feat(memory): #87 project setup & core architecture

Implements Sub-Issue #87 of Issue #62 (Agent Memory System).

Core infrastructure:
- memory/types.py: Type definitions for all memory types (Working, Episodic,
  Semantic, Procedural) with enums for MemoryType, ScopeLevel, Outcome
- memory/config.py: MemorySettings with MEM_ env prefix, thread-safe
  singleton (usage sketch below)
- memory/exceptions.py: Comprehensive exception hierarchy for memory operations
- memory/manager.py: MemoryManager facade with placeholder methods
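
For example, settings can be overridden via MEM_-prefixed environment
variables (a minimal sketch, assuming the package is importable as
`memory`; see memory/config.py in the file view below):

```python
import os

# MEM_-prefixed variables override field defaults (case-insensitive).
os.environ["MEM_WORKING_MEMORY_BACKEND"] = "memory"

from memory.config import MemorySettings

settings = MemorySettings()
assert settings.working_memory_backend == "memory"

# Invalid values are rejected by the field validator, e.g.
# MemorySettings(working_memory_backend="sqlite") raises a ValidationError.
```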

Directory structure:
- working/: Working memory (Redis/in-memory) - to be implemented in #89
- episodic/: Episodic memory (experiences) - to be implemented in #90
- semantic/: Semantic memory (facts) - to be implemented in #91
- procedural/: Procedural memory (skills) - to be implemented in #92
- scoping/: Scope management - to be implemented in #93
- indexing/: Vector indexing - to be implemented in #94
- consolidation/: Memory consolidation - to be implemented in #95

Tests: 71 unit tests for config, types, and exceptions
Docs: Comprehensive implementation plan at docs/architecture/memory-system-plan.md

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

commit 085a748929 (parent 4b149b8a52)
2026-01-05 01:27:36 +01:00
17 changed files with 3242 additions and 0 deletions

memory/config.py

@@ -0,0 +1,410 @@
"""
Memory System Configuration.
Provides Pydantic settings for the Agent Memory System,
including storage backends, capacity limits, and consolidation policies.
"""
import threading
from functools import lru_cache
from typing import Any
from pydantic import Field, field_validator, model_validator
from pydantic_settings import BaseSettings


class MemorySettings(BaseSettings):
    """
    Configuration for the Agent Memory System.

    All settings can be overridden via environment variables
    with the MEM_ prefix.
    """

    # Working Memory Settings
    working_memory_backend: str = Field(
        default="redis",
        description="Backend for working memory: 'redis' or 'memory'",
    )
    working_memory_default_ttl_seconds: int = Field(
        default=3600,
        ge=60,
        le=86400,
        description="Default TTL for working memory items (1 hour default)",
    )
    working_memory_max_items_per_session: int = Field(
        default=1000,
        ge=100,
        le=100000,
        description="Maximum items per session in working memory",
    )
    working_memory_max_value_size_bytes: int = Field(
        default=1048576,  # 1 MB
        ge=1024,
        le=104857600,  # 100 MB
        description="Maximum size of a single value in working memory",
    )
    working_memory_checkpoint_enabled: bool = Field(
        default=True,
        description="Enable checkpointing for working memory recovery",
    )

    # Redis Settings (for working memory)
    redis_url: str = Field(
        default="redis://localhost:6379/0",
        description="Redis connection URL",
    )
    redis_prefix: str = Field(
        default="mem",
        description="Redis key prefix for memory items",
    )
    redis_connection_timeout_seconds: int = Field(
        default=5,
        ge=1,
        le=60,
        description="Redis connection timeout",
    )

    # Episodic Memory Settings
    episodic_max_episodes_per_project: int = Field(
        default=10000,
        ge=100,
        le=1000000,
        description="Maximum episodes to retain per project",
    )
    episodic_default_importance: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Default importance score for new episodes",
    )
    episodic_retention_days: int = Field(
        default=365,
        ge=7,
        le=3650,
        description="Days to retain episodes before archival",
    )

    # Semantic Memory Settings
    semantic_max_facts_per_project: int = Field(
        default=50000,
        ge=1000,
        le=10000000,
        description="Maximum facts to retain per project",
    )
    semantic_confidence_decay_days: int = Field(
        default=90,
        ge=7,
        le=365,
        description="Days until confidence decays by 50%",
    )
    semantic_min_confidence: float = Field(
        default=0.1,
        ge=0.0,
        le=1.0,
        description="Minimum confidence before fact is pruned",
    )

    # Procedural Memory Settings
    procedural_max_procedures_per_project: int = Field(
        default=1000,
        ge=10,
        le=100000,
        description="Maximum procedures per project",
    )
    procedural_min_success_rate: float = Field(
        default=0.3,
        ge=0.0,
        le=1.0,
        description="Minimum success rate before procedure is pruned",
    )
    procedural_min_uses_before_suggest: int = Field(
        default=3,
        ge=1,
        le=100,
        description="Minimum uses before procedure is suggested",
    )

    # Embedding Settings
    embedding_model: str = Field(
        default="text-embedding-3-small",
        description="Model to use for embeddings",
    )
    embedding_dimensions: int = Field(
        default=1536,
        ge=256,
        le=4096,
        description="Embedding vector dimensions",
    )
    embedding_batch_size: int = Field(
        default=100,
        ge=1,
        le=1000,
        description="Batch size for embedding generation",
    )
    embedding_cache_enabled: bool = Field(
        default=True,
        description="Enable caching of embeddings",
    )

    # Retrieval Settings
    retrieval_default_limit: int = Field(
        default=10,
        ge=1,
        le=100,
        description="Default limit for retrieval queries",
    )
    retrieval_max_limit: int = Field(
        default=100,
        ge=10,
        le=1000,
        description="Maximum limit for retrieval queries",
    )
    retrieval_min_similarity: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Minimum similarity score for retrieval",
    )

    # Consolidation Settings
    consolidation_enabled: bool = Field(
        default=True,
        description="Enable automatic memory consolidation",
    )
    consolidation_batch_size: int = Field(
        default=100,
        ge=10,
        le=1000,
        description="Batch size for consolidation jobs",
    )
    consolidation_schedule_cron: str = Field(
        default="0 3 * * *",
        description="Cron expression for nightly consolidation (3 AM)",
    )
    consolidation_working_to_episodic_delay_minutes: int = Field(
        default=30,
        ge=5,
        le=1440,
        description="Minutes after session end before consolidating to episodic",
    )

    # Pruning Settings
    pruning_enabled: bool = Field(
        default=True,
        description="Enable automatic memory pruning",
    )
    pruning_min_age_days: int = Field(
        default=7,
        ge=1,
        le=365,
        description="Minimum age before memory can be pruned",
    )
    pruning_importance_threshold: float = Field(
        default=0.2,
        ge=0.0,
        le=1.0,
        description="Importance threshold below which memory can be pruned",
    )

    # Caching Settings
    cache_enabled: bool = Field(
        default=True,
        description="Enable caching for memory retrieval",
    )
    cache_ttl_seconds: int = Field(
        default=300,
        ge=10,
        le=3600,
        description="Cache TTL for retrieval results",
    )
    cache_max_items: int = Field(
        default=10000,
        ge=100,
        le=1000000,
        description="Maximum items in memory cache",
    )

    # Performance Settings
    max_retrieval_time_ms: int = Field(
        default=100,
        ge=10,
        le=5000,
        description="Target maximum retrieval time in milliseconds",
    )
    parallel_retrieval: bool = Field(
        default=True,
        description="Enable parallel retrieval from multiple memory types",
    )
    max_parallel_retrievals: int = Field(
        default=4,
        ge=1,
        le=10,
        description="Maximum concurrent retrieval operations",
    )

    @field_validator("working_memory_backend")
    @classmethod
    def validate_backend(cls, v: str) -> str:
        """Validate working memory backend."""
        valid_backends = {"redis", "memory"}
        if v not in valid_backends:
            raise ValueError(f"backend must be one of: {valid_backends}")
        return v

    @field_validator("embedding_model")
    @classmethod
    def validate_embedding_model(cls, v: str) -> str:
        """Validate embedding model name."""
        valid_models = {
            "text-embedding-3-small",
            "text-embedding-3-large",
            "text-embedding-ada-002",
        }
        if v not in valid_models:
            raise ValueError(f"embedding_model must be one of: {valid_models}")
        return v

    @model_validator(mode="after")
    def validate_limits(self) -> "MemorySettings":
        """Validate that limits are consistent."""
        if self.retrieval_default_limit > self.retrieval_max_limit:
            raise ValueError(
                f"retrieval_default_limit ({self.retrieval_default_limit}) "
                f"cannot exceed retrieval_max_limit ({self.retrieval_max_limit})"
            )
        return self

    def get_working_memory_config(self) -> dict[str, Any]:
        """Get working memory configuration as a dictionary."""
        return {
            "backend": self.working_memory_backend,
            "default_ttl_seconds": self.working_memory_default_ttl_seconds,
            "max_items_per_session": self.working_memory_max_items_per_session,
            "max_value_size_bytes": self.working_memory_max_value_size_bytes,
            "checkpoint_enabled": self.working_memory_checkpoint_enabled,
        }

    def get_redis_config(self) -> dict[str, Any]:
        """Get Redis configuration as a dictionary."""
        return {
            "url": self.redis_url,
            "prefix": self.redis_prefix,
            "connection_timeout_seconds": self.redis_connection_timeout_seconds,
        }

    def get_embedding_config(self) -> dict[str, Any]:
        """Get embedding configuration as a dictionary."""
        return {
            "model": self.embedding_model,
            "dimensions": self.embedding_dimensions,
            "batch_size": self.embedding_batch_size,
            "cache_enabled": self.embedding_cache_enabled,
        }

    def get_consolidation_config(self) -> dict[str, Any]:
        """Get consolidation configuration as a dictionary."""
        return {
            "enabled": self.consolidation_enabled,
            "batch_size": self.consolidation_batch_size,
            "schedule_cron": self.consolidation_schedule_cron,
            "working_to_episodic_delay_minutes": (
                self.consolidation_working_to_episodic_delay_minutes
            ),
        }

    def to_dict(self) -> dict[str, Any]:
        """Convert settings to dictionary for logging/debugging."""
        return {
            "working_memory": self.get_working_memory_config(),
            "redis": self.get_redis_config(),
            "episodic": {
                "max_episodes_per_project": self.episodic_max_episodes_per_project,
                "default_importance": self.episodic_default_importance,
                "retention_days": self.episodic_retention_days,
            },
            "semantic": {
                "max_facts_per_project": self.semantic_max_facts_per_project,
                "confidence_decay_days": self.semantic_confidence_decay_days,
                "min_confidence": self.semantic_min_confidence,
            },
            "procedural": {
                "max_procedures_per_project": self.procedural_max_procedures_per_project,
                "min_success_rate": self.procedural_min_success_rate,
                "min_uses_before_suggest": self.procedural_min_uses_before_suggest,
            },
            "embedding": self.get_embedding_config(),
            "retrieval": {
                "default_limit": self.retrieval_default_limit,
                "max_limit": self.retrieval_max_limit,
                "min_similarity": self.retrieval_min_similarity,
            },
            "consolidation": self.get_consolidation_config(),
            "pruning": {
                "enabled": self.pruning_enabled,
                "min_age_days": self.pruning_min_age_days,
                "importance_threshold": self.pruning_importance_threshold,
            },
            "cache": {
                "enabled": self.cache_enabled,
                "ttl_seconds": self.cache_ttl_seconds,
                "max_items": self.cache_max_items,
            },
            "performance": {
                "max_retrieval_time_ms": self.max_retrieval_time_ms,
                "parallel_retrieval": self.parallel_retrieval,
                "max_parallel_retrievals": self.max_parallel_retrievals,
            },
        }

    model_config = {
        "env_prefix": "MEM_",
        "env_file": ".env",
        "env_file_encoding": "utf-8",
        "case_sensitive": False,
        "extra": "ignore",
    }


# Thread-safe singleton pattern
_settings: MemorySettings | None = None
_settings_lock = threading.Lock()


def get_memory_settings() -> MemorySettings:
    """
    Get the global MemorySettings instance.

    Thread-safe with double-checked locking pattern.

    Returns:
        MemorySettings instance
    """
    global _settings
    if _settings is None:
        with _settings_lock:
            if _settings is None:
                _settings = MemorySettings()
    return _settings


def reset_memory_settings() -> None:
    """
    Reset the global settings instance.

    Primarily used for testing.
    """
    global _settings
    with _settings_lock:
        _settings = None


@lru_cache(maxsize=1)
def get_default_settings() -> MemorySettings:
    """
    Get default settings (cached).

    Use this for read-only access to defaults.
    For mutable access, use get_memory_settings().
    """
    return MemorySettings()
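
A minimal usage sketch of the singleton accessors above (the
`memory.config` import path is assumed from the layout described in the
commit message):

```python
import os

from memory.config import get_memory_settings, reset_memory_settings

# Env overrides take effect when the singleton is first constructed.
os.environ["MEM_RETRIEVAL_DEFAULT_LIMIT"] = "20"

settings = get_memory_settings()              # created once, thread-safe
assert settings.retrieval_default_limit == 20
print(settings.get_redis_config()["url"])     # redis://localhost:6379/0 by default

# Tests can clear the singleton so the next access re-reads the environment.
reset_memory_settings()
```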