fast-next-template/backend/app/models/memory/episode.py
Felipe Cardoso bd988f76b0 fix(memory): address review findings from Issue #88
Fixes based on multi-agent review:

Model Improvements:
- Remove duplicate index ix_procedures_agent_type (already indexed via Column)
- Fix postgresql_where in the Fact model to use text() instead of a raw string literal
- Make Procedure.success_rate thread-safe by snapshotting both counters (see the sketch after this list)
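A minimal sketch of those two fixes, assuming the Fact and Procedure models from this PR; the index name and predicate are illustrative stand-ins, not the exact diff:

```python
from sqlalchemy import Index, text

# Fact model: postgresql_where expects a SQL expression, not a plain
# Python string, so the partial-index predicate is wrapped in text().
# Index name and predicate here are hypothetical.
ix_facts_high_confidence = Index(
    "ix_facts_high_confidence",
    "project_id",
    "confidence",
    postgresql_where=text("confidence >= 0.5"),
)


class Procedure:  # simplified stand-in for the real ORM model
    success_count: int = 0
    failure_count: int = 0

    @property
    def success_rate(self) -> float:
        # Snapshot both counters once so a concurrent update between
        # the two attribute reads cannot skew the ratio.
        successes = self.success_count
        failures = self.failure_count
        total = successes + failures
        return successes / total if total > 0 else 0.0
```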

Data Integrity Constraints:
- Add CheckConstraint for Episode: importance_score 0-1, duration >= 0, tokens >= 0
- Add CheckConstraint for Fact: confidence 0-1
- Add CheckConstraint for Procedure: success_count >= 0, failure_count >= 0 (see the sketch below)
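The Episode constraints appear verbatim in the file below; for Fact and Procedure, a sketch with constraint names assumed to follow the same ck_* convention:

```python
from sqlalchemy import CheckConstraint

# Goes in Fact.__table_args__ (constraint name is an assumption).
fact_constraints = (
    CheckConstraint(
        "confidence >= 0.0 AND confidence <= 1.0",
        name="ck_facts_confidence_range",
    ),
)

# Goes in Procedure.__table_args__ (constraint names are assumptions).
procedure_constraints = (
    CheckConstraint("success_count >= 0", name="ck_procedures_success_positive"),
    CheckConstraint("failure_count >= 0", name="ck_procedures_failure_positive"),
)
```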

Migration Updates:
- Create the new check constraints in upgrade()
- Drop them again in downgrade() (see the Alembic sketch after this list)
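A hedged Alembic sketch of that migration change, shown for the Episode constraints defined in this file; the Fact and Procedure constraints would follow the same pattern:

```python
from alembic import op


def upgrade() -> None:
    op.create_check_constraint(
        "ck_episodes_importance_range",
        "episodes",
        "importance_score >= 0.0 AND importance_score <= 1.0",
    )
    op.create_check_constraint(
        "ck_episodes_duration_positive", "episodes", "duration_seconds >= 0.0"
    )
    op.create_check_constraint(
        "ck_episodes_tokens_positive", "episodes", "tokens_used >= 0"
    )


def downgrade() -> None:
    # Drop in reverse order of creation.
    op.drop_constraint("ck_episodes_tokens_positive", "episodes", type_="check")
    op.drop_constraint("ck_episodes_duration_positive", "episodes", type_="check")
    op.drop_constraint("ck_episodes_importance_range", "episodes", type_="check")
```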

Note: SQLAlchemy Column default=list is correct (callable factory pattern); a short illustration follows
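What the note means, as a two-line contrast:

```python
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import JSONB

# Callable default: SQLAlchemy calls list() for each INSERT, so every
# row gets its own fresh empty list.
actions = Column(JSONB, default=list, nullable=False)

# By contrast, default=[] would be evaluated once at class-definition
# time and the same mutable list shared across rows (the classic
# mutable-default pitfall this note addresses).
```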

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-05 01:54:51 +01:00

140 lines · 4.0 KiB · Python

# app/models/memory/episode.py
"""
Episode database model.
Stores experiential memories - records of past task executions
with context, actions, outcomes, and lessons learned.
"""

from sqlalchemy import (
    BigInteger,
    CheckConstraint,
    Column,
    DateTime,
    Enum,
    Float,
    ForeignKey,
    Index,
    String,
    Text,
)
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin
from .enums import EpisodeOutcome

# Import pgvector type - will be available after migration enables extension
try:
    from pgvector.sqlalchemy import Vector  # type: ignore[import-not-found]
except ImportError:
    # Fallback for environments without pgvector
    Vector = None


class Episode(Base, UUIDMixin, TimestampMixin):
    """
    Episodic memory model.

    Records experiential memories from agent task execution:
    - What task was performed
    - What actions were taken
    - What was the outcome
    - What lessons were learned
    """

    __tablename__ = "episodes"

    # Foreign keys
    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    agent_instance_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_instances.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )
    agent_type_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_types.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    # Session reference
    session_id = Column(String(255), nullable=False, index=True)

    # Task information
    task_type = Column(String(100), nullable=False, index=True)
    task_description = Column(Text, nullable=False)

    # Actions taken (list of action dictionaries)
    actions = Column(JSONB, default=list, nullable=False)

    # Context summary
    context_summary = Column(Text, nullable=False)

    # Outcome
    outcome: Column[EpisodeOutcome] = Column(
        Enum(EpisodeOutcome),
        nullable=False,
        index=True,
    )
    outcome_details = Column(Text, nullable=True)

    # Metrics
    duration_seconds = Column(Float, nullable=False, default=0.0)
    tokens_used = Column(BigInteger, nullable=False, default=0)

    # Learning
    lessons_learned = Column(JSONB, default=list, nullable=False)
    importance_score = Column(Float, nullable=False, default=0.5, index=True)

    # Vector embedding for semantic search
    # Using 1536 dimensions for OpenAI text-embedding-3-small
    embedding = Column(Vector(1536) if Vector else Text, nullable=True)

    # When the episode occurred
    occurred_at = Column(DateTime(timezone=True), nullable=False, index=True)

    # Relationships
    project = relationship("Project", foreign_keys=[project_id])
    agent_instance = relationship("AgentInstance", foreign_keys=[agent_instance_id])
    agent_type = relationship("AgentType", foreign_keys=[agent_type_id])

    __table_args__ = (
        # Primary query patterns
        Index("ix_episodes_project_task", "project_id", "task_type"),
        Index("ix_episodes_project_outcome", "project_id", "outcome"),
        Index("ix_episodes_agent_task", "agent_instance_id", "task_type"),
        Index("ix_episodes_project_time", "project_id", "occurred_at"),
        # For importance-based pruning
        Index("ix_episodes_importance_time", "importance_score", "occurred_at"),
        # Data integrity constraints
        CheckConstraint(
            "importance_score >= 0.0 AND importance_score <= 1.0",
            name="ck_episodes_importance_range",
        ),
        CheckConstraint(
            "duration_seconds >= 0.0",
            name="ck_episodes_duration_positive",
        ),
        CheckConstraint(
            "tokens_used >= 0",
            name="ck_episodes_tokens_positive",
        ),
    )

    def __repr__(self) -> str:
        return f"<Episode {self.id} task={self.task_type} outcome={self.outcome.value}>"