feat(backend): Add Syndarix domain models with CRUD operations
- Add Project model with slug, description, autonomy level, and settings
- Add AgentType model for agent templates with model config and failover
- Add AgentInstance model for running agents with status and memory
- Add Issue model with external tracker sync (Gitea/GitHub/GitLab)
- Add Sprint model with velocity tracking and lifecycle management
- Add comprehensive Pydantic schemas with validation
- Add full CRUD operations for all models with filtering/sorting
- Add 280+ tests for models, schemas, and CRUD operations

Implements #23, #24, #25, #26, #27

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
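Each CRUD module below exposes a module-level singleton (`project`, `agent_type`, `agent_instance`, `issue`, `sprint`) intended to be shared across the application. The sketch below shows one way these singletons might be consumed from a FastAPI router; it is a minimal illustration only, assuming an async-session dependency at `app.api.deps.get_db` and illustrative route paths, neither of which is part of this commit.

# Hypothetical wiring for the CRUD singletons added in this commit.
# The `get_db` dependency location and the route paths are assumptions, not part of the diff.
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession

from app.api.deps import get_db  # assumed location of the async-session dependency
from app.crud.syndarix import project as project_crud
from app.schemas.syndarix import ProjectCreate

router = APIRouter(prefix="/projects", tags=["projects"])


@router.post("")
async def create_project(payload: ProjectCreate, db: AsyncSession = Depends(get_db)):
    # CRUDProject.create raises ValueError on duplicate slugs; surface it as a 409.
    try:
        created = await project_crud.create(db, obj_in=payload)
    except ValueError as exc:
        raise HTTPException(status_code=409, detail=str(exc))
    return {"id": str(created.id), "slug": created.slug, "name": created.name}


@router.get("/{slug}")
async def read_project(slug: str, db: AsyncSession = Depends(get_db)):
    found = await project_crud.get_by_slug(db, slug=slug)
    if found is None:
        raise HTTPException(status_code=404, detail="Project not found")
    return {"id": str(found.id), "slug": found.slug, "name": found.name, "status": found.status}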
20  backend/app/crud/syndarix/__init__.py  Normal file
@@ -0,0 +1,20 @@
# app/crud/syndarix/__init__.py
"""
Syndarix CRUD operations.

This package contains CRUD operations for all Syndarix domain entities.
"""

from .agent_instance import agent_instance
from .agent_type import agent_type
from .issue import issue
from .project import project
from .sprint import sprint

__all__ = [
    "agent_instance",
    "agent_type",
    "issue",
    "project",
    "sprint",
]
346  backend/app/crud/syndarix/agent_instance.py  Normal file
@@ -0,0 +1,346 @@
# app/crud/syndarix/agent_instance.py
"""Async CRUD operations for AgentInstance model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import UTC, datetime
from decimal import Decimal
from typing import Any
from uuid import UUID

from sqlalchemy import func, select, update
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, Issue
from app.models.syndarix.enums import AgentStatus
from app.schemas.syndarix import AgentInstanceCreate, AgentInstanceUpdate

logger = logging.getLogger(__name__)


class CRUDAgentInstance(CRUDBase[AgentInstance, AgentInstanceCreate, AgentInstanceUpdate]):
    """Async CRUD operations for AgentInstance model."""

    async def create(
        self, db: AsyncSession, *, obj_in: AgentInstanceCreate
    ) -> AgentInstance:
        """Create a new agent instance with error handling."""
        try:
            db_obj = AgentInstance(
                agent_type_id=obj_in.agent_type_id,
                project_id=obj_in.project_id,
                status=obj_in.status,
                current_task=obj_in.current_task,
                short_term_memory=obj_in.short_term_memory,
                long_term_memory_ref=obj_in.long_term_memory_ref,
                session_id=obj_in.session_id,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error(f"Integrity error creating agent instance: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Unexpected error creating agent instance: {e!s}", exc_info=True
            )
            raise

    async def get_with_details(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get an agent instance with full details including related entities.

        Returns:
            Dictionary with instance and related entity details
        """
        try:
            # Get instance with joined relationships
            result = await db.execute(
                select(AgentInstance)
                .options(
                    joinedload(AgentInstance.agent_type),
                    joinedload(AgentInstance.project),
                )
                .where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            # Get assigned issues count
            issues_count_result = await db.execute(
                select(func.count(Issue.id)).where(
                    Issue.assigned_agent_id == instance_id
                )
            )
            assigned_issues_count = issues_count_result.scalar_one()

            return {
                "instance": instance,
                "agent_type_name": instance.agent_type.name if instance.agent_type else None,
                "agent_type_slug": instance.agent_type.slug if instance.agent_type else None,
                "project_name": instance.project.name if instance.project else None,
                "project_slug": instance.project.slug if instance.project else None,
                "assigned_issues_count": assigned_issues_count,
            }
        except Exception as e:
            logger.error(
                f"Error getting agent instance with details {instance_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        status: AgentStatus | None = None,
        skip: int = 0,
        limit: int = 100,
    ) -> tuple[list[AgentInstance], int]:
        """Get agent instances for a specific project."""
        try:
            query = select(AgentInstance).where(
                AgentInstance.project_id == project_id
            )

            if status is not None:
                query = query.where(AgentInstance.status == status)

            # Get total count
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply pagination
            query = query.order_by(AgentInstance.created_at.desc())
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            instances = list(result.scalars().all())

            return instances, total
        except Exception as e:
            logger.error(
                f"Error getting instances by project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_by_agent_type(
        self,
        db: AsyncSession,
        *,
        agent_type_id: UUID,
        status: AgentStatus | None = None,
    ) -> list[AgentInstance]:
        """Get all instances of a specific agent type."""
        try:
            query = select(AgentInstance).where(
                AgentInstance.agent_type_id == agent_type_id
            )

            if status is not None:
                query = query.where(AgentInstance.status == status)

            query = query.order_by(AgentInstance.created_at.desc())
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting instances by agent type {agent_type_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def update_status(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
        status: AgentStatus,
        current_task: str | None = None,
    ) -> AgentInstance | None:
        """Update the status of an agent instance."""
        try:
            result = await db.execute(
                select(AgentInstance).where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            instance.status = status
            instance.last_activity_at = datetime.now(UTC)
            if current_task is not None:
                instance.current_task = current_task

            await db.commit()
            await db.refresh(instance)
            return instance
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error updating instance status {instance_id}: {e!s}", exc_info=True
            )
            raise

    async def terminate(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
    ) -> AgentInstance | None:
        """Terminate an agent instance."""
        try:
            result = await db.execute(
                select(AgentInstance).where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            instance.status = AgentStatus.TERMINATED
            instance.terminated_at = datetime.now(UTC)
            instance.current_task = None
            instance.session_id = None

            await db.commit()
            await db.refresh(instance)
            return instance
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error terminating instance {instance_id}: {e!s}", exc_info=True
            )
            raise

    async def record_task_completion(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
        tokens_used: int,
        cost_incurred: Decimal,
    ) -> AgentInstance | None:
        """Record a completed task and update metrics."""
        try:
            result = await db.execute(
                select(AgentInstance).where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            instance.tasks_completed += 1
            instance.tokens_used += tokens_used
            instance.cost_incurred += cost_incurred
            instance.last_activity_at = datetime.now(UTC)

            await db.commit()
            await db.refresh(instance)
            return instance
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error recording task completion {instance_id}: {e!s}", exc_info=True
            )
            raise

    async def get_project_metrics(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> dict[str, Any]:
        """Get aggregated metrics for all agents in a project."""
        try:
            result = await db.execute(
                select(
                    func.count(AgentInstance.id).label("total_instances"),
                    func.count(AgentInstance.id)
                    .filter(AgentInstance.status == AgentStatus.WORKING)
                    .label("active_instances"),
                    func.count(AgentInstance.id)
                    .filter(AgentInstance.status == AgentStatus.IDLE)
                    .label("idle_instances"),
                    func.sum(AgentInstance.tasks_completed).label("total_tasks"),
                    func.sum(AgentInstance.tokens_used).label("total_tokens"),
                    func.sum(AgentInstance.cost_incurred).label("total_cost"),
                ).where(AgentInstance.project_id == project_id)
            )
            row = result.one()

            return {
                "total_instances": row.total_instances or 0,
                "active_instances": row.active_instances or 0,
                "idle_instances": row.idle_instances or 0,
                "total_tasks_completed": row.total_tasks or 0,
                "total_tokens_used": row.total_tokens or 0,
                "total_cost_incurred": row.total_cost or Decimal("0.0000"),
            }
        except Exception as e:
            logger.error(
                f"Error getting project metrics {project_id}: {e!s}", exc_info=True
            )
            raise

    async def bulk_terminate_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> int:
        """Terminate all active instances in a project."""
        try:
            now = datetime.now(UTC)
            stmt = (
                update(AgentInstance)
                .where(
                    AgentInstance.project_id == project_id,
                    AgentInstance.status != AgentStatus.TERMINATED,
                )
                .values(
                    status=AgentStatus.TERMINATED,
                    terminated_at=now,
                    current_task=None,
                    session_id=None,
                    updated_at=now,
                )
            )

            result = await db.execute(stmt)
            await db.commit()

            terminated_count = result.rowcount
            logger.info(
                f"Bulk terminated {terminated_count} instances in project {project_id}"
            )
            return terminated_count
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error bulk terminating instances for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise


# Create a singleton instance for use across the application
agent_instance = CRUDAgentInstance(AgentInstance)
275  backend/app/crud/syndarix/agent_type.py  Normal file
@@ -0,0 +1,275 @@
# app/crud/syndarix/agent_type.py
"""Async CRUD operations for AgentType model using SQLAlchemy 2.0 patterns."""

import logging
from typing import Any
from uuid import UUID

from sqlalchemy import func, or_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, AgentType
from app.schemas.syndarix import AgentTypeCreate, AgentTypeUpdate

logger = logging.getLogger(__name__)


class CRUDAgentType(CRUDBase[AgentType, AgentTypeCreate, AgentTypeUpdate]):
    """Async CRUD operations for AgentType model."""

    async def get_by_slug(self, db: AsyncSession, *, slug: str) -> AgentType | None:
        """Get agent type by slug."""
        try:
            result = await db.execute(
                select(AgentType).where(AgentType.slug == slug)
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(f"Error getting agent type by slug {slug}: {e!s}")
            raise

    async def create(
        self, db: AsyncSession, *, obj_in: AgentTypeCreate
    ) -> AgentType:
        """Create a new agent type with error handling."""
        try:
            db_obj = AgentType(
                name=obj_in.name,
                slug=obj_in.slug,
                description=obj_in.description,
                expertise=obj_in.expertise,
                personality_prompt=obj_in.personality_prompt,
                primary_model=obj_in.primary_model,
                fallback_models=obj_in.fallback_models,
                model_params=obj_in.model_params,
                mcp_servers=obj_in.mcp_servers,
                tool_permissions=obj_in.tool_permissions,
                is_active=obj_in.is_active,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            if "slug" in error_msg.lower():
                logger.warning(f"Duplicate slug attempted: {obj_in.slug}")
                raise ValueError(
                    f"Agent type with slug '{obj_in.slug}' already exists"
                )
            logger.error(f"Integrity error creating agent type: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Unexpected error creating agent type: {e!s}", exc_info=True
            )
            raise

    async def get_multi_with_filters(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        is_active: bool | None = None,
        search: str | None = None,
        sort_by: str = "created_at",
        sort_order: str = "desc",
    ) -> tuple[list[AgentType], int]:
        """
        Get multiple agent types with filtering, searching, and sorting.

        Returns:
            Tuple of (agent types list, total count)
        """
        try:
            query = select(AgentType)

            # Apply filters
            if is_active is not None:
                query = query.where(AgentType.is_active == is_active)

            if search:
                search_filter = or_(
                    AgentType.name.ilike(f"%{search}%"),
                    AgentType.slug.ilike(f"%{search}%"),
                    AgentType.description.ilike(f"%{search}%"),
                )
                query = query.where(search_filter)

            # Get total count before pagination
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting
            sort_column = getattr(AgentType, sort_by, AgentType.created_at)
            if sort_order == "desc":
                query = query.order_by(sort_column.desc())
            else:
                query = query.order_by(sort_column.asc())

            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            agent_types = list(result.scalars().all())

            return agent_types, total
        except Exception as e:
            logger.error(f"Error getting agent types with filters: {e!s}")
            raise

    async def get_with_instance_count(
        self,
        db: AsyncSession,
        *,
        agent_type_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get a single agent type with its instance count.

        Returns:
            Dictionary with agent_type and instance_count
        """
        try:
            result = await db.execute(
                select(AgentType).where(AgentType.id == agent_type_id)
            )
            agent_type = result.scalar_one_or_none()

            if not agent_type:
                return None

            # Get instance count
            count_result = await db.execute(
                select(func.count(AgentInstance.id)).where(
                    AgentInstance.agent_type_id == agent_type_id
                )
            )
            instance_count = count_result.scalar_one()

            return {
                "agent_type": agent_type,
                "instance_count": instance_count,
            }
        except Exception as e:
            logger.error(
                f"Error getting agent type with count {agent_type_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_multi_with_instance_counts(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        is_active: bool | None = None,
        search: str | None = None,
    ) -> tuple[list[dict[str, Any]], int]:
        """
        Get agent types with instance counts in optimized queries.

        Returns:
            Tuple of (list of dicts with agent_type and instance_count, total count)
        """
        try:
            # Get filtered agent types
            agent_types, total = await self.get_multi_with_filters(
                db,
                skip=skip,
                limit=limit,
                is_active=is_active,
                search=search,
            )

            if not agent_types:
                return [], 0

            agent_type_ids = [at.id for at in agent_types]

            # Get instance counts in bulk
            counts_result = await db.execute(
                select(
                    AgentInstance.agent_type_id,
                    func.count(AgentInstance.id).label("count"),
                )
                .where(AgentInstance.agent_type_id.in_(agent_type_ids))
                .group_by(AgentInstance.agent_type_id)
            )
            counts = {row.agent_type_id: row.count for row in counts_result}

            # Combine results
            results = [
                {
                    "agent_type": agent_type,
                    "instance_count": counts.get(agent_type.id, 0),
                }
                for agent_type in agent_types
            ]

            return results, total
        except Exception as e:
            logger.error(
                f"Error getting agent types with counts: {e!s}", exc_info=True
            )
            raise

    async def get_by_expertise(
        self,
        db: AsyncSession,
        *,
        expertise: str,
        is_active: bool = True,
    ) -> list[AgentType]:
        """Get agent types that have a specific expertise."""
        try:
            # Use PostgreSQL JSONB contains operator
            query = select(AgentType).where(
                AgentType.expertise.contains([expertise.lower()]),
                AgentType.is_active == is_active,
            )
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting agent types by expertise {expertise}: {e!s}",
                exc_info=True,
            )
            raise

    async def deactivate(
        self,
        db: AsyncSession,
        *,
        agent_type_id: UUID,
    ) -> AgentType | None:
        """Deactivate an agent type (soft delete)."""
        try:
            result = await db.execute(
                select(AgentType).where(AgentType.id == agent_type_id)
            )
            agent_type = result.scalar_one_or_none()

            if not agent_type:
                return None

            agent_type.is_active = False
            await db.commit()
            await db.refresh(agent_type)
            return agent_type
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error deactivating agent type {agent_type_id}: {e!s}", exc_info=True
            )
            raise


# Create a singleton instance for use across the application
agent_type = CRUDAgentType(AgentType)
437  backend/app/crud/syndarix/issue.py  Normal file
@@ -0,0 +1,437 @@
# app/crud/syndarix/issue.py
"""Async CRUD operations for Issue model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import UTC, datetime
from typing import Any
from uuid import UUID

from sqlalchemy import func, or_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, Issue
from app.models.syndarix.enums import IssuePriority, IssueStatus, SyncStatus
from app.schemas.syndarix import IssueCreate, IssueUpdate

logger = logging.getLogger(__name__)


class CRUDIssue(CRUDBase[Issue, IssueCreate, IssueUpdate]):
    """Async CRUD operations for Issue model."""

    async def create(self, db: AsyncSession, *, obj_in: IssueCreate) -> Issue:
        """Create a new issue with error handling."""
        try:
            db_obj = Issue(
                project_id=obj_in.project_id,
                title=obj_in.title,
                body=obj_in.body,
                status=obj_in.status,
                priority=obj_in.priority,
                labels=obj_in.labels,
                assigned_agent_id=obj_in.assigned_agent_id,
                human_assignee=obj_in.human_assignee,
                sprint_id=obj_in.sprint_id,
                story_points=obj_in.story_points,
                external_tracker=obj_in.external_tracker,
                external_id=obj_in.external_id,
                external_url=obj_in.external_url,
                external_number=obj_in.external_number,
                sync_status=SyncStatus.SYNCED,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error(f"Integrity error creating issue: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating issue: {e!s}", exc_info=True)
            raise

    async def get_with_details(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get an issue with full details including related entity names.

        Returns:
            Dictionary with issue and related entity details
        """
        try:
            # Get issue with joined relationships
            result = await db.execute(
                select(Issue)
                .options(
                    joinedload(Issue.project),
                    joinedload(Issue.sprint),
                    joinedload(Issue.assigned_agent).joinedload(AgentInstance.agent_type),
                )
                .where(Issue.id == issue_id)
            )
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            return {
                "issue": issue,
                "project_name": issue.project.name if issue.project else None,
                "project_slug": issue.project.slug if issue.project else None,
                "sprint_name": issue.sprint.name if issue.sprint else None,
                "assigned_agent_type_name": (
                    issue.assigned_agent.agent_type.name
                    if issue.assigned_agent and issue.assigned_agent.agent_type
                    else None
                ),
            }
        except Exception as e:
            logger.error(
                f"Error getting issue with details {issue_id}: {e!s}", exc_info=True
            )
            raise

    async def get_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        status: IssueStatus | None = None,
        priority: IssuePriority | None = None,
        sprint_id: UUID | None = None,
        assigned_agent_id: UUID | None = None,
        labels: list[str] | None = None,
        search: str | None = None,
        skip: int = 0,
        limit: int = 100,
        sort_by: str = "created_at",
        sort_order: str = "desc",
    ) -> tuple[list[Issue], int]:
        """Get issues for a specific project with filters."""
        try:
            query = select(Issue).where(Issue.project_id == project_id)

            # Apply filters
            if status is not None:
                query = query.where(Issue.status == status)

            if priority is not None:
                query = query.where(Issue.priority == priority)

            if sprint_id is not None:
                query = query.where(Issue.sprint_id == sprint_id)

            if assigned_agent_id is not None:
                query = query.where(Issue.assigned_agent_id == assigned_agent_id)

            if labels:
                # Match any of the provided labels
                for label in labels:
                    query = query.where(Issue.labels.contains([label.lower()]))

            if search:
                search_filter = or_(
                    Issue.title.ilike(f"%{search}%"),
                    Issue.body.ilike(f"%{search}%"),
                )
                query = query.where(search_filter)

            # Get total count
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting
            sort_column = getattr(Issue, sort_by, Issue.created_at)
            if sort_order == "desc":
                query = query.order_by(sort_column.desc())
            else:
                query = query.order_by(sort_column.asc())

            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            issues = list(result.scalars().all())

            return issues, total
        except Exception as e:
            logger.error(
                f"Error getting issues by project {project_id}: {e!s}", exc_info=True
            )
            raise

    async def get_by_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
        status: IssueStatus | None = None,
    ) -> list[Issue]:
        """Get all issues in a sprint."""
        try:
            query = select(Issue).where(Issue.sprint_id == sprint_id)

            if status is not None:
                query = query.where(Issue.status == status)

            query = query.order_by(Issue.priority.desc(), Issue.created_at.asc())
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting issues by sprint {sprint_id}: {e!s}", exc_info=True
            )
            raise

    async def assign_to_agent(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
        agent_id: UUID | None,
    ) -> Issue | None:
        """Assign an issue to an agent (or unassign if agent_id is None)."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.assigned_agent_id = agent_id
            issue.human_assignee = None  # Clear human assignee when assigning to agent
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error assigning issue {issue_id} to agent {agent_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def assign_to_human(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
        human_assignee: str | None,
    ) -> Issue | None:
        """Assign an issue to a human (or unassign if human_assignee is None)."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.human_assignee = human_assignee
            issue.assigned_agent_id = None  # Clear agent when assigning to human
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error assigning issue {issue_id} to human {human_assignee}: {e!s}",
                exc_info=True,
            )
            raise

    async def close_issue(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> Issue | None:
        """Close an issue by setting status and closed_at timestamp."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.status = IssueStatus.CLOSED
            issue.closed_at = datetime.now(UTC)
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(f"Error closing issue {issue_id}: {e!s}", exc_info=True)
            raise

    async def reopen_issue(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> Issue | None:
        """Reopen a closed issue."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.status = IssueStatus.OPEN
            issue.closed_at = None
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(f"Error reopening issue {issue_id}: {e!s}", exc_info=True)
            raise

    async def update_sync_status(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
        sync_status: SyncStatus,
        last_synced_at: datetime | None = None,
        external_updated_at: datetime | None = None,
    ) -> Issue | None:
        """Update the sync status of an issue."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.sync_status = sync_status
            if last_synced_at:
                issue.last_synced_at = last_synced_at
            if external_updated_at:
                issue.external_updated_at = external_updated_at

            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error updating sync status for issue {issue_id}: {e!s}", exc_info=True
            )
            raise

    async def get_project_stats(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> dict[str, Any]:
        """Get issue statistics for a project."""
        try:
            # Get counts by status
            status_counts = await db.execute(
                select(Issue.status, func.count(Issue.id).label("count"))
                .where(Issue.project_id == project_id)
                .group_by(Issue.status)
            )
            by_status = {row.status.value: row.count for row in status_counts}

            # Get counts by priority
            priority_counts = await db.execute(
                select(Issue.priority, func.count(Issue.id).label("count"))
                .where(Issue.project_id == project_id)
                .group_by(Issue.priority)
            )
            by_priority = {row.priority.value: row.count for row in priority_counts}

            # Get story points
            points_result = await db.execute(
                select(
                    func.sum(Issue.story_points).label("total"),
                    func.sum(Issue.story_points)
                    .filter(Issue.status == IssueStatus.CLOSED)
                    .label("completed"),
                ).where(Issue.project_id == project_id)
            )
            points_row = points_result.one()

            total_issues = sum(by_status.values())

            return {
                "total": total_issues,
                "open": by_status.get("open", 0),
                "in_progress": by_status.get("in_progress", 0),
                "in_review": by_status.get("in_review", 0),
                "blocked": by_status.get("blocked", 0),
                "closed": by_status.get("closed", 0),
                "by_priority": by_priority,
                "total_story_points": points_row.total,
                "completed_story_points": points_row.completed,
            }
        except Exception as e:
            logger.error(
                f"Error getting issue stats for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_by_external_id(
        self,
        db: AsyncSession,
        *,
        external_tracker: str,
        external_id: str,
    ) -> Issue | None:
        """Get an issue by its external tracker ID."""
        try:
            result = await db.execute(
                select(Issue).where(
                    Issue.external_tracker == external_tracker,
                    Issue.external_id == external_id,
                )
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(
                f"Error getting issue by external ID {external_tracker}:{external_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_pending_sync(
        self,
        db: AsyncSession,
        *,
        project_id: UUID | None = None,
        limit: int = 100,
    ) -> list[Issue]:
        """Get issues that need to be synced with external tracker."""
        try:
            query = select(Issue).where(
                Issue.external_tracker.isnot(None),
                Issue.sync_status.in_([SyncStatus.PENDING, SyncStatus.ERROR]),
            )

            if project_id:
                query = query.where(Issue.project_id == project_id)

            query = query.order_by(Issue.updated_at.asc()).limit(limit)
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(f"Error getting pending sync issues: {e!s}", exc_info=True)
            raise


# Create a singleton instance for use across the application
issue = CRUDIssue(Issue)
309  backend/app/crud/syndarix/project.py  Normal file
@@ -0,0 +1,309 @@
# app/crud/syndarix/project.py
"""Async CRUD operations for Project model using SQLAlchemy 2.0 patterns."""

import logging
from typing import Any
from uuid import UUID

from sqlalchemy import func, or_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, Issue, Project, Sprint
from app.models.syndarix.enums import ProjectStatus, SprintStatus
from app.schemas.syndarix import ProjectCreate, ProjectUpdate

logger = logging.getLogger(__name__)


class CRUDProject(CRUDBase[Project, ProjectCreate, ProjectUpdate]):
    """Async CRUD operations for Project model."""

    async def get_by_slug(self, db: AsyncSession, *, slug: str) -> Project | None:
        """Get project by slug."""
        try:
            result = await db.execute(select(Project).where(Project.slug == slug))
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(f"Error getting project by slug {slug}: {e!s}")
            raise

    async def create(self, db: AsyncSession, *, obj_in: ProjectCreate) -> Project:
        """Create a new project with error handling."""
        try:
            db_obj = Project(
                name=obj_in.name,
                slug=obj_in.slug,
                description=obj_in.description,
                autonomy_level=obj_in.autonomy_level,
                status=obj_in.status,
                settings=obj_in.settings or {},
                owner_id=obj_in.owner_id,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            if "slug" in error_msg.lower():
                logger.warning(f"Duplicate slug attempted: {obj_in.slug}")
                raise ValueError(f"Project with slug '{obj_in.slug}' already exists")
            logger.error(f"Integrity error creating project: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating project: {e!s}", exc_info=True)
            raise

    async def get_multi_with_filters(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        status: ProjectStatus | None = None,
        owner_id: UUID | None = None,
        search: str | None = None,
        sort_by: str = "created_at",
        sort_order: str = "desc",
    ) -> tuple[list[Project], int]:
        """
        Get multiple projects with filtering, searching, and sorting.

        Returns:
            Tuple of (projects list, total count)
        """
        try:
            query = select(Project)

            # Apply filters
            if status is not None:
                query = query.where(Project.status == status)

            if owner_id is not None:
                query = query.where(Project.owner_id == owner_id)

            if search:
                search_filter = or_(
                    Project.name.ilike(f"%{search}%"),
                    Project.slug.ilike(f"%{search}%"),
                    Project.description.ilike(f"%{search}%"),
                )
                query = query.where(search_filter)

            # Get total count before pagination
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting
            sort_column = getattr(Project, sort_by, Project.created_at)
            if sort_order == "desc":
                query = query.order_by(sort_column.desc())
            else:
                query = query.order_by(sort_column.asc())

            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            projects = list(result.scalars().all())

            return projects, total
        except Exception as e:
            logger.error(f"Error getting projects with filters: {e!s}")
            raise

    async def get_with_counts(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get a single project with agent and issue counts.

        Returns:
            Dictionary with project, agent_count, issue_count, active_sprint_name
        """
        try:
            # Get project
            result = await db.execute(select(Project).where(Project.id == project_id))
            project = result.scalar_one_or_none()

            if not project:
                return None

            # Get agent count
            agent_count_result = await db.execute(
                select(func.count(AgentInstance.id)).where(
                    AgentInstance.project_id == project_id
                )
            )
            agent_count = agent_count_result.scalar_one()

            # Get issue count
            issue_count_result = await db.execute(
                select(func.count(Issue.id)).where(Issue.project_id == project_id)
            )
            issue_count = issue_count_result.scalar_one()

            # Get active sprint name
            active_sprint_result = await db.execute(
                select(Sprint.name).where(
                    Sprint.project_id == project_id,
                    Sprint.status == SprintStatus.ACTIVE,
                )
            )
            active_sprint_name = active_sprint_result.scalar_one_or_none()

            return {
                "project": project,
                "agent_count": agent_count,
                "issue_count": issue_count,
                "active_sprint_name": active_sprint_name,
            }
        except Exception as e:
            logger.error(
                f"Error getting project with counts {project_id}: {e!s}", exc_info=True
            )
            raise

    async def get_multi_with_counts(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        status: ProjectStatus | None = None,
        owner_id: UUID | None = None,
        search: str | None = None,
    ) -> tuple[list[dict[str, Any]], int]:
        """
        Get projects with agent/issue counts in optimized queries.

        Returns:
            Tuple of (list of dicts with project and counts, total count)
        """
        try:
            # Get filtered projects
            projects, total = await self.get_multi_with_filters(
                db,
                skip=skip,
                limit=limit,
                status=status,
                owner_id=owner_id,
                search=search,
            )

            if not projects:
                return [], 0

            project_ids = [p.id for p in projects]

            # Get agent counts in bulk
            agent_counts_result = await db.execute(
                select(
                    AgentInstance.project_id,
                    func.count(AgentInstance.id).label("count"),
                )
                .where(AgentInstance.project_id.in_(project_ids))
                .group_by(AgentInstance.project_id)
            )
            agent_counts = {row.project_id: row.count for row in agent_counts_result}

            # Get issue counts in bulk
            issue_counts_result = await db.execute(
                select(
                    Issue.project_id,
                    func.count(Issue.id).label("count"),
                )
                .where(Issue.project_id.in_(project_ids))
                .group_by(Issue.project_id)
            )
            issue_counts = {row.project_id: row.count for row in issue_counts_result}

            # Get active sprint names
            active_sprints_result = await db.execute(
                select(Sprint.project_id, Sprint.name).where(
                    Sprint.project_id.in_(project_ids),
                    Sprint.status == SprintStatus.ACTIVE,
                )
            )
            active_sprints = {
                row.project_id: row.name for row in active_sprints_result
            }

            # Combine results
            results = [
                {
                    "project": project,
                    "agent_count": agent_counts.get(project.id, 0),
                    "issue_count": issue_counts.get(project.id, 0),
                    "active_sprint_name": active_sprints.get(project.id),
                }
                for project in projects
            ]

            return results, total
        except Exception as e:
            logger.error(
                f"Error getting projects with counts: {e!s}", exc_info=True
            )
            raise

    async def get_projects_by_owner(
        self,
        db: AsyncSession,
        *,
        owner_id: UUID,
        status: ProjectStatus | None = None,
    ) -> list[Project]:
        """Get all projects owned by a specific user."""
        try:
            query = select(Project).where(Project.owner_id == owner_id)

            if status is not None:
                query = query.where(Project.status == status)

            query = query.order_by(Project.created_at.desc())
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting projects by owner {owner_id}: {e!s}", exc_info=True
            )
            raise

    async def archive_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> Project | None:
        """Archive a project by setting status to ARCHIVED."""
        try:
            result = await db.execute(
                select(Project).where(Project.id == project_id)
            )
            project = result.scalar_one_or_none()

            if not project:
                return None

            project.status = ProjectStatus.ARCHIVED
            await db.commit()
            await db.refresh(project)
            return project
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error archiving project {project_id}: {e!s}", exc_info=True
            )
            raise


# Create a singleton instance for use across the application
project = CRUDProject(Project)
406  backend/app/crud/syndarix/sprint.py  Normal file
@@ -0,0 +1,406 @@
# app/crud/syndarix/sprint.py
"""Async CRUD operations for Sprint model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import date
from typing import Any
from uuid import UUID

from sqlalchemy import func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.crud.base import CRUDBase
from app.models.syndarix import Issue, Sprint
from app.models.syndarix.enums import IssueStatus, SprintStatus
from app.schemas.syndarix import SprintCreate, SprintUpdate

logger = logging.getLogger(__name__)


class CRUDSprint(CRUDBase[Sprint, SprintCreate, SprintUpdate]):
    """Async CRUD operations for Sprint model."""

    async def create(self, db: AsyncSession, *, obj_in: SprintCreate) -> Sprint:
        """Create a new sprint with error handling."""
        try:
            db_obj = Sprint(
                project_id=obj_in.project_id,
                name=obj_in.name,
                number=obj_in.number,
                goal=obj_in.goal,
                start_date=obj_in.start_date,
                end_date=obj_in.end_date,
                status=obj_in.status,
                planned_points=obj_in.planned_points,
                completed_points=obj_in.completed_points,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error(f"Integrity error creating sprint: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating sprint: {e!s}", exc_info=True)
            raise

    async def get_with_details(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get a sprint with full details including issue counts.

        Returns:
            Dictionary with sprint and related details
        """
        try:
            # Get sprint with joined project
            result = await db.execute(
                select(Sprint)
                .options(joinedload(Sprint.project))
                .where(Sprint.id == sprint_id)
            )
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            # Get issue counts
            issue_counts = await db.execute(
                select(
                    func.count(Issue.id).label("total"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.OPEN)
                    .label("open"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.CLOSED)
                    .label("completed"),
                ).where(Issue.sprint_id == sprint_id)
            )
            counts = issue_counts.one()

            return {
                "sprint": sprint,
                "project_name": sprint.project.name if sprint.project else None,
                "project_slug": sprint.project.slug if sprint.project else None,
                "issue_count": counts.total,
                "open_issues": counts.open,
                "completed_issues": counts.completed,
            }
        except Exception as e:
            logger.error(
                f"Error getting sprint with details {sprint_id}: {e!s}", exc_info=True
            )
            raise

    async def get_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        status: SprintStatus | None = None,
        skip: int = 0,
        limit: int = 100,
    ) -> tuple[list[Sprint], int]:
        """Get sprints for a specific project."""
        try:
            query = select(Sprint).where(Sprint.project_id == project_id)

            if status is not None:
                query = query.where(Sprint.status == status)

            # Get total count
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting (by number descending - newest first)
            query = query.order_by(Sprint.number.desc())
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            sprints = list(result.scalars().all())

            return sprints, total
        except Exception as e:
            logger.error(
                f"Error getting sprints by project {project_id}: {e!s}", exc_info=True
            )
            raise

    async def get_active_sprint(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> Sprint | None:
        """Get the currently active sprint for a project."""
        try:
            result = await db.execute(
                select(Sprint).where(
                    Sprint.project_id == project_id,
                    Sprint.status == SprintStatus.ACTIVE,
                )
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(
                f"Error getting active sprint for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_next_sprint_number(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> int:
        """Get the next sprint number for a project."""
        try:
            result = await db.execute(
                select(func.max(Sprint.number)).where(Sprint.project_id == project_id)
            )
            max_number = result.scalar_one_or_none()
            return (max_number or 0) + 1
        except Exception as e:
            logger.error(
                f"Error getting next sprint number for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def start_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
        start_date: date | None = None,
    ) -> Sprint | None:
        """Start a planned sprint."""
        try:
            result = await db.execute(select(Sprint).where(Sprint.id == sprint_id))
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            if sprint.status != SprintStatus.PLANNED:
                raise ValueError(
                    f"Cannot start sprint with status {sprint.status.value}"
                )

            # Check for existing active sprint in project
            active_sprint = await self.get_active_sprint(db, project_id=sprint.project_id)
            if active_sprint:
                raise ValueError(
                    f"Project already has an active sprint: {active_sprint.name}"
                )

            sprint.status = SprintStatus.ACTIVE
            if start_date:
                sprint.start_date = start_date

            # Calculate planned points from issues
            points_result = await db.execute(
                select(func.sum(Issue.story_points)).where(Issue.sprint_id == sprint_id)
            )
            sprint.planned_points = points_result.scalar_one_or_none() or 0

            await db.commit()
            await db.refresh(sprint)
            return sprint
        except ValueError:
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"Error starting sprint {sprint_id}: {e!s}", exc_info=True)
            raise

    async def complete_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> Sprint | None:
        """Complete an active sprint and calculate completed points."""
        try:
            result = await db.execute(select(Sprint).where(Sprint.id == sprint_id))
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            if sprint.status != SprintStatus.ACTIVE:
                raise ValueError(
                    f"Cannot complete sprint with status {sprint.status.value}"
                )

            sprint.status = SprintStatus.COMPLETED

            # Calculate completed points from closed issues
            points_result = await db.execute(
                select(func.sum(Issue.story_points)).where(
                    Issue.sprint_id == sprint_id,
                    Issue.status == IssueStatus.CLOSED,
                )
            )
            sprint.completed_points = points_result.scalar_one_or_none() or 0

            await db.commit()
            await db.refresh(sprint)
            return sprint
        except ValueError:
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"Error completing sprint {sprint_id}: {e!s}", exc_info=True)
            raise

    async def cancel_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> Sprint | None:
        """Cancel a sprint (only PLANNED or ACTIVE sprints can be cancelled)."""
        try:
            result = await db.execute(select(Sprint).where(Sprint.id == sprint_id))
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            if sprint.status not in [SprintStatus.PLANNED, SprintStatus.ACTIVE]:
                raise ValueError(
                    f"Cannot cancel sprint with status {sprint.status.value}"
                )

            sprint.status = SprintStatus.CANCELLED
            await db.commit()
            await db.refresh(sprint)
            return sprint
        except ValueError:
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"Error cancelling sprint {sprint_id}: {e!s}", exc_info=True)
            raise

    async def get_velocity(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        limit: int = 5,
    ) -> list[dict[str, Any]]:
        """Get velocity data for completed sprints."""
        try:
            result = await db.execute(
                select(Sprint)
                .where(
                    Sprint.project_id == project_id,
                    Sprint.status == SprintStatus.COMPLETED,
                )
                .order_by(Sprint.number.desc())
                .limit(limit)
            )
            sprints = list(result.scalars().all())

            velocity_data = []
            for sprint in reversed(sprints):  # Return in chronological order
                velocity = None
                if sprint.planned_points and sprint.planned_points > 0:
                    velocity = (sprint.completed_points or 0) / sprint.planned_points
                velocity_data.append(
                    {
                        "sprint_number": sprint.number,
                        "sprint_name": sprint.name,
                        "planned_points": sprint.planned_points,
                        "completed_points": sprint.completed_points,
                        "velocity": velocity,
                    }
                )

            return velocity_data
        except Exception as e:
            logger.error(
                f"Error getting velocity for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_sprints_with_issue_counts(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        skip: int = 0,
        limit: int = 100,
    ) -> tuple[list[dict[str, Any]], int]:
        """Get sprints with issue counts in optimized queries."""
        try:
            # Get sprints
            sprints, total = await self.get_by_project(
                db, project_id=project_id, skip=skip, limit=limit
            )

            if not sprints:
                return [], 0

            sprint_ids = [s.id for s in sprints]

            # Get issue counts in bulk
            issue_counts = await db.execute(
                select(
                    Issue.sprint_id,
                    func.count(Issue.id).label("total"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.OPEN)
                    .label("open"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.CLOSED)
                    .label("completed"),
                )
                .where(Issue.sprint_id.in_(sprint_ids))
                .group_by(Issue.sprint_id)
            )
            counts_map = {
                row.sprint_id: {
                    "issue_count": row.total,
                    "open_issues": row.open,
                    "completed_issues": row.completed,
                }
                for row in issue_counts
            }

            # Combine results
            results = [
                {
                    "sprint": sprint,
                    **counts_map.get(
                        sprint.id, {"issue_count": 0, "open_issues": 0, "completed_issues": 0}
                    ),
                }
                for sprint in sprints
            ]

            return results, total
        except Exception as e:
            logger.error(
                f"Error getting sprints with counts for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise


# Create a singleton instance for use across the application
sprint = CRUDSprint(Sprint)