feat(backend): Add Syndarix domain models with CRUD operations
- Add Project model with slug, description, autonomy level, and settings
- Add AgentType model for agent templates with model config and failover
- Add AgentInstance model for running agents with status and memory
- Add Issue model with external tracker sync (Gitea/GitHub/GitLab)
- Add Sprint model with velocity tracking and lifecycle management
- Add comprehensive Pydantic schemas with validation
- Add full CRUD operations for all models with filtering/sorting
- Add 280+ tests for models, schemas, and CRUD operations

Implements #23, #24, #25, #26, #27

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
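For reviewers: a minimal sketch of the kind of field validation the schema tests below exercise. It is illustrative only and assumes Pydantic v2; the real definitions live in app/schemas/syndarix and are not part of this diff, and the pared-down ProjectCreate shape and SLUG_RE name here are assumptions inferred from the tests (lowercase slugs joined by single hyphens, stripped non-empty names).

import re

from pydantic import BaseModel, Field, field_validator

SLUG_RE = re.compile(r"^[a-z0-9]+(?:-[a-z0-9]+)*$")


class ProjectCreate(BaseModel):
    name: str = Field(min_length=1)
    slug: str
    description: str | None = None

    @field_validator("name")
    @classmethod
    def strip_name(cls, value: str) -> str:
        # Tests expect " Padded Name " -> "Padded Name" and whitespace-only names to fail.
        value = value.strip()
        if not value:
            raise ValueError("name must not be empty or whitespace")
        return value

    @field_validator("slug")
    @classmethod
    def validate_slug(cls, value: str) -> str:
        # Separate checks so the error text contains "hyphen" / "consecutive",
        # which the slug tests assert on.
        if value.startswith("-") or value.endswith("-"):
            raise ValueError("slug must not start or end with a hyphen")
        if "--" in value:
            raise ValueError("slug must not contain consecutive hyphens")
        if not SLUG_RE.fullmatch(value):
            raise ValueError("slug may only contain lowercase letters, digits, and hyphens")
        return value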
2
backend/tests/schemas/syndarix/__init__.py
Normal file
@@ -0,0 +1,2 @@
# tests/schemas/syndarix/__init__.py
"""Syndarix schema validation tests."""
68
backend/tests/schemas/syndarix/conftest.py
Normal file
@@ -0,0 +1,68 @@
# tests/schemas/syndarix/conftest.py
"""
Shared fixtures for Syndarix schema tests.
"""

import uuid
from datetime import date, timedelta

import pytest


@pytest.fixture
def valid_uuid():
    """Return a valid UUID for testing."""
    return uuid.uuid4()


@pytest.fixture
def valid_project_data():
    """Return valid project data for schema testing."""
    return {
        "name": "Test Project",
        "slug": "test-project",
        "description": "A test project",
    }


@pytest.fixture
def valid_agent_type_data():
    """Return valid agent type data for schema testing."""
    return {
        "name": "Backend Engineer",
        "slug": "backend-engineer",
        "personality_prompt": "You are an expert backend engineer.",
        "primary_model": "claude-opus-4-5-20251101",
    }


@pytest.fixture
def valid_sprint_data(valid_uuid):
    """Return valid sprint data for schema testing."""
    today = date.today()
    return {
        "project_id": valid_uuid,
        "name": "Sprint 1",
        "number": 1,
        "start_date": today,
        "end_date": today + timedelta(days=14),
    }


@pytest.fixture
def valid_issue_data(valid_uuid):
    """Return valid issue data for schema testing."""
    return {
        "project_id": valid_uuid,
        "title": "Test Issue",
        "body": "Issue description",
    }


@pytest.fixture
def valid_agent_instance_data(valid_uuid):
    """Return valid agent instance data for schema testing."""
    return {
        "agent_type_id": valid_uuid,
        "project_id": valid_uuid,
    }
244
backend/tests/schemas/syndarix/test_agent_instance_schemas.py
Normal file
@@ -0,0 +1,244 @@
# tests/schemas/syndarix/test_agent_instance_schemas.py
"""
Tests for AgentInstance schema validation.
"""

import uuid
from decimal import Decimal

import pytest
from pydantic import ValidationError

from app.schemas.syndarix import (
    AgentInstanceCreate,
    AgentInstanceUpdate,
    AgentStatus,
)


class TestAgentInstanceCreateValidation:
    """Tests for AgentInstanceCreate schema validation."""

    def test_valid_agent_instance_create(self, valid_agent_instance_data):
        """Test creating agent instance with valid data."""
        instance = AgentInstanceCreate(**valid_agent_instance_data)

        assert instance.agent_type_id is not None
        assert instance.project_id is not None

    def test_agent_instance_create_defaults(self, valid_agent_instance_data):
        """Test that defaults are applied correctly."""
        instance = AgentInstanceCreate(**valid_agent_instance_data)

        assert instance.status == AgentStatus.IDLE
        assert instance.current_task is None
        assert instance.short_term_memory == {}
        assert instance.long_term_memory_ref is None
        assert instance.session_id is None

    def test_agent_instance_create_with_all_fields(self, valid_uuid):
        """Test creating agent instance with all optional fields."""
        instance = AgentInstanceCreate(
            agent_type_id=valid_uuid,
            project_id=valid_uuid,
            status=AgentStatus.WORKING,
            current_task="Processing feature request",
            short_term_memory={"context": "working"},
            long_term_memory_ref="project-123/agent-456",
            session_id="session-abc",
        )

        assert instance.status == AgentStatus.WORKING
        assert instance.current_task == "Processing feature request"
        assert instance.short_term_memory == {"context": "working"}
        assert instance.long_term_memory_ref == "project-123/agent-456"
        assert instance.session_id == "session-abc"

    def test_agent_instance_create_agent_type_id_required(self, valid_uuid):
        """Test that agent_type_id is required."""
        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceCreate(
                project_id=valid_uuid,
            )

        errors = exc_info.value.errors()
        assert any("agent_type_id" in str(e).lower() for e in errors)

    def test_agent_instance_create_project_id_required(self, valid_uuid):
        """Test that project_id is required."""
        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceCreate(
                agent_type_id=valid_uuid,
            )

        errors = exc_info.value.errors()
        assert any("project_id" in str(e).lower() for e in errors)


class TestAgentInstanceUpdateValidation:
    """Tests for AgentInstanceUpdate schema validation."""

    def test_agent_instance_update_partial(self):
        """Test updating only some fields."""
        update = AgentInstanceUpdate(
            status=AgentStatus.WORKING,
        )

        assert update.status == AgentStatus.WORKING
        assert update.current_task is None
        assert update.short_term_memory is None

    def test_agent_instance_update_all_fields(self):
        """Test updating all fields."""
        from datetime import UTC, datetime

        now = datetime.now(UTC)
        update = AgentInstanceUpdate(
            status=AgentStatus.WORKING,
            current_task="New task",
            short_term_memory={"new": "context"},
            long_term_memory_ref="new-ref",
            session_id="new-session",
            last_activity_at=now,
            tasks_completed=5,
            tokens_used=10000,
            cost_incurred=Decimal("1.5000"),
        )

        assert update.status == AgentStatus.WORKING
        assert update.current_task == "New task"
        assert update.tasks_completed == 5
        assert update.tokens_used == 10000
        assert update.cost_incurred == Decimal("1.5000")

    def test_agent_instance_update_tasks_completed_negative_fails(self):
        """Test that negative tasks_completed raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceUpdate(tasks_completed=-1)

        errors = exc_info.value.errors()
        assert any("tasks_completed" in str(e).lower() for e in errors)

    def test_agent_instance_update_tokens_used_negative_fails(self):
        """Test that negative tokens_used raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceUpdate(tokens_used=-1)

        errors = exc_info.value.errors()
        assert any("tokens_used" in str(e).lower() for e in errors)

    def test_agent_instance_update_cost_incurred_negative_fails(self):
        """Test that negative cost_incurred raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceUpdate(cost_incurred=Decimal("-0.01"))

        errors = exc_info.value.errors()
        assert any("cost_incurred" in str(e).lower() for e in errors)


class TestAgentStatusEnum:
    """Tests for AgentStatus enum validation."""

    def test_valid_agent_statuses(self, valid_uuid):
        """Test all valid agent statuses."""
        for status in AgentStatus:
            instance = AgentInstanceCreate(
                agent_type_id=valid_uuid,
                project_id=valid_uuid,
                status=status,
            )
            assert instance.status == status

    def test_invalid_agent_status(self, valid_uuid):
        """Test that invalid agent status raises ValidationError."""
        with pytest.raises(ValidationError):
            AgentInstanceCreate(
                agent_type_id=valid_uuid,
                project_id=valid_uuid,
                status="invalid",  # type: ignore
            )


class TestAgentInstanceShortTermMemory:
    """Tests for AgentInstance short_term_memory validation."""

    def test_short_term_memory_empty_dict(self, valid_uuid):
        """Test that empty short_term_memory is valid."""
        instance = AgentInstanceCreate(
            agent_type_id=valid_uuid,
            project_id=valid_uuid,
            short_term_memory={},
        )
        assert instance.short_term_memory == {}

    def test_short_term_memory_complex(self, valid_uuid):
        """Test complex short_term_memory structure."""
        memory = {
            "conversation_history": [
                {"role": "user", "content": "Hello"},
                {"role": "assistant", "content": "Hi there"},
            ],
            "recent_files": ["file1.py", "file2.py"],
            "decisions": {"key": "value"},
            "context_tokens": 1024,
        }
        instance = AgentInstanceCreate(
            agent_type_id=valid_uuid,
            project_id=valid_uuid,
            short_term_memory=memory,
        )
        assert instance.short_term_memory == memory


class TestAgentInstanceStringFields:
    """Tests for AgentInstance string field validation."""

    def test_long_term_memory_ref_max_length(self, valid_uuid):
        """Test long_term_memory_ref max length."""
        long_ref = "a" * 500  # Max length is 500

        instance = AgentInstanceCreate(
            agent_type_id=valid_uuid,
            project_id=valid_uuid,
            long_term_memory_ref=long_ref,
        )
        assert instance.long_term_memory_ref == long_ref

    def test_long_term_memory_ref_too_long(self, valid_uuid):
        """Test that too long long_term_memory_ref raises ValidationError."""
        too_long = "a" * 501

        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceCreate(
                agent_type_id=valid_uuid,
                project_id=valid_uuid,
                long_term_memory_ref=too_long,
            )

        errors = exc_info.value.errors()
        assert any("long_term_memory_ref" in str(e).lower() for e in errors)

    def test_session_id_max_length(self, valid_uuid):
        """Test session_id max length."""
        long_session = "a" * 255  # Max length is 255

        instance = AgentInstanceCreate(
            agent_type_id=valid_uuid,
            project_id=valid_uuid,
            session_id=long_session,
        )
        assert instance.session_id == long_session

    def test_session_id_too_long(self, valid_uuid):
        """Test that too long session_id raises ValidationError."""
        too_long = "a" * 256

        with pytest.raises(ValidationError) as exc_info:
            AgentInstanceCreate(
                agent_type_id=valid_uuid,
                project_id=valid_uuid,
                session_id=too_long,
            )

        errors = exc_info.value.errors()
        assert any("session_id" in str(e).lower() for e in errors)
318
backend/tests/schemas/syndarix/test_agent_type_schemas.py
Normal file
@@ -0,0 +1,318 @@
# tests/schemas/syndarix/test_agent_type_schemas.py
"""
Tests for AgentType schema validation.
"""

import pytest
from pydantic import ValidationError

from app.schemas.syndarix import (
    AgentTypeCreate,
    AgentTypeUpdate,
)


class TestAgentTypeCreateValidation:
    """Tests for AgentTypeCreate schema validation."""

    def test_valid_agent_type_create(self, valid_agent_type_data):
        """Test creating agent type with valid data."""
        agent_type = AgentTypeCreate(**valid_agent_type_data)

        assert agent_type.name == "Backend Engineer"
        assert agent_type.slug == "backend-engineer"
        assert agent_type.personality_prompt == "You are an expert backend engineer."
        assert agent_type.primary_model == "claude-opus-4-5-20251101"

    def test_agent_type_create_defaults(self, valid_agent_type_data):
        """Test that defaults are applied correctly."""
        agent_type = AgentTypeCreate(**valid_agent_type_data)

        assert agent_type.expertise == []
        assert agent_type.fallback_models == []
        assert agent_type.model_params == {}
        assert agent_type.mcp_servers == []
        assert agent_type.tool_permissions == {}
        assert agent_type.is_active is True

    def test_agent_type_create_with_all_fields(self, valid_agent_type_data):
        """Test creating agent type with all optional fields."""
        agent_type = AgentTypeCreate(
            **valid_agent_type_data,
            description="Detailed description",
            expertise=["python", "fastapi"],
            fallback_models=["claude-sonnet-4-20250514"],
            model_params={"temperature": 0.7},
            mcp_servers=["gitea", "slack"],
            tool_permissions={"allowed": ["*"]},
            is_active=True,
        )

        assert agent_type.description == "Detailed description"
        assert agent_type.expertise == ["python", "fastapi"]
        assert agent_type.fallback_models == ["claude-sonnet-4-20250514"]

    def test_agent_type_create_name_empty_fails(self):
        """Test that empty name raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            AgentTypeCreate(
                name="",
                slug="valid-slug",
                personality_prompt="Test prompt",
                primary_model="claude-opus-4-5-20251101",
            )

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_agent_type_create_name_stripped(self):
        """Test that name is stripped of whitespace."""
        agent_type = AgentTypeCreate(
            name=" Padded Name ",
            slug="padded-slug",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
        )

        assert agent_type.name == "Padded Name"

    def test_agent_type_create_personality_prompt_required(self):
        """Test that personality_prompt is required."""
        with pytest.raises(ValidationError) as exc_info:
            AgentTypeCreate(
                name="Test Agent",
                slug="test-agent",
                primary_model="claude-opus-4-5-20251101",
            )

        errors = exc_info.value.errors()
        assert any("personality_prompt" in str(e).lower() for e in errors)

    def test_agent_type_create_primary_model_required(self):
        """Test that primary_model is required."""
        with pytest.raises(ValidationError) as exc_info:
            AgentTypeCreate(
                name="Test Agent",
                slug="test-agent",
                personality_prompt="Test prompt",
            )

        errors = exc_info.value.errors()
        assert any("primary_model" in str(e).lower() for e in errors)


class TestAgentTypeSlugValidation:
    """Tests for AgentType slug validation."""

    def test_valid_slugs(self):
        """Test various valid slug formats."""
        valid_slugs = [
            "simple",
            "with-hyphens",
            "has123numbers",
        ]

        for slug in valid_slugs:
            agent_type = AgentTypeCreate(
                name="Test Agent",
                slug=slug,
                personality_prompt="Test",
                primary_model="claude-opus-4-5-20251101",
            )
            assert agent_type.slug == slug

    def test_invalid_slug_uppercase(self):
        """Test that uppercase letters in slug raise ValidationError."""
        with pytest.raises(ValidationError):
            AgentTypeCreate(
                name="Test Agent",
                slug="Invalid-Uppercase",
                personality_prompt="Test",
                primary_model="claude-opus-4-5-20251101",
            )

    def test_invalid_slug_special_chars(self):
        """Test that special characters raise ValidationError."""
        with pytest.raises(ValidationError):
            AgentTypeCreate(
                name="Test Agent",
                slug="has_underscore",
                personality_prompt="Test",
                primary_model="claude-opus-4-5-20251101",
            )


class TestAgentTypeExpertiseValidation:
    """Tests for AgentType expertise validation."""

    def test_expertise_normalized_lowercase(self):
        """Test that expertise is normalized to lowercase."""
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            expertise=["Python", "FastAPI", "PostgreSQL"],
        )

        assert agent_type.expertise == ["python", "fastapi", "postgresql"]

    def test_expertise_stripped(self):
        """Test that expertise items are stripped."""
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            expertise=[" python ", " fastapi "],
        )

        assert agent_type.expertise == ["python", "fastapi"]

    def test_expertise_empty_strings_removed(self):
        """Test that empty expertise strings are removed."""
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            expertise=["python", "", " ", "fastapi"],
        )

        assert agent_type.expertise == ["python", "fastapi"]


class TestAgentTypeMcpServersValidation:
    """Tests for AgentType MCP servers validation."""

    def test_mcp_servers_stripped(self):
        """Test that MCP server names are stripped."""
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            mcp_servers=[" gitea ", " slack "],
        )

        assert agent_type.mcp_servers == ["gitea", "slack"]

    def test_mcp_servers_empty_strings_removed(self):
        """Test that empty MCP server strings are removed."""
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            mcp_servers=["gitea", "", " ", "slack"],
        )

        assert agent_type.mcp_servers == ["gitea", "slack"]


class TestAgentTypeUpdateValidation:
    """Tests for AgentTypeUpdate schema validation."""

    def test_agent_type_update_partial(self):
        """Test updating only some fields."""
        update = AgentTypeUpdate(
            name="Updated Name",
        )

        assert update.name == "Updated Name"
        assert update.slug is None
        assert update.description is None
        assert update.expertise is None

    def test_agent_type_update_all_fields(self):
        """Test updating all fields."""
        update = AgentTypeUpdate(
            name="Updated Name",
            slug="updated-slug",
            description="Updated description",
            expertise=["new-skill"],
            personality_prompt="Updated prompt",
            primary_model="new-model",
            fallback_models=["fallback-1"],
            model_params={"temp": 0.5},
            mcp_servers=["server-1"],
            tool_permissions={"key": "value"},
            is_active=False,
        )

        assert update.name == "Updated Name"
        assert update.slug == "updated-slug"
        assert update.is_active is False

    def test_agent_type_update_empty_name_fails(self):
        """Test that empty name in update raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            AgentTypeUpdate(name="")

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_agent_type_update_slug_validation(self):
        """Test that slug validation applies to updates."""
        with pytest.raises(ValidationError):
            AgentTypeUpdate(slug="Invalid-Slug")

    def test_agent_type_update_expertise_normalized(self):
        """Test that expertise is normalized in updates."""
        update = AgentTypeUpdate(
            expertise=["Python", "FastAPI"],
        )

        assert update.expertise == ["python", "fastapi"]


class TestAgentTypeJsonFields:
    """Tests for AgentType JSON field validation."""

    def test_model_params_complex(self):
        """Test complex model_params structure."""
        params = {
            "temperature": 0.7,
            "max_tokens": 4096,
            "top_p": 0.9,
            "stop_sequences": ["###"],
        }
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            model_params=params,
        )

        assert agent_type.model_params == params

    def test_tool_permissions_complex(self):
        """Test complex tool_permissions structure."""
        permissions = {
            "allowed": ["file:read", "git:commit"],
            "denied": ["file:delete"],
            "require_approval": ["git:push"],
        }
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            tool_permissions=permissions,
        )

        assert agent_type.tool_permissions == permissions

    def test_fallback_models_list(self):
        """Test fallback_models as a list."""
        models = ["claude-sonnet-4-20250514", "gpt-4o", "mistral-large"]
        agent_type = AgentTypeCreate(
            name="Test Agent",
            slug="test-agent",
            personality_prompt="Test",
            primary_model="claude-opus-4-5-20251101",
            fallback_models=models,
        )

        assert agent_type.fallback_models == models
342
backend/tests/schemas/syndarix/test_issue_schemas.py
Normal file
@@ -0,0 +1,342 @@
# tests/schemas/syndarix/test_issue_schemas.py
"""
Tests for Issue schema validation.
"""

import uuid

import pytest
from pydantic import ValidationError

from app.schemas.syndarix import (
    IssueAssign,
    IssueCreate,
    IssuePriority,
    IssueStatus,
    IssueUpdate,
    SyncStatus,
)


class TestIssueCreateValidation:
    """Tests for IssueCreate schema validation."""

    def test_valid_issue_create(self, valid_issue_data):
        """Test creating issue with valid data."""
        issue = IssueCreate(**valid_issue_data)

        assert issue.title == "Test Issue"
        assert issue.body == "Issue description"

    def test_issue_create_defaults(self, valid_issue_data):
        """Test that defaults are applied correctly."""
        issue = IssueCreate(**valid_issue_data)

        assert issue.status == IssueStatus.OPEN
        assert issue.priority == IssuePriority.MEDIUM
        assert issue.labels == []
        assert issue.story_points is None
        assert issue.assigned_agent_id is None
        assert issue.human_assignee is None
        assert issue.sprint_id is None

    def test_issue_create_with_all_fields(self, valid_uuid):
        """Test creating issue with all optional fields."""
        agent_id = uuid.uuid4()
        sprint_id = uuid.uuid4()

        issue = IssueCreate(
            project_id=valid_uuid,
            title="Full Issue",
            body="Detailed body",
            status=IssueStatus.IN_PROGRESS,
            priority=IssuePriority.HIGH,
            labels=["bug", "security"],
            story_points=5,
            assigned_agent_id=agent_id,
            sprint_id=sprint_id,
            external_tracker="gitea",
            external_id="gitea-123",
            external_url="https://gitea.example.com/issues/123",
            external_number=123,
        )

        assert issue.status == IssueStatus.IN_PROGRESS
        assert issue.priority == IssuePriority.HIGH
        assert issue.labels == ["bug", "security"]
        assert issue.story_points == 5
        assert issue.external_tracker == "gitea"

    def test_issue_create_title_empty_fails(self, valid_uuid):
        """Test that empty title raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            IssueCreate(
                project_id=valid_uuid,
                title="",
            )

        errors = exc_info.value.errors()
        assert any("title" in str(e) for e in errors)

    def test_issue_create_title_whitespace_only_fails(self, valid_uuid):
        """Test that whitespace-only title raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            IssueCreate(
                project_id=valid_uuid,
                title=" ",
            )

        errors = exc_info.value.errors()
        assert any("title" in str(e) for e in errors)

    def test_issue_create_title_stripped(self, valid_uuid):
        """Test that title is stripped."""
        issue = IssueCreate(
            project_id=valid_uuid,
            title=" Padded Title ",
        )

        assert issue.title == "Padded Title"

    def test_issue_create_project_id_required(self):
        """Test that project_id is required."""
        with pytest.raises(ValidationError) as exc_info:
            IssueCreate(title="No Project Issue")

        errors = exc_info.value.errors()
        assert any("project_id" in str(e).lower() for e in errors)


class TestIssueLabelsValidation:
    """Tests for Issue labels validation."""

    def test_labels_normalized_lowercase(self, valid_uuid):
        """Test that labels are normalized to lowercase."""
        issue = IssueCreate(
            project_id=valid_uuid,
            title="Test Issue",
            labels=["Bug", "SECURITY", "FrontEnd"],
        )

        assert issue.labels == ["bug", "security", "frontend"]

    def test_labels_stripped(self, valid_uuid):
        """Test that labels are stripped."""
        issue = IssueCreate(
            project_id=valid_uuid,
            title="Test Issue",
            labels=[" bug ", " security "],
        )

        assert issue.labels == ["bug", "security"]

    def test_labels_empty_strings_removed(self, valid_uuid):
        """Test that empty label strings are removed."""
        issue = IssueCreate(
            project_id=valid_uuid,
            title="Test Issue",
            labels=["bug", "", " ", "security"],
        )

        assert issue.labels == ["bug", "security"]


class TestIssueStoryPointsValidation:
    """Tests for Issue story_points validation."""

    def test_story_points_valid_range(self, valid_uuid):
        """Test valid story_points values."""
        for points in [0, 1, 5, 13, 21, 100]:
            issue = IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                story_points=points,
            )
            assert issue.story_points == points

    def test_story_points_negative_fails(self, valid_uuid):
        """Test that negative story_points raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                story_points=-1,
            )

        errors = exc_info.value.errors()
        assert any("story_points" in str(e).lower() for e in errors)

    def test_story_points_over_100_fails(self, valid_uuid):
        """Test that story_points > 100 raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                story_points=101,
            )

        errors = exc_info.value.errors()
        assert any("story_points" in str(e).lower() for e in errors)


class TestIssueExternalTrackerValidation:
    """Tests for Issue external tracker validation."""

    def test_valid_external_trackers(self, valid_uuid):
        """Test valid external tracker values."""
        for tracker in ["gitea", "github", "gitlab"]:
            issue = IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                external_tracker=tracker,
                external_id="ext-123",
            )
            assert issue.external_tracker == tracker

    def test_invalid_external_tracker(self, valid_uuid):
        """Test that invalid external tracker raises ValidationError."""
        with pytest.raises(ValidationError):
            IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                external_tracker="invalid",  # type: ignore
                external_id="ext-123",
            )


class TestIssueUpdateValidation:
    """Tests for IssueUpdate schema validation."""

    def test_issue_update_partial(self):
        """Test updating only some fields."""
        update = IssueUpdate(
            title="Updated Title",
        )

        assert update.title == "Updated Title"
        assert update.body is None
        assert update.status is None

    def test_issue_update_all_fields(self):
        """Test updating all fields."""
        agent_id = uuid.uuid4()
        sprint_id = uuid.uuid4()

        update = IssueUpdate(
            title="Updated Title",
            body="Updated body",
            status=IssueStatus.CLOSED,
            priority=IssuePriority.CRITICAL,
            labels=["updated"],
            assigned_agent_id=agent_id,
            human_assignee=None,
            sprint_id=sprint_id,
            story_points=8,
            sync_status=SyncStatus.PENDING,
        )

        assert update.title == "Updated Title"
        assert update.status == IssueStatus.CLOSED
        assert update.priority == IssuePriority.CRITICAL

    def test_issue_update_empty_title_fails(self):
        """Test that empty title in update raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            IssueUpdate(title="")

        errors = exc_info.value.errors()
        assert any("title" in str(e) for e in errors)

    def test_issue_update_labels_normalized(self):
        """Test that labels are normalized in updates."""
        update = IssueUpdate(
            labels=["Bug", "SECURITY"],
        )

        assert update.labels == ["bug", "security"]


class TestIssueAssignValidation:
    """Tests for IssueAssign schema validation."""

    def test_assign_to_agent(self):
        """Test assigning to an agent."""
        agent_id = uuid.uuid4()
        assign = IssueAssign(assigned_agent_id=agent_id)

        assert assign.assigned_agent_id == agent_id
        assert assign.human_assignee is None

    def test_assign_to_human(self):
        """Test assigning to a human."""
        assign = IssueAssign(human_assignee="developer@example.com")

        assert assign.human_assignee == "developer@example.com"
        assert assign.assigned_agent_id is None

    def test_unassign(self):
        """Test unassigning (both None)."""
        assign = IssueAssign()

        assert assign.assigned_agent_id is None
        assert assign.human_assignee is None

    def test_assign_both_fails(self):
        """Test that assigning to both agent and human raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            IssueAssign(
                assigned_agent_id=uuid.uuid4(),
                human_assignee="developer@example.com",
            )

        errors = exc_info.value.errors()
        # Check for the validation error message
        assert len(errors) > 0


class TestIssueEnums:
    """Tests for Issue enum validation."""

    def test_valid_issue_statuses(self, valid_uuid):
        """Test all valid issue statuses."""
        for status in IssueStatus:
            issue = IssueCreate(
                project_id=valid_uuid,
                title=f"Issue {status.value}",
                status=status,
            )
            assert issue.status == status

    def test_invalid_issue_status(self, valid_uuid):
        """Test that invalid issue status raises ValidationError."""
        with pytest.raises(ValidationError):
            IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                status="invalid",  # type: ignore
            )

    def test_valid_issue_priorities(self, valid_uuid):
        """Test all valid issue priorities."""
        for priority in IssuePriority:
            issue = IssueCreate(
                project_id=valid_uuid,
                title=f"Issue {priority.value}",
                priority=priority,
            )
            assert issue.priority == priority

    def test_invalid_issue_priority(self, valid_uuid):
        """Test that invalid issue priority raises ValidationError."""
        with pytest.raises(ValidationError):
            IssueCreate(
                project_id=valid_uuid,
                title="Test Issue",
                priority="invalid",  # type: ignore
            )

    def test_valid_sync_statuses(self):
        """Test all valid sync statuses in update."""
        for status in SyncStatus:
            update = IssueUpdate(sync_status=status)
            assert update.sync_status == status
300
backend/tests/schemas/syndarix/test_project_schemas.py
Normal file
@@ -0,0 +1,300 @@
# tests/schemas/syndarix/test_project_schemas.py
"""
Tests for Project schema validation.
"""

import uuid

import pytest
from pydantic import ValidationError

from app.schemas.syndarix import (
    AutonomyLevel,
    ProjectCreate,
    ProjectStatus,
    ProjectUpdate,
)


class TestProjectCreateValidation:
    """Tests for ProjectCreate schema validation."""

    def test_valid_project_create(self, valid_project_data):
        """Test creating project with valid data."""
        project = ProjectCreate(**valid_project_data)

        assert project.name == "Test Project"
        assert project.slug == "test-project"
        assert project.description == "A test project"

    def test_project_create_defaults(self):
        """Test that defaults are applied correctly."""
        project = ProjectCreate(
            name="Minimal Project",
            slug="minimal-project",
        )

        assert project.autonomy_level == AutonomyLevel.MILESTONE
        assert project.status == ProjectStatus.ACTIVE
        assert project.settings == {}
        assert project.owner_id is None

    def test_project_create_with_owner(self, valid_project_data):
        """Test creating project with owner ID."""
        owner_id = uuid.uuid4()
        project = ProjectCreate(
            **valid_project_data,
            owner_id=owner_id,
        )

        assert project.owner_id == owner_id

    def test_project_create_name_empty_fails(self):
        """Test that empty name raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(
                name="",
                slug="valid-slug",
            )

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_project_create_name_whitespace_only_fails(self):
        """Test that whitespace-only name raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(
                name=" ",
                slug="valid-slug",
            )

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_project_create_name_stripped(self):
        """Test that name is stripped of leading/trailing whitespace."""
        project = ProjectCreate(
            name=" Padded Name ",
            slug="padded-slug",
        )

        assert project.name == "Padded Name"

    def test_project_create_slug_required(self):
        """Test that slug is required for create."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(name="No Slug Project")

        errors = exc_info.value.errors()
        assert any("slug" in str(e).lower() for e in errors)


class TestProjectSlugValidation:
    """Tests for Project slug validation."""

    def test_valid_slugs(self):
        """Test various valid slug formats."""
        valid_slugs = [
            "simple",
            "with-hyphens",
            "has123numbers",
            "mix3d-with-hyphen5",
            "a",  # Single character
        ]

        for slug in valid_slugs:
            project = ProjectCreate(
                name="Test Project",
                slug=slug,
            )
            assert project.slug == slug

    def test_invalid_slug_uppercase(self):
        """Test that uppercase letters in slug raise ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(
                name="Test Project",
                slug="Invalid-Uppercase",
            )

        errors = exc_info.value.errors()
        assert any("slug" in str(e).lower() for e in errors)

    def test_invalid_slug_special_chars(self):
        """Test that special characters in slug raise ValidationError."""
        invalid_slugs = [
            "has_underscore",
            "has.dot",
            "has@symbol",
            "has space",
            "has/slash",
        ]

        for slug in invalid_slugs:
            with pytest.raises(ValidationError):
                ProjectCreate(
                    name="Test Project",
                    slug=slug,
                )

    def test_invalid_slug_starts_with_hyphen(self):
        """Test that slug starting with hyphen raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(
                name="Test Project",
                slug="-invalid-start",
            )

        errors = exc_info.value.errors()
        assert any("hyphen" in str(e).lower() for e in errors)

    def test_invalid_slug_ends_with_hyphen(self):
        """Test that slug ending with hyphen raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(
                name="Test Project",
                slug="invalid-end-",
            )

        errors = exc_info.value.errors()
        assert any("hyphen" in str(e).lower() for e in errors)

    def test_invalid_slug_consecutive_hyphens(self):
        """Test that consecutive hyphens in slug raise ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectCreate(
                name="Test Project",
                slug="invalid--consecutive",
            )

        errors = exc_info.value.errors()
        assert any("consecutive" in str(e).lower() for e in errors)


class TestProjectUpdateValidation:
    """Tests for ProjectUpdate schema validation."""

    def test_project_update_partial(self):
        """Test updating only some fields."""
        update = ProjectUpdate(
            name="Updated Name",
        )

        assert update.name == "Updated Name"
        assert update.slug is None
        assert update.description is None
        assert update.autonomy_level is None
        assert update.status is None

    def test_project_update_all_fields(self):
        """Test updating all fields."""
        owner_id = uuid.uuid4()
        update = ProjectUpdate(
            name="Updated Name",
            slug="updated-slug",
            description="Updated description",
            autonomy_level=AutonomyLevel.AUTONOMOUS,
            status=ProjectStatus.PAUSED,
            settings={"key": "value"},
            owner_id=owner_id,
        )

        assert update.name == "Updated Name"
        assert update.slug == "updated-slug"
        assert update.autonomy_level == AutonomyLevel.AUTONOMOUS
        assert update.status == ProjectStatus.PAUSED

    def test_project_update_empty_name_fails(self):
        """Test that empty name in update raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            ProjectUpdate(name="")

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_project_update_slug_validation(self):
        """Test that slug validation applies to updates too."""
        with pytest.raises(ValidationError):
            ProjectUpdate(slug="Invalid-Slug")


class TestProjectEnums:
    """Tests for Project enum validation."""

    def test_valid_autonomy_levels(self):
        """Test all valid autonomy levels."""
        for level in AutonomyLevel:
            # Replace underscores with hyphens for valid slug
            slug_suffix = level.value.replace("_", "-")
            project = ProjectCreate(
                name="Test Project",
                slug=f"project-{slug_suffix}",
                autonomy_level=level,
            )
            assert project.autonomy_level == level

    def test_invalid_autonomy_level(self):
        """Test that invalid autonomy level raises ValidationError."""
        with pytest.raises(ValidationError):
            ProjectCreate(
                name="Test Project",
                slug="invalid-autonomy",
                autonomy_level="invalid",  # type: ignore
            )

    def test_valid_project_statuses(self):
        """Test all valid project statuses."""
        for status in ProjectStatus:
            project = ProjectCreate(
                name="Test Project",
                slug=f"project-status-{status.value}",
                status=status,
            )
            assert project.status == status

    def test_invalid_project_status(self):
        """Test that invalid project status raises ValidationError."""
        with pytest.raises(ValidationError):
            ProjectCreate(
                name="Test Project",
                slug="invalid-status",
                status="invalid",  # type: ignore
            )


class TestProjectSettings:
    """Tests for Project settings validation."""

    def test_settings_empty_dict(self):
        """Test that empty settings dict is valid."""
        project = ProjectCreate(
            name="Test Project",
            slug="empty-settings",
            settings={},
        )
        assert project.settings == {}

    def test_settings_complex_structure(self):
        """Test that complex settings structure is valid."""
        complex_settings = {
            "mcp_servers": ["gitea", "slack"],
            "webhooks": {
                "on_issue_created": "https://example.com",
            },
            "flags": True,
            "count": 42,
        }
        project = ProjectCreate(
            name="Test Project",
            slug="complex-settings",
            settings=complex_settings,
        )
        assert project.settings == complex_settings

    def test_settings_default_to_empty_dict(self):
        """Test that settings default to empty dict when not provided."""
        project = ProjectCreate(
            name="Test Project",
            slug="default-settings",
        )
        assert project.settings == {}
366
backend/tests/schemas/syndarix/test_sprint_schemas.py
Normal file
@@ -0,0 +1,366 @@
# tests/schemas/syndarix/test_sprint_schemas.py
"""
Tests for Sprint schema validation.
"""

from datetime import date, timedelta

import pytest
from pydantic import ValidationError

from app.schemas.syndarix import (
    SprintCreate,
    SprintStatus,
    SprintUpdate,
)


class TestSprintCreateValidation:
    """Tests for SprintCreate schema validation."""

    def test_valid_sprint_create(self, valid_sprint_data):
        """Test creating sprint with valid data."""
        sprint = SprintCreate(**valid_sprint_data)

        assert sprint.name == "Sprint 1"
        assert sprint.number == 1
        assert sprint.start_date is not None
        assert sprint.end_date is not None

    def test_sprint_create_defaults(self, valid_sprint_data):
        """Test that defaults are applied correctly."""
        sprint = SprintCreate(**valid_sprint_data)

        assert sprint.status == SprintStatus.PLANNED
        assert sprint.goal is None
        assert sprint.planned_points is None
        assert sprint.completed_points is None

    def test_sprint_create_with_all_fields(self, valid_uuid):
        """Test creating sprint with all optional fields."""
        today = date.today()

        sprint = SprintCreate(
            project_id=valid_uuid,
            name="Full Sprint",
            number=5,
            goal="Complete all features",
            start_date=today,
            end_date=today + timedelta(days=14),
            status=SprintStatus.PLANNED,
            planned_points=21,
            completed_points=0,
        )

        assert sprint.name == "Full Sprint"
        assert sprint.number == 5
        assert sprint.goal == "Complete all features"
        assert sprint.planned_points == 21

    def test_sprint_create_name_empty_fails(self, valid_uuid):
        """Test that empty name raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name="",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
            )

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_sprint_create_name_whitespace_only_fails(self, valid_uuid):
        """Test that whitespace-only name raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name=" ",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
            )

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_sprint_create_name_stripped(self, valid_uuid):
        """Test that name is stripped."""
        today = date.today()

        sprint = SprintCreate(
            project_id=valid_uuid,
            name=" Padded Sprint Name ",
            number=1,
            start_date=today,
            end_date=today + timedelta(days=14),
        )

        assert sprint.name == "Padded Sprint Name"

    def test_sprint_create_project_id_required(self):
        """Test that project_id is required."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                name="Sprint 1",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
            )

        errors = exc_info.value.errors()
        assert any("project_id" in str(e).lower() for e in errors)


class TestSprintNumberValidation:
    """Tests for Sprint number validation."""

    def test_sprint_number_valid(self, valid_uuid):
        """Test valid sprint numbers."""
        today = date.today()

        for number in [1, 10, 100]:
            sprint = SprintCreate(
                project_id=valid_uuid,
                name=f"Sprint {number}",
                number=number,
                start_date=today,
                end_date=today + timedelta(days=14),
            )
            assert sprint.number == number

    def test_sprint_number_zero_fails(self, valid_uuid):
        """Test that sprint number 0 raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name="Sprint Zero",
                number=0,
                start_date=today,
                end_date=today + timedelta(days=14),
            )

        errors = exc_info.value.errors()
        assert any("number" in str(e).lower() for e in errors)

    def test_sprint_number_negative_fails(self, valid_uuid):
        """Test that negative sprint number raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name="Negative Sprint",
                number=-1,
                start_date=today,
                end_date=today + timedelta(days=14),
            )

        errors = exc_info.value.errors()
        assert any("number" in str(e).lower() for e in errors)


class TestSprintDateValidation:
    """Tests for Sprint date validation."""

    def test_valid_date_range(self, valid_uuid):
        """Test valid date range (end > start)."""
        today = date.today()

        sprint = SprintCreate(
            project_id=valid_uuid,
            name="Sprint 1",
            number=1,
            start_date=today,
            end_date=today + timedelta(days=14),
        )

        assert sprint.end_date > sprint.start_date

    def test_same_day_sprint(self, valid_uuid):
        """Test that same day sprint is valid."""
        today = date.today()

        sprint = SprintCreate(
            project_id=valid_uuid,
            name="One Day Sprint",
            number=1,
            start_date=today,
            end_date=today,  # Same day is allowed
        )

        assert sprint.start_date == sprint.end_date

    def test_end_before_start_fails(self, valid_uuid):
        """Test that end date before start date raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name="Invalid Sprint",
                number=1,
                start_date=today,
                end_date=today - timedelta(days=1),  # Before start
            )

        errors = exc_info.value.errors()
        assert len(errors) > 0


class TestSprintPointsValidation:
    """Tests for Sprint points validation."""

    def test_valid_planned_points(self, valid_uuid):
        """Test valid planned_points values."""
        today = date.today()

        for points in [0, 1, 21, 100]:
            sprint = SprintCreate(
                project_id=valid_uuid,
                name=f"Sprint {points}",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
                planned_points=points,
            )
            assert sprint.planned_points == points

    def test_planned_points_negative_fails(self, valid_uuid):
        """Test that negative planned_points raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name="Negative Points Sprint",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
                planned_points=-1,
            )

        errors = exc_info.value.errors()
        assert any("planned_points" in str(e).lower() for e in errors)

    def test_valid_completed_points(self, valid_uuid):
        """Test valid completed_points values."""
        today = date.today()

        for points in [0, 5, 21]:
            sprint = SprintCreate(
                project_id=valid_uuid,
                name=f"Sprint {points}",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
                completed_points=points,
            )
            assert sprint.completed_points == points

    def test_completed_points_negative_fails(self, valid_uuid):
        """Test that negative completed_points raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError) as exc_info:
            SprintCreate(
                project_id=valid_uuid,
                name="Negative Completed Sprint",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
                completed_points=-1,
            )

        errors = exc_info.value.errors()
        assert any("completed_points" in str(e).lower() for e in errors)


class TestSprintUpdateValidation:
    """Tests for SprintUpdate schema validation."""

    def test_sprint_update_partial(self):
        """Test updating only some fields."""
        update = SprintUpdate(
            name="Updated Name",
        )

        assert update.name == "Updated Name"
        assert update.goal is None
        assert update.start_date is None
        assert update.end_date is None

    def test_sprint_update_all_fields(self):
        """Test updating all fields."""
        today = date.today()

        update = SprintUpdate(
            name="Updated Name",
            goal="Updated goal",
            start_date=today,
            end_date=today + timedelta(days=21),
            status=SprintStatus.ACTIVE,
            planned_points=34,
            completed_points=20,
        )

        assert update.name == "Updated Name"
        assert update.goal == "Updated goal"
        assert update.status == SprintStatus.ACTIVE
        assert update.planned_points == 34

    def test_sprint_update_empty_name_fails(self):
        """Test that empty name in update raises ValidationError."""
        with pytest.raises(ValidationError) as exc_info:
            SprintUpdate(name="")

        errors = exc_info.value.errors()
        assert any("name" in str(e) for e in errors)

    def test_sprint_update_name_stripped(self):
        """Test that name is stripped in updates."""
        update = SprintUpdate(name=" Updated ")

        assert update.name == "Updated"


class TestSprintStatusEnum:
    """Tests for SprintStatus enum validation."""

    def test_valid_sprint_statuses(self, valid_uuid):
        """Test all valid sprint statuses."""
        today = date.today()

        for status in SprintStatus:
            sprint = SprintCreate(
                project_id=valid_uuid,
                name=f"Sprint {status.value}",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
                status=status,
            )
            assert sprint.status == status

    def test_invalid_sprint_status(self, valid_uuid):
        """Test that invalid sprint status raises ValidationError."""
        today = date.today()

        with pytest.raises(ValidationError):
            SprintCreate(
                project_id=valid_uuid,
                name="Invalid Status Sprint",
                number=1,
                start_date=today,
                end_date=today + timedelta(days=14),
                status="invalid",  # type: ignore
            )
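The IssueAssign and Sprint date tests rely on cross-field rules: an issue may be assigned to an agent or a human but not both, and a sprint's end_date may not precede its start_date (same-day sprints are allowed). A minimal Pydantic v2 sketch of how such rules are usually expressed; the class and field layout here is an assumption, since the real schemas under app/schemas/syndarix are not shown in this diff.

from datetime import date
from uuid import UUID

from pydantic import BaseModel, model_validator


class IssueAssign(BaseModel):
    assigned_agent_id: UUID | None = None
    human_assignee: str | None = None

    @model_validator(mode="after")
    def agent_or_human(self) -> "IssueAssign":
        # Both may be None (unassign), but not both set at once.
        if self.assigned_agent_id is not None and self.human_assignee is not None:
            raise ValueError("assign to either an agent or a human, not both")
        return self


class SprintDates(BaseModel):
    start_date: date
    end_date: date

    @model_validator(mode="after")
    def end_not_before_start(self) -> "SprintDates":
        # Same-day sprints pass; only an end_date earlier than start_date fails.
        if self.end_date < self.start_date:
            raise ValueError("end_date must not be before start_date")
        return self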