Compare commits
196 Commits
| SHA1 |
|---|
| 4ad3d20cf2 |
| 8623eb56f5 |
| 3cb6c8d13b |
| 8e16e2645e |
| 82c3a6ba47 |
| b6c38cac88 |
| 51404216ae |
| 3f23bc3db3 |
| a0ec5fa2cc |
| f262d08be2 |
| b3f371e0a3 |
| 93cc37224c |
| 5717bffd63 |
| 9339ea30a1 |
| 79cb6bfd7b |
| 45025bb2f1 |
| 3c6b14d2bf |
| 6b21a6fadd |
| 600657adc4 |
| c9d0d079b3 |
| 4c8f81368c |
| efbe91ce14 |
| 5d646779c9 |
| 5a4d93df26 |
| 7ef217be39 |
| 20159c5865 |
| f9a72fcb34 |
| fcb0a5f86a |
| 92782bcb05 |
| 1dcf99ee38 |
| 70009676a3 |
| 192237e69b |
| 3edce9cd26 |
| 35aea2d73a |
| d0f32d04f7 |
| da85a8aba8 |
| f8bd1011e9 |
| f057c2f0b6 |
| 33ec889fc4 |
| 74b8c65741 |
| b232298c61 |
| cf6291ac8e |
| e3fe0439fd |
| 57680c3772 |
| 997cfaa03a |
| 6954774e36 |
| 30e5c68304 |
| 0b24d4c6cc |
| 1670e05e0d |
| 999b7ac03f |
| 48ecb40f18 |
| b818f17418 |
| e946787a61 |
| 3554efe66a |
| bd988f76b0 |
| 4974233169 |
| c9d8c0835c |
| 085a748929 |
| 4b149b8a52 |
| ad0c06851d |
| 49359b1416 |
| 911d950c15 |
| b2a3ac60e0 |
| dea092e1bb |
| 4154dd5268 |
| db12937495 |
| 81e1456631 |
| 58e78d8700 |
| 5e80139afa |
| 60ebeaa582 |
| 758052dcff |
| 1628eacf2b |
| 2bea057fb1 |
| 9e54f16e56 |
| 96e6400bd8 |
| 6c7b72f130 |
| 027ebfc332 |
| c2466ab401 |
| 7828d35e06 |
| 6b07e62f00 |
| 0d2005ddcb |
| dfa75e682e |
| 22ecb5e989 |
| 2ab69f8561 |
| 95342cc94d |
| f6194b3e19 |
| 6bb376a336 |
| cd7a9ccbdf |
| 953af52d0e |
| e6e98d4ed1 |
| ca5f5e3383 |
| d0fc7f37ff |
| 18d717e996 |
| f482559e15 |
| 6e8b0b022a |
| 746fb7b181 |
| caf283bed2 |
| 520c06175e |
| 065e43c5a9 |
| c8b88dadc3 |
| 015f2de6c6 |
| f36bfb3781 |
| ef659cd72d |
| 728edd1453 |
| 498c0a0e94 |
| e5975fa5d0 |
| 731a188a76 |
| fe2104822e |
| 664415111a |
| acd18ff694 |
| da5affd613 |
| a79d923dc1 |
| c72f6aa2f9 |
| 4f24cebf11 |
| e0739a786c |
| 64576da7dc |
| 4a55bd63a3 |
| a78b903f5a |
| c7b2c82700 |
| 50b865b23b |
| 6f5dd58b54 |
| 0ceee8545e |
| 62aea06e0d |
| 24f1cc637e |
| 8b6cca5d4d |
| c9700f760e |
| 6f509e71ce |
| f5a86953c6 |
| 246d2a6752 |
| 36ab7069cf |
| a4c91cb8c3 |
| a7ba0f9bd8 |
| f3fb4ecbeb |
| 5c35702caf |
| 7280b182bd |
| 06b2491c1f |
| b8265783f3 |
| 63066c50ba |
| ddf9b5fe25 |
| c3b66cccfc |
| 896f0d92e5 |
| 2ccaeb23f2 |
| 04c939d4c2 |
| 71c94c3b5a |
| d71891ac4e |
| 3492941aec |
| 81e8d7e73d |
| f0b04d53af |
| 35af7daf90 |
| 5fab15a11e |
| ab913575e1 |
| 82cb6386a6 |
| 2d05035c1d |
| 15d747eb28 |
| 3d6fa6b791 |
| 3ea1874638 |
| e1657d5ad8 |
| 83fa51fd4a |
| db868c53c6 |
| 68f1865a1e |
| 5b1e2852ea |
| d0a88d1fd1 |
| e85788f79f |
| 25d42ee2a6 |
| e41ceafaef |
| 43fa69db7d |
| 29309e5cfd |
| cea97afe25 |
| b43fa8ace2 |
| 742ce4c9c8 |
| 6ea9edf3d1 |
| 25b8f1723e |
| 73d10f364c |
| 2310c8cdfd |
| 2f7124959d |
| 2104ae38ec |
| 2055320058 |
| 11da0d57a8 |
| acfda1e9a9 |
| 3c24a8c522 |
| ec111f9ce6 |
| 520a4d60fb |
| 6e645835dc |
| fcda8f0f96 |
| d6db6af964 |
| 88cf4e0abc |
| f138417486 |
| de47d9ee43 |
| 406b25cda0 |
| bd702734c2 |
| 5594655fba |
| ebd307cab4 |
| 6e3cdebbfb |
| a6a336b66e |
| 9901dc7f51 |
| ac64d9505e |
````diff
@@ -1,15 +1,22 @@
 # Common settings
-PROJECT_NAME=App
+PROJECT_NAME=Syndarix
 VERSION=1.0.0
 
 # Database settings
 POSTGRES_USER=postgres
 POSTGRES_PASSWORD=postgres
-POSTGRES_DB=app
+POSTGRES_DB=syndarix
 POSTGRES_HOST=db
 POSTGRES_PORT=5432
 DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
 
+# Redis settings (cache, pub/sub, Celery broker)
+REDIS_URL=redis://redis:6379/0
+
+# Celery settings (optional - defaults to REDIS_URL if not set)
+# CELERY_BROKER_URL=redis://redis:6379/0
+# CELERY_RESULT_BACKEND=redis://redis:6379/0
+
 # Backend settings
 BACKEND_PORT=8000
 # CRITICAL: Generate a secure SECRET_KEY for production!
````
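The Celery lines above are commented out because, per the file's own comment, the broker and result backend default to `REDIS_URL`. A minimal sketch of how that fallback might be resolved in application code — the project's actual settings module is not shown here, so the names are illustrative:

```python
import os

# Illustrative fallback resolution (assumption: mirrors the .env comment that
# CELERY_BROKER_URL / CELERY_RESULT_BACKEND default to REDIS_URL when unset).
redis_url = os.getenv("REDIS_URL", "redis://redis:6379/0")
celery_broker_url = os.getenv("CELERY_BROKER_URL", redis_url)
celery_result_backend = os.getenv("CELERY_RESULT_BACKEND", redis_url)
```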
.gitea/workflows/ci.yaml (new file, 460 lines)

```yaml
# Syndarix CI/CD Pipeline
# Gitea Actions workflow for continuous integration and deployment
#
# Pipeline Structure:
# - lint: Fast feedback (linting and type checking)
# - test: Run test suites (depends on lint)
# - build: Build Docker images (depends on test)
# - deploy: Deploy to production (depends on build, only on main)

name: CI/CD Pipeline

on:
  push:
    branches:
      - main
      - dev
      - 'feature/**'
  pull_request:
    branches:
      - main
      - dev

env:
  PYTHON_VERSION: "3.12"
  NODE_VERSION: "20"
  UV_VERSION: "0.4.x"

jobs:
  # ===========================================================================
  # LINT JOB - Fast feedback first
  # ===========================================================================
  lint:
    name: Lint & Type Check
    runs-on: ubuntu-latest
    strategy:
      matrix:
        component: [backend, frontend]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # ----- Backend Linting -----
      - name: Set up Python
        if: matrix.component == 'backend'
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        if: matrix.component == 'backend'
        uses: astral-sh/setup-uv@v4
        with:
          version: ${{ env.UV_VERSION }}

      - name: Cache uv dependencies
        if: matrix.component == 'backend'
        uses: actions/cache@v4
        with:
          path: |
            ~/.cache/uv
            backend/.venv
          key: uv-${{ runner.os }}-${{ hashFiles('backend/uv.lock') }}
          restore-keys: |
            uv-${{ runner.os }}-

      - name: Install backend dependencies
        if: matrix.component == 'backend'
        working-directory: backend
        run: uv sync --extra dev --frozen

      - name: Run ruff linting
        if: matrix.component == 'backend'
        working-directory: backend
        run: uv run ruff check app

      - name: Run ruff format check
        if: matrix.component == 'backend'
        working-directory: backend
        run: uv run ruff format --check app

      - name: Run mypy type checking
        if: matrix.component == 'backend'
        working-directory: backend
        run: uv run mypy app

      # ----- Frontend Linting -----
      - name: Set up Node.js
        if: matrix.component == 'frontend'
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Cache npm dependencies
        if: matrix.component == 'frontend'
        uses: actions/cache@v4
        with:
          path: |
            ~/.npm
            frontend/node_modules
          key: npm-${{ runner.os }}-${{ hashFiles('frontend/package-lock.json') }}
          restore-keys: |
            npm-${{ runner.os }}-

      - name: Install frontend dependencies
        if: matrix.component == 'frontend'
        working-directory: frontend
        run: npm ci

      - name: Run ESLint
        if: matrix.component == 'frontend'
        working-directory: frontend
        run: npm run lint

      - name: Run TypeScript type check
        if: matrix.component == 'frontend'
        working-directory: frontend
        run: npm run type-check

      - name: Run Prettier format check
        if: matrix.component == 'frontend'
        working-directory: frontend
        run: npm run format:check

  # ===========================================================================
  # TEST JOB - Run test suites
  # ===========================================================================
  test:
    name: Test
    runs-on: ubuntu-latest
    needs: lint
    strategy:
      matrix:
        component: [backend, frontend]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # ----- Backend Tests -----
      - name: Set up Python
        if: matrix.component == 'backend'
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        if: matrix.component == 'backend'
        uses: astral-sh/setup-uv@v4
        with:
          version: ${{ env.UV_VERSION }}

      - name: Cache uv dependencies
        if: matrix.component == 'backend'
        uses: actions/cache@v4
        with:
          path: |
            ~/.cache/uv
            backend/.venv
          key: uv-${{ runner.os }}-${{ hashFiles('backend/uv.lock') }}
          restore-keys: |
            uv-${{ runner.os }}-

      - name: Install backend dependencies
        if: matrix.component == 'backend'
        working-directory: backend
        run: uv sync --extra dev --frozen

      - name: Run pytest with coverage
        if: matrix.component == 'backend'
        working-directory: backend
        env:
          IS_TEST: "True"
        run: |
          uv run pytest --cov=app --cov-report=xml --cov-report=term-missing --cov-fail-under=90

      - name: Upload backend coverage report
        if: matrix.component == 'backend'
        uses: actions/upload-artifact@v4
        with:
          name: backend-coverage
          path: backend/coverage.xml
          retention-days: 7

      # ----- Frontend Tests -----
      - name: Set up Node.js
        if: matrix.component == 'frontend'
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Cache npm dependencies
        if: matrix.component == 'frontend'
        uses: actions/cache@v4
        with:
          path: |
            ~/.npm
            frontend/node_modules
          key: npm-${{ runner.os }}-${{ hashFiles('frontend/package-lock.json') }}
          restore-keys: |
            npm-${{ runner.os }}-

      - name: Install frontend dependencies
        if: matrix.component == 'frontend'
        working-directory: frontend
        run: npm ci

      - name: Run Jest unit tests
        if: matrix.component == 'frontend'
        working-directory: frontend
        run: npm test -- --coverage --passWithNoTests

      - name: Upload frontend coverage report
        if: matrix.component == 'frontend'
        uses: actions/upload-artifact@v4
        with:
          name: frontend-coverage
          path: frontend/coverage/
          retention-days: 7

  # ===========================================================================
  # BUILD JOB - Build Docker images
  # ===========================================================================
  build:
    name: Build
    runs-on: ubuntu-latest
    needs: test
    strategy:
      matrix:
        component: [backend, frontend]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: docker-${{ matrix.component }}-${{ github.sha }}
          restore-keys: |
            docker-${{ matrix.component }}-

      - name: Build backend Docker image
        if: matrix.component == 'backend'
        uses: docker/build-push-action@v5
        with:
          context: ./backend
          file: ./backend/Dockerfile
          target: production
          push: false
          tags: syndarix-backend:${{ github.sha }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

      - name: Build frontend Docker image
        if: matrix.component == 'frontend'
        uses: docker/build-push-action@v5
        with:
          context: ./frontend
          file: ./frontend/Dockerfile
          target: runner
          push: false
          tags: syndarix-frontend:${{ github.sha }}
          build-args: |
            NEXT_PUBLIC_API_URL=http://localhost:8000
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

      # Prevent cache from growing indefinitely
      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

  # ===========================================================================
  # DEPLOY JOB - Deploy to production (only on main branch)
  # ===========================================================================
  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
    environment: production
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Deploy notification
        run: |
          echo "Deployment to production would happen here"
          echo "Branch: ${{ github.ref }}"
          echo "Commit: ${{ github.sha }}"
          echo "Actor: ${{ github.actor }}"

      # TODO: Add actual deployment steps when infrastructure is ready
      # Options:
      # - SSH to production server and run docker-compose pull && docker-compose up -d
      # - Use Kubernetes deployment
      # - Use cloud provider deployment (AWS ECS, GCP Cloud Run, etc.)
      # - Trigger webhook to deployment orchestrator

  # ===========================================================================
  # SECURITY SCAN JOB - Run on main and dev branches
  # ===========================================================================
  security:
    name: Security Scan
    runs-on: ubuntu-latest
    needs: lint
    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: ${{ env.UV_VERSION }}

      - name: Install backend dependencies
        working-directory: backend
        run: uv sync --extra dev --frozen

      - name: Run Bandit security scan (via ruff)
        working-directory: backend
        run: |
          # Ruff includes flake8-bandit (S rules) for security scanning
          # Run with explicit security rules only
          uv run ruff check app --select=S --ignore=S101,S104,S105,S106,S603,S607

      - name: Run pip-audit for dependency vulnerabilities
        working-directory: backend
        run: |
          # pip-audit checks for known vulnerabilities in Python dependencies
          uv run pip-audit --require-hashes --disable-pip -r <(uv pip compile pyproject.toml) || true
          # Note: Using || true temporarily while setting up proper remediation

      - name: Check for secrets in code
        run: |
          # Basic check for common secret patterns
          # In production, use tools like gitleaks or trufflehog
          echo "Checking for potential hardcoded secrets..."
          ! grep -rn --include="*.py" --include="*.ts" --include="*.tsx" --include="*.js" \
            -E "(api_key|apikey|secret_key|secretkey|password|passwd|token)\s*=\s*['\"][^'\"]{8,}['\"]" \
            backend/app frontend/src || echo "No obvious secrets found"

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Install frontend dependencies
        working-directory: frontend
        run: npm ci

      - name: Run npm audit
        working-directory: frontend
        run: |
          npm audit --audit-level=high || true
          # Note: Using || true to not fail on moderate vulnerabilities
          # In production, consider stricter settings

  # ===========================================================================
  # E2E TEST JOB - Run end-to-end tests with Playwright
  # ===========================================================================
  e2e-tests:
    name: E2E Tests
    runs-on: ubuntu-latest
    needs: [lint, test]
    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev' || github.event_name == 'pull_request'
    services:
      postgres:
        image: pgvector/pgvector:pg17
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: syndarix_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      redis:
        image: redis:7-alpine
        ports:
          - 6379:6379
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: ${{ env.UV_VERSION }}

      - name: Install backend dependencies
        working-directory: backend
        run: uv sync --extra dev --frozen

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Install frontend dependencies
        working-directory: frontend
        run: npm ci

      - name: Install Playwright browsers
        working-directory: frontend
        run: npx playwright install --with-deps chromium

      - name: Start backend server
        working-directory: backend
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/syndarix_test
          REDIS_URL: redis://localhost:6379/0
          SECRET_KEY: test-secret-key-for-e2e-tests-only
          ENVIRONMENT: test
          IS_TEST: "True"
        run: |
          # Run migrations
          uv run python -c "from app.database import create_tables; import asyncio; asyncio.run(create_tables())" || true
          # Start backend in background
          uv run uvicorn app.main:app --host 0.0.0.0 --port 8000 &
          # Wait for backend to be ready
          sleep 10

      - name: Run Playwright E2E tests
        working-directory: frontend
        env:
          NEXT_PUBLIC_API_URL: http://localhost:8000
        run: |
          npm run build
          npm run test:e2e -- --project=chromium

      - name: Upload Playwright report
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: playwright-report
          path: frontend/playwright-report/
          retention-days: 7
```
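The E2E job above starts the backend in the background and then waits with a fixed `sleep 10`. A readiness poll is a common, less flaky alternative; the sketch below is illustrative only and assumes a health endpoint at `/health`, which the workflow itself does not show:

```python
import time
import urllib.request

def wait_for_backend(url: str = "http://localhost:8000/health", timeout: float = 60.0) -> None:
    """Poll the (assumed) health endpoint until it answers, instead of sleeping blindly."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=2) as resp:
                if resp.status < 500:  # any non-5xx answer means the server is up
                    return
        except OSError:
            pass  # connection refused while the server is still starting
        time.sleep(1)
    raise TimeoutError(f"backend at {url} not ready after {timeout}s")
```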
.githooks/pre-commit (new executable file, 61 lines)

```bash
#!/bin/bash
# Pre-commit hook to enforce validation before commits on protected branches
# Install: git config core.hooksPath .githooks

set -e

# Get the current branch name
BRANCH=$(git rev-parse --abbrev-ref HEAD)

# Protected branches that require validation
PROTECTED_BRANCHES="main dev"

# Check if we're on a protected branch
is_protected() {
    for branch in $PROTECTED_BRANCHES; do
        if [ "$BRANCH" = "$branch" ]; then
            return 0
        fi
    done
    return 1
}

if is_protected; then
    echo "🔒 Committing to protected branch '$BRANCH' - running validation..."

    # Check if we have backend changes
    if git diff --cached --name-only | grep -q "^backend/"; then
        echo "📦 Backend changes detected - running make validate..."
        cd backend
        if ! make validate; then
            echo ""
            echo "❌ Backend validation failed!"
            echo "   Please fix the issues and try again."
            echo "   Run 'cd backend && make validate' to see errors."
            exit 1
        fi
        cd ..
        echo "✅ Backend validation passed!"
    fi

    # Check if we have frontend changes
    if git diff --cached --name-only | grep -q "^frontend/"; then
        echo "🎨 Frontend changes detected - running npm run validate..."
        cd frontend
        if ! npm run validate 2>/dev/null; then
            echo ""
            echo "❌ Frontend validation failed!"
            echo "   Please fix the issues and try again."
            echo "   Run 'cd frontend && npm run validate' to see errors."
            exit 1
        fi
        cd ..
        echo "✅ Frontend validation passed!"
    fi

    echo "🎉 All validations passed! Proceeding with commit..."
else
    echo "📝 Committing to feature branch '$BRANCH' - skipping validation (run manually if needed)"
fi

exit 0
```
.github/workflows/README.md (vendored, 2 lines changed)

````diff
@@ -41,7 +41,7 @@ To enable CI/CD workflows:
 - Runs on: Push to main/develop, PRs affecting frontend code
 - Tests: Frontend unit tests (Jest)
 - Coverage: Uploads to Codecov
-- Fast: Uses bun cache
+- Fast: Uses npm cache
 
 ### `e2e-tests.yml`
 - Runs on: All pushes and PRs
````
.gitignore (vendored, 2 lines changed)

````diff
@@ -187,7 +187,7 @@ coverage.xml
 .hypothesis/
 .pytest_cache/
 cover/
+backend/.benchmarks
 
 # Translations
 *.mo
 *.pot
````
AGENTS.md (40 lines changed)

````diff
@@ -13,10 +13,10 @@ uv run uvicorn app.main:app --reload  # Start dev server
 
 # Frontend (Node.js)
 cd frontend
-bun install              # Install dependencies
-bun run dev              # Start dev server
-bun run generate:api     # Generate API client from OpenAPI
-bun run test:e2e         # Run E2E tests
+npm install              # Install dependencies
+npm run dev              # Start dev server
+npm run generate:api     # Generate API client from OpenAPI
+npm run test:e2e         # Run E2E tests
 ```
 
 **Access points:**
@@ -37,7 +37,7 @@ Default superuser (change in production):
 │   ├── app/
 │   │   ├── api/           # API routes (auth, users, organizations, admin)
 │   │   ├── core/          # Core functionality (auth, config, database)
-│   │   ├── repositories/  # Repository pattern (database operations)
+│   │   ├── crud/          # Database CRUD operations
 │   │   ├── models/        # SQLAlchemy ORM models
 │   │   ├── schemas/       # Pydantic request/response schemas
 │   │   ├── services/      # Business logic layer
@@ -113,7 +113,7 @@ OAUTH_ISSUER=https://api.yourdomain.com  # JWT issuer URL (must be HTTPS in
 ### Database Pattern
 - **Async SQLAlchemy 2.0** with PostgreSQL
 - **Connection pooling**: 20 base connections, 50 max overflow
-- **Repository base class**: `repositories/base.py` with common operations
+- **CRUD base class**: `crud/base.py` with common operations
 - **Migrations**: Alembic with helper script `migrate.py`
   - `python migrate.py auto "message"` - Generate and apply
   - `python migrate.py list` - View history
@@ -121,7 +121,7 @@ OAUTH_ISSUER=https://api.yourdomain.com  # JWT issuer URL (must be HTTPS in
 ### Frontend State Management
 - **Zustand stores**: Lightweight state management
 - **TanStack Query**: API data fetching/caching
-- **Auto-generated client**: From OpenAPI spec via `bun run generate:api`
+- **Auto-generated client**: From OpenAPI spec via `npm run generate:api`
 - **Dependency Injection**: ALWAYS use `useAuth()` from `AuthContext`, NEVER import `useAuthStore` directly
 
 ### Internationalization (i18n)
@@ -165,25 +165,21 @@ Permission dependencies in `api/dependencies/permissions.py`:
 **Frontend Unit Tests (Jest):**
 - 97% coverage
 - Component, hook, and utility testing
-- Run: `bun run test`
-- Coverage: `bun run test:coverage`
+- Run: `npm test`
+- Coverage: `npm run test:coverage`
 
 **Frontend E2E Tests (Playwright):**
 - 56 passing, 1 skipped (zero flaky tests)
 - Complete user flows (auth, navigation, settings)
-- Run: `bun run test:e2e`
-- UI mode: `bun run test:e2e:ui`
+- Run: `npm run test:e2e`
+- UI mode: `npm run test:e2e:ui`
 
 ### Development Tooling
 
 **Backend:**
 - **uv**: Modern Python package manager (10-100x faster than pip)
 - **Ruff**: All-in-one linting/formatting (replaces Black, Flake8, isort)
-- **Pyright**: Static type checking (strict mode)
-- **pip-audit**: Dependency vulnerability scanning (OSV database)
-- **detect-secrets**: Hardcoded secrets detection
-- **pip-licenses**: License compliance checking
-- **pre-commit**: Git hook framework (Ruff, detect-secrets, standard checks)
+- **mypy**: Type checking with Pydantic plugin
 - **Makefile**: `make help` for all commands
 
 **Frontend:**
@@ -222,11 +218,11 @@ NEXT_PUBLIC_API_URL=http://localhost:8000/api/v1
 ### Adding a New API Endpoint
 
 1. **Define schema** in `backend/app/schemas/`
-2. **Create repository** in `backend/app/repositories/`
+2. **Create CRUD operations** in `backend/app/crud/`
 3. **Implement route** in `backend/app/api/routes/`
 4. **Register router** in `backend/app/api/main.py`
 5. **Write tests** in `backend/tests/api/`
-6. **Generate frontend client**: `bun run generate:api`
+6. **Generate frontend client**: `npm run generate:api`
 
 ### Database Migrations
 
@@ -243,7 +239,7 @@ python migrate.py auto "description"  # Generate + apply
 2. **Follow design system** (see `frontend/docs/design-system/`)
 3. **Use dependency injection** for auth (`useAuth()` not `useAuthStore`)
 4. **Write tests** in `frontend/tests/` or `__tests__/`
-5. **Run type check**: `bun run type-check`
+5. **Run type check**: `npm run type-check`
 
 ## Security Features
 
@@ -253,10 +249,6 @@ python migrate.py auto "description"  # Generate + apply
 - **CSRF protection**: Built into FastAPI
 - **Session revocation**: Database-backed session tracking
 - **Comprehensive security tests**: JWT algorithm attacks, session hijacking, privilege escalation
-- **Dependency vulnerability scanning**: `make dep-audit` (pip-audit against OSV database)
-- **License compliance**: `make license-check` (blocks GPL-3.0/AGPL)
-- **Secrets detection**: Pre-commit hook blocks hardcoded secrets
-- **Unified security pipeline**: `make audit` (all security checks), `make check` (quality + security + tests)
 
 ## Docker Deployment
 
@@ -289,7 +281,7 @@ docker-compose exec backend python -c "from app.init_db import init_db; import a
 - Authentication system (JWT with refresh tokens, OAuth/social login)
 - **OAuth Provider Mode (MCP-ready)**: Full OAuth 2.0 Authorization Server
 - Session management (device tracking, revocation)
-- User management (full lifecycle, password change)
+- User management (CRUD, password change)
 - Organization system (multi-tenant with RBAC)
 - Admin panel (user/org management, bulk operations)
 - **Internationalization (i18n)** with English and Italian
````
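The endpoint checklist above replaces the repository pattern with a CRUD layer (`crud/base.py` "with common operations"). For orientation only, here is a minimal sketch of what such a generic base class could look like under the async SQLAlchemy 2.0 setup the docs describe — the real `crud/base.py` may differ:

```python
from typing import Any, Generic, TypeVar

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

ModelT = TypeVar("ModelT")

class CRUDBase(Generic[ModelT]):
    """Generic CRUD operations shared by all models (illustrative sketch)."""

    def __init__(self, model: type[ModelT]) -> None:
        self.model = model

    async def get(self, db: AsyncSession, id: Any) -> ModelT | None:
        # Assumes the mapped model exposes an `id` column.
        result = await db.execute(select(self.model).where(self.model.id == id))
        return result.scalar_one_or_none()

    async def create(self, db: AsyncSession, obj_in: dict[str, Any]) -> ModelT:
        obj = self.model(**obj_in)
        db.add(obj)
        await db.commit()
        await db.refresh(obj)
        return obj
```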
CLAUDE.md (357 lines changed)

````diff
@@ -1,253 +1,204 @@
 # CLAUDE.md
 
-Claude Code context for FastAPI + Next.js Full-Stack Template.
+Claude Code context for **Syndarix** - AI-Powered Software Consulting Agency.
 
-**See [AGENTS.md](./AGENTS.md) for project context, architecture, and development commands.**
+**Built on PragmaStack.** See [AGENTS.md](./AGENTS.md) for base template context.
 
 ---
 
+## Syndarix Project Context
+
+### Vision
+
+Syndarix is an autonomous platform that orchestrates specialized AI agents to deliver complete software solutions with minimal human intervention. It acts as a virtual consulting agency with AI agents playing roles like Product Owner, Architect, Engineers, QA, etc.
+
+### Repository
+
+- **URL:** https://gitea.pragmazest.com/cardosofelipe/syndarix
+- **Issue Tracker:** Gitea Issues (primary)
+- **CI/CD:** Gitea Actions
+
+### Core Concepts
+
+**Agent Types & Instances:**
+- Agent Type = Template (base model, failover, expertise, personality)
+- Agent Instance = Spawned from type, assigned to project
+- Multiple instances of same type can work together
+
+**Project Workflow:**
+1. Requirements discovery with Product Owner agent
+2. Architecture spike (PO + BA + Architect brainstorm)
+3. Implementation planning and backlog creation
+4. Autonomous sprint execution with checkpoints
+5. Demo and client feedback
+
+**Autonomy Levels:**
+- `FULL_CONTROL`: Approve every action
+- `MILESTONE`: Approve sprint boundaries
+- `AUTONOMOUS`: Only major decisions
+
+**MCP-First Architecture:**
+All integrations via Model Context Protocol servers with explicit scoping:
+```python
+# All tools take project_id for scoping
+search_knowledge(project_id="proj-123", query="auth flow")
+create_issue(project_id="proj-123", title="Add login")
+```
+
+### Directory Structure
+
+```
+docs/
+├── development/   # Workflow and coding standards
+├── requirements/  # Requirements documents
+├── architecture/  # Architecture documentation
+├── adrs/          # Architecture Decision Records
+└── spikes/        # Spike research documents
+```
+
+### Current Phase
+
+**Backlog Population** - Creating detailed issues for Phase 0-1 implementation.
+
+---
+
+## Development Standards
+
+**CRITICAL: These rules are mandatory. See linked docs for full details.**
+
+### Quick Reference
+
+| Topic | Documentation |
+|-------|---------------|
+| **Workflow & Branching** | [docs/development/WORKFLOW.md](./docs/development/WORKFLOW.md) |
+| **Coding Standards** | [docs/development/CODING_STANDARDS.md](./docs/development/CODING_STANDARDS.md) |
+| **Design System** | [frontend/docs/design-system/](./frontend/docs/design-system/) |
+| **Backend E2E Testing** | [backend/docs/E2E_TESTING.md](./backend/docs/E2E_TESTING.md) |
+| **Demo Mode** | [frontend/docs/DEMO_MODE.md](./frontend/docs/DEMO_MODE.md) |
+
+### Essential Rules Summary
+
+1. **Issue-Driven Development**: Every piece of work MUST have an issue first
+2. **Branch per Feature**: `feature/<issue-number>-<description>`, single branch for design+implementation
+3. **Testing Required**: All code must be tested, aim for >90% coverage
+4. **Code Review**: Must pass multi-agent review before merge
+5. **No Direct Commits**: Never commit directly to `main` or `dev`
+6. **Stack Verification**: ALWAYS run the full stack before considering work done (see below)
+
+### CRITICAL: Stack Verification Before Merge
+
+**This is NON-NEGOTIABLE. A feature with 100% test coverage that crashes on startup is WORTHLESS.**
+
+Before considering ANY issue complete:
+
+```bash
+# 1. Start the dev stack
+make dev
+
+# 2. Wait for backend to be healthy, check logs
+docker compose -f docker-compose.dev.yml logs backend --tail=100
+
+# 3. Start frontend
+cd frontend && npm run dev
+
+# 4. Verify both are running without errors
+```
+
+**The issue is NOT done if:**
+- Backend crashes on startup (import errors, missing dependencies)
+- Frontend fails to compile or render
+- Health checks fail
+- Any error appears in logs
+
+**Why this matters:**
+- Tests run in isolation and may pass despite broken imports
+- Docker builds cache layers and may hide dependency issues
+- A single `ModuleNotFoundError` renders all test coverage meaningless
+
+### Common Commands
+
+```bash
+# Backend
+IS_TEST=True uv run pytest        # Run tests
+uv run ruff check src/            # Lint
+uv run mypy src/                  # Type check
+python migrate.py auto "message"  # Database migration
+
+# Frontend
+npm test                # Unit tests
+npm run lint            # Lint
+npm run type-check      # Type check
+npm run generate:api    # Regenerate API client
+```
+
+---
+
 ## Claude Code-Specific Guidance
 
 ### Critical User Preferences
 
-#### File Operations - NEVER Use Heredoc/Cat Append
-**ALWAYS use Read/Write/Edit tools instead of `cat >> file << EOF` commands.**
+**File Operations:**
+- ALWAYS use Read/Write/Edit tools instead of `cat >> file << EOF`
+- Never use heredoc - it triggers manual approval dialogs
 
-This triggers manual approval dialogs and disrupts workflow.
-
-```bash
-# WRONG ❌
-cat >> file.txt << EOF
-content
-EOF
-
-# CORRECT ✅ - Use Read, then Write tools
-```
-
-#### Work Style
+**Work Style:**
 - User prefers autonomous operation without frequent interruptions
 - Ask for batch permissions upfront for long work sessions
 - Work independently, document decisions clearly
 - Only use emojis if the user explicitly requests it
 
-### When Working with This Stack
-
-**Dependency Management:**
-- Backend uses **uv** (modern Python package manager), not pip
-- Always use `uv run` prefix: `IS_TEST=True uv run pytest`
-- Or use Makefile commands: `make test`, `make install-dev`
-- Add dependencies: `uv add <package>` or `uv add --dev <package>`
-
-**Database Migrations:**
-- Use the `migrate.py` helper script, not Alembic directly
-- Generate + apply: `python migrate.py auto "message"`
-- Never commit migrations without testing them first
-- Check current state: `python migrate.py current`
-
-**Frontend API Client Generation:**
-- Run `bun run generate:api` after backend schema changes
-- Client is auto-generated from OpenAPI spec
-- Located in `frontend/src/lib/api/generated/`
-- NEVER manually edit generated files
-
-**Testing Commands:**
-- Backend unit/integration: `IS_TEST=True uv run pytest` (always prefix with `IS_TEST=True`)
-- Backend E2E (requires Docker): `make test-e2e`
-- Frontend unit: `bun run test`
-- Frontend E2E: `bun run test:e2e`
-- Use `make test` or `make test-cov` in backend for convenience
-
-**Security & Quality Commands (Backend):**
-- `make validate` — lint + format + type checks
-- `make audit` — dependency vulnerabilities + license compliance
-- `make validate-all` — quality + security checks
-- `make check` — **full pipeline**: quality + security + tests
-
-**Backend E2E Testing (requires Docker):**
-- Install deps: `make install-e2e`
-- Run all E2E tests: `make test-e2e`
-- Run schema tests only: `make test-e2e-schema`
-- Run all tests: `make test-all` (unit + E2E)
-- Uses Testcontainers (real PostgreSQL) + Schemathesis (OpenAPI contract testing)
-- Markers: `@pytest.mark.e2e`, `@pytest.mark.postgres`, `@pytest.mark.schemathesis`
-- See: `backend/docs/E2E_TESTING.md` for complete guide
-
-### 🔴 CRITICAL: Auth Store Dependency Injection Pattern
+### Critical Pattern: Auth Store DI
 
 **ALWAYS use `useAuth()` from `AuthContext`, NEVER import `useAuthStore` directly!**
 
 ```typescript
-// ❌ WRONG - Bypasses dependency injection
+// ❌ WRONG
 import { useAuthStore } from '@/lib/stores/authStore';
 const { user, isAuthenticated } = useAuthStore();
 
-// ✅ CORRECT - Uses dependency injection
+// ✅ CORRECT
 import { useAuth } from '@/lib/auth/AuthContext';
 const { user, isAuthenticated } = useAuth();
 ```
 
-**Why This Matters:**
-- E2E tests inject mock stores via `window.__TEST_AUTH_STORE__`
-- Unit tests inject via `<AuthProvider store={mockStore}>`
-- Direct `useAuthStore` imports bypass this injection → **tests fail**
-- ESLint will catch violations (added Nov 2025)
-
-**Exceptions:**
-1. `AuthContext.tsx` - DI boundary, legitimately needs real store
-2. `client.ts` - Non-React context, uses dynamic import + `__TEST_AUTH_STORE__` check
-
-### E2E Test Best Practices
-
-When writing or fixing Playwright tests:
-
-**Navigation Pattern:**
-```typescript
-// ✅ CORRECT - Use Promise.all for Next.js Link clicks
-await Promise.all([
-  page.waitForURL('/target', { timeout: 10000 }),
-  link.click()
-]);
-```
-
-**Selectors:**
-- Use ID-based selectors for validation errors: `#email-error`
-- Error IDs use dashes not underscores: `#new-password-error`
-- Target `.border-destructive[role="alert"]` to avoid Next.js route announcer conflicts
-- Avoid generic `[role="alert"]` which matches multiple elements
-
-**URL Assertions:**
-```typescript
-// ✅ Use regex to handle query params
-await expect(page).toHaveURL(/\/auth\/login/);
-
-// ❌ Don't use exact strings (fails with query params)
-await expect(page).toHaveURL('/auth/login');
-```
-
-**Configuration:**
-- Uses 12 workers in non-CI mode (`playwright.config.ts`)
-- Reduces to 2 workers in CI for stability
-- Tests are designed to be non-flaky with proper waits
-
-### Important Implementation Details
-
-**Authentication Testing:**
-- Backend fixtures in `tests/conftest.py`:
-  - `async_test_db`: Fresh SQLite per test
-  - `async_test_user` / `async_test_superuser`: Pre-created users
-  - `user_token` / `superuser_token`: Access tokens for API calls
-- Always use `@pytest.mark.asyncio` for async tests
-- Use `@pytest_asyncio.fixture` for async fixtures
-
-**Database Testing:**
-```python
-# Mock database exceptions correctly
-from unittest.mock import patch, AsyncMock
-
-async def mock_commit():
-    raise OperationalError("Connection lost", {}, Exception())
-
-with patch.object(session, 'commit', side_effect=mock_commit):
-    with patch.object(session, 'rollback', new_callable=AsyncMock) as mock_rollback:
-        with pytest.raises(OperationalError):
-            await repo_method(session, obj_in=data)
-        mock_rollback.assert_called_once()
-```
-
-**Frontend Component Development:**
-- Follow design system docs in `frontend/docs/design-system/`
-- Read `08-ai-guidelines.md` for AI code generation rules
-- Use parent-controlled spacing (see `04-spacing-philosophy.md`)
-- WCAG AA compliance required (see `07-accessibility.md`)
-
-**Security Considerations:**
-- Backend has comprehensive security tests (JWT attacks, session hijacking)
-- Never skip security headers in production
-- Rate limiting is configured in route decorators: `@limiter.limit("10/minute")`
-- Session revocation is database-backed, not just JWT expiry
-- Run `make audit` to check for dependency vulnerabilities and license compliance
-- Run `make check` for the full pipeline: quality + security + tests
-- Pre-commit hooks enforce Ruff lint/format and detect-secrets on every commit
-  - Setup hooks: `cd backend && uv run pre-commit install`
-
-### Common Workflows Guidance
-
-**When Adding a New Feature:**
-1. Start with backend schema and repository
-2. Implement API route with proper authorization
-3. Write backend tests (aim for >90% coverage)
-4. Generate frontend API client: `bun run generate:api`
-5. Implement frontend components
-6. Write frontend unit tests
-7. Add E2E tests for critical flows
-8. Update relevant documentation
-
-**When Fixing Tests:**
-- Backend: Check test database isolation and async fixture usage
-- Frontend unit: Verify mocking of `useAuth()` not `useAuthStore`
-- E2E: Use `Promise.all()` pattern and regex URL assertions
-
-**When Debugging:**
-- Backend: Check `IS_TEST=True` environment variable is set
-- Frontend: Run `bun run type-check` first
-- E2E: Use `bun run test:e2e:debug` for step-by-step debugging
-- Check logs: Backend has detailed error logging
-
-**Demo Mode (Frontend-Only Showcase):**
-- Enable: `echo "NEXT_PUBLIC_DEMO_MODE=true" > frontend/.env.local`
-- Uses MSW (Mock Service Worker) to intercept API calls in browser
-- Zero backend required - perfect for Vercel deployments
-- **Fully Automated**: MSW handlers auto-generated from OpenAPI spec
-  - Run `bun run generate:api` → updates both API client AND MSW handlers
-  - No manual synchronization needed!
-- Demo credentials (any password ≥8 chars works):
-  - User: `demo@example.com` / `DemoPass123`
-  - Admin: `admin@example.com` / `AdminPass123`
-- **Safe**: MSW never runs during tests (Jest or Playwright)
-- **Coverage**: Mock files excluded from linting and coverage
-- **Documentation**: `frontend/docs/DEMO_MODE.md` for complete guide
+See [CODING_STANDARDS.md](./docs/development/CODING_STANDARDS.md#auth-store-dependency-injection) for details.
 
 ### Tool Usage Preferences
 
 **Prefer specialized tools over bash:**
 - Use Read/Write/Edit tools for file operations
 - Never use `cat`, `echo >`, or heredoc for file manipulation
 - Use Task tool with `subagent_type=Explore` for codebase exploration
 - Use Grep tool for code search, not bash `grep`
 
-**When to use parallel tool calls:**
-- Independent git commands: `git status`, `git diff`, `git log`
+**Parallel tool calls for:**
+- Independent git commands
 - Reading multiple unrelated files
-- Running multiple test suites simultaneously
+- Running multiple test suites
 - Independent validation steps
 
-## Custom Skills
+---
 
-No Claude Code Skills installed yet. To create one, invoke the built-in "skill-creator" skill.
+## Key Extensions (from PragmaStack base)
 
-**Potential skill ideas for this project:**
-- API endpoint generator workflow (schema → repository → route → tests → frontend client)
-- Component generator with design system compliance
-- Database migration troubleshooting helper
-- Test coverage analyzer and improvement suggester
-- E2E test generator for new features
+- Celery + Redis for agent job queue
+- WebSocket/SSE for real-time updates
+- pgvector for RAG knowledge base
+- MCP server integration layer
 
 ---
 
 ## Additional Resources
 
-**Comprehensive Documentation:**
+**Documentation:**
 - [AGENTS.md](./AGENTS.md) - Framework-agnostic AI assistant context
 - [README.md](./README.md) - User-facing project overview
-- `backend/docs/` - Backend architecture, coding standards, common pitfalls
-- `frontend/docs/design-system/` - Complete design system guide
+- [docs/development/](./docs/development/) - Development workflow and standards
+- [backend/docs/](./backend/docs/) - Backend architecture and guides
+- [frontend/docs/design-system/](./frontend/docs/design-system/) - Complete design system
 
 **API Documentation (when running):**
 - Swagger UI: http://localhost:8000/docs
 - ReDoc: http://localhost:8000/redoc
 - OpenAPI JSON: http://localhost:8000/api/v1/openapi.json
 
 **Testing Documentation:**
 - Backend tests: `backend/tests/` (97% coverage)
 - Frontend E2E: `frontend/e2e/README.md`
 - Design system: `frontend/docs/design-system/08-ai-guidelines.md`
 
 ---
 
 **For project architecture, development commands, and general context, see [AGENTS.md](./AGENTS.md).**
````
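The MCP-first rule in the CLAUDE.md diff above is that every tool call carries an explicit `project_id`. A small, hypothetical sketch of how that contract could be enforced at the tool boundary — the decorator and stub below are illustrative, not the project's actual code:

```python
from functools import wraps
from typing import Any, Callable

def require_project_scope(tool: Callable[..., Any]) -> Callable[..., Any]:
    """Reject any tool call that does not name the project it is scoped to."""
    @wraps(tool)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        if not kwargs.get("project_id"):
            raise ValueError(f"{tool.__name__} requires an explicit project_id")
        return tool(*args, **kwargs)
    return wrapper

@require_project_scope
def search_knowledge(*, project_id: str, query: str) -> list[dict[str, Any]]:
    ...  # a real implementation would query the project's knowledge base

search_knowledge(project_id="proj-123", query="auth flow")  # passes the guard
```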
Further hunks (the file header for these was lost in the capture):

````diff
@@ -91,10 +91,7 @@ Ready to write some code? Awesome!
 cd backend
 
 # Install dependencies (uv manages virtual environment automatically)
-make install-dev
-
-# Setup pre-commit hooks
-uv run pre-commit install
+uv sync
 
 # Setup environment
 cp .env.example .env
@@ -103,14 +100,8 @@ cp .env.example .env
 # Run migrations
 python migrate.py apply
 
-# Run quality + security checks
-make validate-all
-
 # Run tests
-make test
-
-# Run full pipeline (quality + security + tests)
-make check
+IS_TEST=True uv run pytest
 
 # Start dev server
 uvicorn app.main:app --reload
@@ -122,20 +113,20 @@ uvicorn app.main:app --reload
 cd frontend
 
 # Install dependencies
-bun install
+npm install
 
 # Setup environment
 cp .env.local.example .env.local
 
 # Generate API client
-bun run generate:api
+npm run generate:api
 
 # Run tests
-bun run test
-bun run test:e2e:ui
+npm test
+npm run test:e2e:ui
 
 # Start dev server
-bun run dev
+npm run dev
 ```
 
 ---
@@ -204,7 +195,7 @@ export function UserProfile({ userId }: UserProfileProps) {
 
 ### Key Patterns
 
-- **Backend**: Use repository pattern, keep routes thin, business logic in services
+- **Backend**: Use CRUD pattern, keep routes thin, business logic in services
 - **Frontend**: Use React Query for server state, Zustand for client state
 - **Both**: Handle errors gracefully, log appropriately, write tests
@@ -325,7 +316,7 @@ Fixed stuff
 ### Before Submitting
 
 - [ ] Code follows project style guidelines
-- [ ] `make check` passes (quality + security + tests) in backend
+- [ ] All tests pass locally
 - [ ] New tests added for new features
 - [ ] Documentation updated if needed
 - [ ] No merge conflicts with `main`
````
Makefile (139 lines changed)

````diff
@@ -1,18 +1,34 @@
-.PHONY: help dev dev-full prod down logs logs-dev clean clean-slate drop-db reset-db push-images deploy scan-images
+.PHONY: help dev dev-full prod down logs logs-dev clean clean-slate drop-db reset-db push-images deploy
+.PHONY: test test-backend test-mcp test-frontend test-all test-cov test-integration validate validate-all format-all
 
 VERSION ?= latest
 REGISTRY ?= ghcr.io/cardosofelipe/pragma-stack
 
 # Default target
 help:
-	@echo "FastAPI + Next.js Full-Stack Template"
+	@echo "Syndarix - AI-Powered Software Consulting Agency"
 	@echo ""
 	@echo "Development:"
-	@echo "  make dev          - Start backend + db (frontend runs separately)"
+	@echo "  make dev          - Start backend + db + MCP servers (frontend runs separately)"
 	@echo "  make dev-full     - Start all services including frontend"
 	@echo "  make down         - Stop all services"
 	@echo "  make logs-dev     - Follow dev container logs"
 	@echo ""
+	@echo "Testing:"
+	@echo "  make test             - Run all tests (backend + MCP servers)"
+	@echo "  make test-backend     - Run backend tests only"
+	@echo "  make test-mcp         - Run MCP server tests only"
+	@echo "  make test-frontend    - Run frontend tests only"
+	@echo "  make test-cov         - Run all tests with coverage reports"
+	@echo "  make test-integration - Run MCP integration tests (requires running stack)"
+	@echo ""
+	@echo "Formatting:"
+	@echo "  make format-all   - Format code in backend + MCP servers + frontend"
+	@echo ""
+	@echo "Validation:"
+	@echo "  make validate     - Validate backend + MCP servers (lint, type-check, test)"
+	@echo "  make validate-all - Validate everything including frontend"
+	@echo ""
 	@echo "Database:"
 	@echo "  make drop-db      - Drop and recreate empty database"
 	@echo "  make reset-db     - Drop database and apply all migrations"
@@ -21,7 +37,6 @@ help:
 	@echo "  make prod         - Start production stack"
 	@echo "  make deploy       - Pull and deploy latest images"
 	@echo "  make push-images  - Build and push images to registry"
-	@echo "  make scan-images  - Scan production images for CVEs (requires trivy)"
 	@echo "  make logs         - Follow production container logs"
 	@echo ""
 	@echo "Cleanup:"
@@ -29,8 +44,10 @@ help:
 	@echo "  make clean-slate  - Stop containers AND delete volumes (DATA LOSS!)"
 	@echo ""
 	@echo "Subdirectory commands:"
-	@echo "  cd backend && make help   - Backend-specific commands"
-	@echo "  cd frontend && npm run    - Frontend-specific commands"
+	@echo "  cd backend && make help               - Backend-specific commands"
+	@echo "  cd mcp-servers/llm-gateway && make    - LLM Gateway commands"
+	@echo "  cd mcp-servers/knowledge-base && make - Knowledge Base commands"
+	@echo "  cd frontend && npm run                - Frontend-specific commands"
 
 # ============================================================================
 # Development
@@ -90,28 +107,6 @@ push-images:
 	docker push $(REGISTRY)/backend:$(VERSION)
 	docker push $(REGISTRY)/frontend:$(VERSION)
 
-scan-images:
-	@docker info > /dev/null 2>&1 || (echo "❌ Docker is not running!"; exit 1)
-	@echo "🐳 Building and scanning production images for CVEs..."
-	docker build -t $(REGISTRY)/backend:scan --target production ./backend
-	docker build -t $(REGISTRY)/frontend:scan --target runner ./frontend
-	@echo ""
-	@echo "=== Backend Image Scan ==="
-	@if command -v trivy > /dev/null 2>&1; then \
-		trivy image --severity HIGH,CRITICAL --exit-code 1 $(REGISTRY)/backend:scan; \
-	else \
-		echo "ℹ️  Trivy not found locally, using Docker to run Trivy..."; \
-		docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy image --severity HIGH,CRITICAL --exit-code 1 $(REGISTRY)/backend:scan; \
-	fi
-	@echo ""
-	@echo "=== Frontend Image Scan ==="
-	@if command -v trivy > /dev/null 2>&1; then \
-		trivy image --severity HIGH,CRITICAL --exit-code 1 $(REGISTRY)/frontend:scan; \
-	else \
-		docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy image --severity HIGH,CRITICAL --exit-code 1 $(REGISTRY)/frontend:scan; \
-	fi
-	@echo "✅ No HIGH/CRITICAL CVEs found in production images!"
 
 # ============================================================================
 # Cleanup
 # ============================================================================
@@ -122,3 +117,91 @@ clean:
 # WARNING! THIS REMOVES CONTAINERS AND VOLUMES AS WELL - DO NOT USE THIS UNLESS YOU WANT TO START OVER WITH DATA AND ALL
 clean-slate:
 	docker compose -f docker-compose.dev.yml down -v --remove-orphans
+
+# ============================================================================
+# Testing
+# ============================================================================
+
+test: test-backend test-mcp
+	@echo ""
+	@echo "All tests passed!"
+
+test-backend:
+	@echo "Running backend tests..."
+	@cd backend && IS_TEST=True uv run pytest tests/ -v
+
+test-mcp:
+	@echo "Running MCP server tests..."
+	@echo ""
+	@echo "=== LLM Gateway ==="
+	@cd mcp-servers/llm-gateway && uv run pytest tests/ -v
+	@echo ""
+	@echo "=== Knowledge Base ==="
+	@cd mcp-servers/knowledge-base && uv run pytest tests/ -v
+
+test-frontend:
+	@echo "Running frontend tests..."
+	@cd frontend && npm test
+
+test-all: test test-frontend
+	@echo ""
+	@echo "All tests (backend + MCP + frontend) passed!"
+
+test-cov:
+	@echo "Running all tests with coverage..."
+	@echo ""
+	@echo "=== Backend Coverage ==="
+	@cd backend && IS_TEST=True uv run pytest tests/ -v --cov=app --cov-report=term-missing
+	@echo ""
+	@echo "=== LLM Gateway Coverage ==="
+	@cd mcp-servers/llm-gateway && uv run pytest tests/ -v --cov=. --cov-report=term-missing
+	@echo ""
+	@echo "=== Knowledge Base Coverage ==="
+	@cd mcp-servers/knowledge-base && uv run pytest tests/ -v --cov=. --cov-report=term-missing
+
+test-integration:
+	@echo "Running MCP integration tests..."
+	@echo "Note: Requires running stack (make dev first)"
+	@cd backend && RUN_INTEGRATION_TESTS=true IS_TEST=True uv run pytest tests/integration/ -v
+
+# ============================================================================
+# Formatting
+# ============================================================================
+
+format-all:
+	@echo "Formatting backend..."
+	@cd backend && make format
+	@echo ""
+	@echo "Formatting LLM Gateway..."
+	@cd mcp-servers/llm-gateway && make format
+	@echo ""
+	@echo "Formatting Knowledge Base..."
+	@cd mcp-servers/knowledge-base && make format
+	@echo ""
+	@echo "Formatting frontend..."
+	@cd frontend && npm run format
+	@echo ""
+	@echo "All code formatted!"
+
+# ============================================================================
+# Validation (lint + type-check + test)
+# ============================================================================
+
+validate:
+	@echo "Validating backend..."
+	@cd backend && make validate
+	@echo ""
+	@echo "Validating LLM Gateway..."
+	@cd mcp-servers/llm-gateway && make validate
+	@echo ""
+	@echo "Validating Knowledge Base..."
+	@cd mcp-servers/knowledge-base && make validate
+	@echo ""
+	@echo "All validations passed!"
+
+validate-all: validate
+	@echo ""
+	@echo "Validating frontend..."
+	@cd frontend && npm run validate
+	@echo ""
+	@echo "Full validation passed!"
````
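Every backend pytest invocation in the Makefile above is prefixed with `IS_TEST=True`. A sketch of how a settings module might branch on that flag; the project's real configuration (Pydantic-based, per AGENTS.md) will differ in detail:

```python
import os

# Assumption: tests run against a throwaway SQLite database (the docs mention
# a fresh SQLite per test), while normal runs use the DATABASE_URL from .env.
IS_TEST = os.getenv("IS_TEST", "False").lower() in {"true", "1"}

if IS_TEST:
    DATABASE_URL = "sqlite+aiosqlite:///:memory:"
else:
    DATABASE_URL = os.getenv(
        "DATABASE_URL",
        "postgresql://postgres:postgres@db:5432/syndarix",
    )
```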
724
README.md
724
README.md
@@ -1,659 +1,175 @@
# <img src="frontend/public/logo.svg" alt="PragmaStack" width="32" height="32" style="vertical-align: middle" /> PragmaStack
# Syndarix

> **The Pragmatic Full-Stack Template. Production-ready, security-first, and opinionated.**
> **Your AI-Powered Software Consulting Agency**
>
> An autonomous platform that orchestrates specialized AI agents to deliver complete software solutions with minimal human intervention.

[](./backend/tests)
[](./frontend/tests)
[](./frontend/e2e)
[](https://gitea.pragmazest.com/cardosofelipe/fast-next-template)
[](./LICENSE)
[](./CONTRIBUTING.md)



---

## Why PragmaStack?
## Vision

Building a modern full-stack application often leads to "analysis paralysis" or "boilerplate fatigue". You spend weeks setting up authentication, testing, and linting before writing a single line of business logic.
Syndarix transforms the software development lifecycle by providing a **virtual consulting team** of AI agents that collaboratively plan, design, implement, test, and deliver complete software solutions.

**PragmaStack cuts through the noise.**
**The Problem:** Even with AI coding assistants, developers spend as much time managing AI as doing the work themselves. Context switching, babysitting, and knowledge fragmentation limit productivity.

We provide a **pragmatic**, opinionated foundation that prioritizes:
- **Speed**: Ship features, not config files.
- **Robustness**: Security and testing are not optional.
- **Clarity**: Code that is easy to read and maintain.

Whether you're building a SaaS, an internal tool, or a side project, PragmaStack gives you a solid starting point without the bloat.
**The Solution:** A structured, autonomous agency where specialized AI agents handle different roles (Product Owner, Architect, Engineers, QA, etc.) with proper workflows, reviews, and quality gates.

---

## ✨ Features
## Key Features

### 🔐 **Authentication & Security**
- JWT-based authentication with access + refresh tokens
- **OAuth/Social Login** (Google, GitHub) with PKCE support
- **OAuth 2.0 Authorization Server** (MCP-ready) for third-party integrations
- Session management with device tracking and revocation
- Password reset flow (email integration ready)
- Secure password hashing (bcrypt)
- CSRF protection, rate limiting, and security headers
- Comprehensive security tests (JWT algorithm attacks, session hijacking, privilege escalation)
### Multi-Agent Orchestration
- Configurable agent **types** with base model, failover, expertise, and personality
- Spawn multiple **instances** from the same type (e.g., Dave, Ellis, Kate as Software Developers)
- Agent-to-agent communication and collaboration
- Per-instance customization with domain-specific knowledge

### 🔌 **OAuth Provider Mode (MCP Integration)**
Full OAuth 2.0 Authorization Server for Model Context Protocol (MCP) and third-party clients (a PKCE sketch follows the list):
- **RFC 7636**: Authorization Code Flow with PKCE (S256 only)
- **RFC 8414**: Server metadata discovery at `/.well-known/oauth-authorization-server`
- **RFC 7662**: Token introspection endpoint
- **RFC 7009**: Token revocation endpoint
- **JWT access tokens**: Self-contained, configurable lifetime
- **Opaque refresh tokens**: Secure rotation, database-backed revocation
- **Consent management**: Users can review and revoke app permissions
- **Client management**: Admin endpoints for registering OAuth clients
- **Scopes**: `openid`, `profile`, `email`, `read:users`, `write:users`, `admin`
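To make the PKCE item above concrete: the client generates a random verifier and sends only its S256 challenge with the authorization request, proving possession of the verifier later at token exchange. A minimal stdlib sketch (the helper name is illustrative; the server-side validation is not shown in this diff):

```python
# Hypothetical client-side PKCE helper per RFC 7636 (S256 method).
import base64
import hashlib
import secrets


def make_pkce_pair() -> tuple[str, str]:
    """Return (code_verifier, code_challenge) for the authorization request."""
    verifier = secrets.token_urlsafe(64)[:128]  # RFC allows 43-128 chars
    digest = hashlib.sha256(verifier.encode("ascii")).digest()
    # Base64url without padding, as the spec requires.
    challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return verifier, challenge
```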
### Complete SDLC Support
- **Requirements Discovery** → **Architecture Spike** → **Implementation Planning**
- **Sprint Management** with automated ceremonies
- **Issue Tracking** with Epic/Story/Task hierarchy
- **Git Integration** with proper branch/PR workflows
- **CI/CD Pipelines** with automated testing

### 👥 **Multi-Tenancy & Organizations**
- Full organization system with role-based access control (Owner, Admin, Member)
- Invite/remove members, manage permissions
- Organization-scoped data access
- User can belong to multiple organizations
### Configurable Autonomy
- From `FULL_CONTROL` (approve everything) to `AUTONOMOUS` (only major milestones)
- Client can intervene at any point
- Transparent progress visibility

### 🛠️ **Admin Panel**
- Complete user management (full lifecycle, activate/deactivate, bulk operations)
- Organization management (create, edit, delete, member management)
- Session monitoring across all users
- Real-time statistics dashboard
- Admin-only routes with proper authorization
### MCP-First Architecture
- All integrations via **Model Context Protocol (MCP)** servers
- Unified Knowledge Base with project/agent scoping
- Git providers (Gitea, GitHub, GitLab) via MCP
- Extensible through custom MCP tools

### 🎨 **Modern Frontend**
- Next.js 16 with App Router and React 19
- **PragmaStack Design System** built on shadcn/ui + TailwindCSS
- Pre-configured theme with dark mode support (coming soon)
- Responsive, accessible components (WCAG AA compliant)
- Rich marketing landing page with animated components
- Live component showcase and documentation at `/dev`

### 🌍 **Internationalization (i18n)**
- Built-in multi-language support with next-intl v4
- Locale-based routing (`/en/*`, `/it/*`)
- Seamless language switching with LocaleSwitcher component
- SEO-friendly URLs and metadata per locale
- Translation files for English and Italian (easily extensible)
- Type-safe translations throughout the app

### 🎯 **Content & UX Features**
- **Toast notifications** with Sonner for elegant user feedback
- **Smooth animations** powered by Framer Motion
- **Markdown rendering** with syntax highlighting (GitHub Flavored Markdown)
- **Charts and visualizations** ready with Recharts
- **SEO optimization** with dynamic sitemap and robots.txt generation
- **Session tracking UI** with device information and revocation controls

### 🧪 **Comprehensive Testing**
- **Backend Testing**: ~97% unit test coverage
  - Unit, integration, and security tests
  - Async database testing with SQLAlchemy
  - API endpoint testing with fixtures
  - Security vulnerability tests (JWT attacks, session hijacking, privilege escalation)
- **Frontend Unit Tests**: ~97% coverage with Jest
  - Component testing
  - Hook testing
  - Utility function testing
- **End-to-End Tests**: Playwright with zero flaky tests
  - Complete user flows (auth, navigation, settings)
  - Parallel execution for speed
  - Visual regression testing ready

### 📚 **Developer Experience**
- Auto-generated TypeScript API client from OpenAPI spec
- Interactive API documentation (Swagger + ReDoc)
- Database migrations with Alembic helper script
- Hot reload in development for both frontend and backend
- Comprehensive code documentation and design system docs
- Live component playground at `/dev` with code examples
- Docker support for easy deployment
- VSCode workspace settings included

### 📊 **Ready for Production**
- Docker + docker-compose setup
- Environment-based configuration
- Database connection pooling
- Error handling and logging
- Health check endpoints
- Production security headers
- Rate limiting on sensitive endpoints
- SEO optimization with dynamic sitemaps and robots.txt
- Multi-language SEO with locale-specific metadata
- Performance monitoring and bundle analysis
### Project Complexity Wizard
- **Script** → Minimal process, no repo needed
- **Simple** → Single sprint, basic backlog
- **Medium/Complex** → Full AGILE workflow with multiple sprints

---

## 📸 Screenshots
## Technology Stack

<details>
<summary>Click to view screenshots</summary>
Built on [PragmaStack](https://gitea.pragmazest.com/cardosofelipe/fast-next-template):

### Landing Page


| Component | Technology |
|-----------|------------|
| Backend | FastAPI 0.115+ (Python 3.11+) |
| Frontend | Next.js 16 (React 19) |
| Database | PostgreSQL 15+ with pgvector |
| ORM | SQLAlchemy 2.0 |
| State Management | Zustand + TanStack Query |
| UI | shadcn/ui + Tailwind 4 |
| Auth | JWT dual-token + OAuth 2.0 |
| Testing | pytest + Jest + Playwright |
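
The "JWT dual-token" entry above refers to the short-lived access token plus longer-lived refresh token pattern. A minimal PyJWT sketch under assumed lifetimes and claims (the real claim set lives in the backend, and note that the OAuth provider mode uses opaque rather than JWT refresh tokens):

```python
# Hypothetical dual-token issuance sketch (pip install pyjwt).
from datetime import datetime, timedelta, timezone

import jwt  # PyJWT

SECRET_KEY = "change-me"  # loaded from the environment in a real app


def issue_tokens(user_id: str) -> dict[str, str]:
    now = datetime.now(timezone.utc)
    access = jwt.encode(
        {"sub": user_id, "type": "access", "exp": now + timedelta(minutes=15)},
        SECRET_KEY,
        algorithm="HS256",
    )
    refresh = jwt.encode(
        {"sub": user_id, "type": "refresh", "exp": now + timedelta(days=7)},
        SECRET_KEY,
        algorithm="HS256",
    )
    return {"access_token": access, "refresh_token": refresh}
```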

### Authentication


### Admin Dashboard


### Design System


</details>
### Syndarix Extensions

| Component | Technology |
|-----------|------------|
| Task Queue | Celery + Redis |
| Real-time | FastAPI WebSocket / SSE |
| Vector DB | pgvector (PostgreSQL extension) |
| MCP SDK | Anthropic MCP SDK |

---

## 🎭 Demo Mode
## Project Status

**Try the frontend without a backend!** Perfect for:
- **Free deployment** on Vercel (no backend costs)
- **Portfolio showcasing** with live demos
- **Client presentations** without infrastructure setup
**Phase:** Architecture & Planning

See [docs/requirements/](./docs/requirements/) for the comprehensive requirements document.

### Current Milestones
- [x] Fork PragmaStack as foundation
- [x] Create requirements document
- [ ] Execute architecture spikes
- [ ] Create ADRs for key decisions
- [ ] Begin MVP implementation

---

## Documentation

- [Requirements Document](./docs/requirements/SYNDARIX_REQUIREMENTS.md)
- [Architecture Decisions](./docs/adrs/) (coming soon)
- [Spike Research](./docs/spikes/) (coming soon)
- [Architecture Overview](./docs/architecture/) (coming soon)

---

## Getting Started

### Prerequisites
- Docker & Docker Compose
- Node.js 20+
- Python 3.11+
- PostgreSQL 15+ (or use Docker)

### Quick Start

```bash
cd frontend
echo "NEXT_PUBLIC_DEMO_MODE=true" > .env.local
bun run dev
```

**Demo Credentials:**
- Regular user: `demo@example.com` / `DemoPass123`
- Admin user: `admin@example.com` / `AdminPass123`

Demo mode uses [Mock Service Worker (MSW)](https://mswjs.io/) to intercept API calls in the browser. Your code remains unchanged - the same components work with both real and mocked backends.

**Key Features:**
- ✅ Zero backend required
- ✅ All features functional (auth, admin, stats)
- ✅ Realistic network delays and errors
- ✅ Does NOT interfere with tests (97%+ coverage maintained)
- ✅ One-line toggle: `NEXT_PUBLIC_DEMO_MODE=true`

📖 **[Complete Demo Mode Documentation](./frontend/docs/DEMO_MODE.md)**

---

## 🚀 Tech Stack

### Backend
- **[FastAPI](https://fastapi.tiangolo.com/)** - Modern async Python web framework
- **[SQLAlchemy 2.0](https://www.sqlalchemy.org/)** - Powerful ORM with async support
- **[PostgreSQL](https://www.postgresql.org/)** - Robust relational database
- **[Alembic](https://alembic.sqlalchemy.org/)** - Database migrations
- **[Pydantic v2](https://docs.pydantic.dev/)** - Data validation with type hints
- **[pytest](https://pytest.org/)** - Testing framework with async support

### Frontend
- **[Next.js 16](https://nextjs.org/)** - React framework with App Router
- **[React 19](https://react.dev/)** - UI library
- **[TypeScript](https://www.typescriptlang.org/)** - Type-safe JavaScript
- **[TailwindCSS](https://tailwindcss.com/)** - Utility-first CSS framework
- **[shadcn/ui](https://ui.shadcn.com/)** - Beautiful, accessible component library
- **[next-intl](https://next-intl.dev/)** - Internationalization (i18n) with type safety
- **[TanStack Query](https://tanstack.com/query)** - Powerful data fetching/caching
- **[Zustand](https://zustand-demo.pmnd.rs/)** - Lightweight state management
- **[Framer Motion](https://www.framer.com/motion/)** - Production-ready animation library
- **[Sonner](https://sonner.emilkowal.ski/)** - Beautiful toast notifications
- **[Recharts](https://recharts.org/)** - Composable charting library
- **[React Markdown](https://github.com/remarkjs/react-markdown)** - Markdown rendering with GFM support
- **[Playwright](https://playwright.dev/)** - End-to-end testing

### DevOps
- **[Docker](https://www.docker.com/)** - Containerization
- **[docker-compose](https://docs.docker.com/compose/)** - Multi-container orchestration
- **GitHub Actions** (coming soon) - CI/CD pipelines

---

## 📋 Prerequisites

- **Docker & Docker Compose** (recommended) - [Install Docker](https://docs.docker.com/get-docker/)
- **OR manually:**
  - Python 3.12+
  - Node.js 18+ (Node 20+ recommended)
  - PostgreSQL 15+

---

## 🏃 Quick Start (Docker)

The fastest way to get started is with Docker:

```bash
# Clone the repository
git clone https://github.com/cardosofelipe/pragma-stack.git
cd fast-next-template
git clone https://gitea.pragmazest.com/cardosofelipe/syndarix.git
cd syndarix

# Copy environment file
# Copy environment template
cp .env.template .env

# Start all services (backend, frontend, database)
docker-compose up
# Start development environment
docker-compose -f docker-compose.dev.yml up -d

# In another terminal, run database migrations
docker-compose exec backend alembic upgrade head
# Run database migrations
make migrate

# Create first superuser (optional)
docker-compose exec backend python -c "from app.init_db import init_db; import asyncio; asyncio.run(init_db())"
```

**That's it! 🎉**

- Frontend: http://localhost:3000
- Backend API: http://localhost:8000
- API Docs: http://localhost:8000/docs

Default superuser credentials:
- Email: `admin@example.com`
- Password: `admin123`

**⚠️ Change these immediately in production!**

---

## 🛠️ Manual Setup (Development)

### Backend Setup

```bash
cd backend

# Create virtual environment
python -m venv .venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate

# Install dependencies
pip install -r requirements.txt

# Setup environment
cp .env.example .env
# Edit .env with your database credentials

# Run migrations
alembic upgrade head

# Initialize database with first superuser
python -c "from app.init_db import init_db; import asyncio; asyncio.run(init_db())"

# Start development server
uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
```

### Frontend Setup

```bash
cd frontend

# Install dependencies
bun install

# Setup environment
cp .env.local.example .env.local
# Edit .env.local with your backend URL

# Generate API client
bun run generate:api

# Start development server
bun run dev
```

Visit http://localhost:3000 to see your app!

---

## 📂 Project Structure

```
├── backend/                    # FastAPI backend
│   ├── app/
│   │   ├── api/                # API routes and dependencies
│   │   ├── core/               # Core functionality (auth, config, database)
│   │   ├── repositories/       # Repository pattern (database operations)
│   │   ├── models/             # SQLAlchemy models
│   │   ├── schemas/            # Pydantic schemas
│   │   ├── services/           # Business logic
│   │   └── utils/              # Utilities
│   ├── tests/                  # Backend tests (97% coverage)
│   ├── alembic/                # Database migrations
│   └── docs/                   # Backend documentation
│
├── frontend/                   # Next.js frontend
│   ├── src/
│   │   ├── app/                # Next.js App Router pages
│   │   ├── components/         # React components
│   │   ├── lib/                # Libraries and utilities
│   │   │   ├── api/            # API client (auto-generated)
│   │   │   └── stores/         # Zustand stores
│   │   └── hooks/              # Custom React hooks
│   ├── e2e/                    # Playwright E2E tests
│   ├── tests/                  # Unit tests (Jest)
│   └── docs/                   # Frontend documentation
│       └── design-system/      # Comprehensive design system docs
│
├── docker-compose.yml          # Docker orchestration
├── docker-compose.dev.yml      # Development with hot reload
└── README.md                   # You are here!

# Start the development servers
make dev
```

---

## 🧪 Testing
## Architecture Overview

This template takes testing seriously with comprehensive coverage across all layers:

### Backend Unit & Integration Tests

**High coverage (~97%)** across all critical paths including security-focused tests.

```bash
cd backend

# Run all tests
IS_TEST=True pytest

# Run with coverage report
IS_TEST=True pytest --cov=app --cov-report=term-missing

# Run specific test file
IS_TEST=True pytest tests/api/test_auth.py -v

# Generate HTML coverage report
IS_TEST=True pytest --cov=app --cov-report=html
open htmlcov/index.html
```

**Test types:**
- **Unit tests**: Repository operations, utilities, business logic
- **Integration tests**: API endpoints with database
- **Security tests**: JWT algorithm attacks, session hijacking, privilege escalation
- **Error handling tests**: Database failures, validation errors

### Frontend Unit Tests

**High coverage (~97%)** with Jest and React Testing Library.

```bash
cd frontend

# Run unit tests
bun run test

# Run with coverage
bun run test:coverage

# Watch mode
bun run test:watch
```

**Test types:**
- Component rendering and interactions
- Custom hooks behavior
- State management
- Utility functions
- API integration mocks

### End-to-End Tests

**Zero flaky tests** with Playwright covering complete user journeys.

```bash
cd frontend

# Run E2E tests
bun run test:e2e

# Run E2E tests in UI mode (recommended for development)
bun run test:e2e:ui

# Run specific test file
npx playwright test auth-login.spec.ts

# Generate test report
npx playwright show-report
```

**Test coverage:**
- Complete authentication flows
- Navigation and routing
- Form submissions and validation
- Settings and profile management
- Session management
- Admin panel workflows (in progress)

---

## 🤖 AI-Friendly Documentation

This project includes comprehensive documentation designed for AI coding assistants:

- **[AGENTS.md](./AGENTS.md)** - Framework-agnostic AI assistant context for PragmaStack
- **[CLAUDE.md](./CLAUDE.md)** - Claude Code-specific guidance

These files provide AI assistants with the **PragmaStack** architecture, patterns, and best practices.

---

## 🗄️ Database Migrations

The template uses Alembic for database migrations:

```bash
cd backend

# Generate migration from model changes
python migrate.py generate "description of changes"

# Apply migrations
python migrate.py apply

# Or do both in one command
python migrate.py auto "description"

# View migration history
python migrate.py list

# Check current revision
python migrate.py current
```

```
+====================================================================+
|                           SYNDARIX CORE                            |
+====================================================================+
|  +------------------+  +------------------+  +------------------+  |
|  |Agent Orchestrator|  | Project Manager  |  | Workflow Engine  |  |
|  +------------------+  +------------------+  +------------------+  |
+====================================================================+
                                  |
                                  v
+====================================================================+
|                      MCP ORCHESTRATION LAYER                       |
|   All integrations via unified MCP servers with project scoping    |
+====================================================================+
                                  |
         +------------------------+------------------------+
         |                        |                        |
    +----v----+  +----v----+  +----v----+  +----v----+  +----v----+
    |   LLM   |  |   Git   |  |Knowledge|  |  File   |  |  Code   |
    |Providers|  |   MCP   |  |Base MCP |  |Sys. MCP |  |Analysis |
    +---------+  +---------+  +---------+  +---------+  +---------+
```

---

## 📖 Documentation
## Contributing

### AI Assistant Documentation

- **[AGENTS.md](./AGENTS.md)** - Framework-agnostic AI coding assistant context
- **[CLAUDE.md](./CLAUDE.md)** - Claude Code-specific guidance and preferences

### Backend Documentation

- **[ARCHITECTURE.md](./backend/docs/ARCHITECTURE.md)** - System architecture and design patterns
- **[CODING_STANDARDS.md](./backend/docs/CODING_STANDARDS.md)** - Code quality standards
- **[COMMON_PITFALLS.md](./backend/docs/COMMON_PITFALLS.md)** - Common mistakes to avoid
- **[FEATURE_EXAMPLE.md](./backend/docs/FEATURE_EXAMPLE.md)** - Step-by-step feature guide

### Frontend Documentation

- **[PragmaStack Design System](./frontend/docs/design-system/)** - Complete design system guide
  - Quick start, foundations (colors, typography, spacing)
  - Component library guide
  - Layout patterns, spacing philosophy
  - Forms, accessibility, AI guidelines
- **[E2E Testing Guide](./frontend/e2e/README.md)** - E2E testing setup and best practices

### API Documentation

When the backend is running:
- **Swagger UI**: http://localhost:8000/docs
- **ReDoc**: http://localhost:8000/redoc
- **OpenAPI JSON**: http://localhost:8000/api/v1/openapi.json
See [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines.

---

## 🚢 Deployment
## License

### Docker Production Deployment

```bash
# Build and start all services
docker-compose up -d

# Run migrations
docker-compose exec backend alembic upgrade head

# View logs
docker-compose logs -f

# Stop services
docker-compose down
```

### Production Checklist

- [ ] Change default superuser credentials
- [ ] Set strong `SECRET_KEY` in backend `.env`
- [ ] Configure production database (PostgreSQL)
- [ ] Set `ENVIRONMENT=production` in backend
- [ ] Configure CORS origins for your domain
- [ ] Setup SSL/TLS certificates
- [ ] Configure email service for password resets
- [ ] Setup monitoring and logging
- [ ] Configure backup strategy
- [ ] Review and adjust rate limits
- [ ] Test security headers
MIT License - see [LICENSE](./LICENSE) for details.

---

## 🛣️ Roadmap & Status
## Acknowledgments

### ✅ Completed
- [x] Authentication system (JWT, refresh tokens, session management, OAuth)
- [x] User management (full lifecycle, profile, password change)
- [x] Organization system with RBAC (Owner, Admin, Member)
- [x] Admin panel (users, organizations, sessions, statistics)
- [x] **Internationalization (i18n)** with next-intl (English + Italian)
- [x] Backend testing infrastructure (~97% coverage)
- [x] Frontend unit testing infrastructure (~97% coverage)
- [x] Frontend E2E testing (Playwright, zero flaky tests)
- [x] Design system documentation
- [x] **Marketing landing page** with animated components
- [x] **`/dev` documentation portal** with live component examples
- [x] **Toast notifications** system (Sonner)
- [x] **Charts and visualizations** (Recharts)
- [x] **Animation system** (Framer Motion)
- [x] **Markdown rendering** with syntax highlighting
- [x] **SEO optimization** (sitemap, robots.txt, locale-aware metadata)
- [x] Database migrations with helper script
- [x] Docker deployment
- [x] API documentation (OpenAPI/Swagger)

### 🚧 In Progress
- [ ] Email integration (templates ready, SMTP pending)

### 🔮 Planned
- [ ] GitHub Actions CI/CD pipelines
- [ ] Dynamic test coverage badges from CI
- [ ] E2E test coverage reporting
- [ ] OAuth token encryption at rest (security hardening)
- [ ] Additional languages (Spanish, French, German, etc.)
- [ ] SSO/SAML authentication
- [ ] Real-time notifications with WebSockets
- [ ] Webhook system
- [ ] File upload/storage (S3-compatible)
- [ ] Audit logging system
- [ ] API versioning example

---

## 🤝 Contributing

Contributions are welcome! Whether you're fixing bugs, improving documentation, or proposing new features, we'd love your help.

### How to Contribute

1. **Fork the repository**
2. **Create a feature branch** (`git checkout -b feature/amazing-feature`)
3. **Make your changes**
   - Follow existing code style
   - Add tests for new features
   - Update documentation as needed
4. **Run tests** to ensure everything works
5. **Commit your changes** (`git commit -m 'Add amazing feature'`)
6. **Push to your branch** (`git push origin feature/amazing-feature`)
7. **Open a Pull Request**

### Development Guidelines

- Write tests for new features (aim for >90% coverage)
- Follow the existing architecture patterns
- Update documentation when adding features
- Keep commits atomic and well-described
- Be respectful and constructive in discussions

### Reporting Issues

Found a bug? Have a suggestion? [Open an issue](https://github.com/cardosofelipe/pragma-stack/issues)!

Please include:
- Clear description of the issue/suggestion
- Steps to reproduce (for bugs)
- Expected vs. actual behavior
- Environment details (OS, Python/Node version, etc.)

---

## 📄 License

This project is licensed under the **MIT License** - see the [LICENSE](./LICENSE) file for details.

**TL;DR**: You can use this template for any purpose, commercial or non-commercial. Attribution is appreciated but not required!

---

## 🙏 Acknowledgments

This template is built on the shoulders of giants:

- [FastAPI](https://fastapi.tiangolo.com/) by Sebastián Ramírez
- [Next.js](https://nextjs.org/) by Vercel
- [shadcn/ui](https://ui.shadcn.com/) by shadcn
- [TanStack Query](https://tanstack.com/query) by Tanner Linsley
- [Playwright](https://playwright.dev/) by Microsoft
- And countless other open-source projects that make modern development possible

---

## 💬 Questions?

- **Documentation**: Check the `/docs` folders in backend and frontend
- **Issues**: [GitHub Issues](https://github.com/cardosofelipe/pragma-stack/issues)
- **Discussions**: [GitHub Discussions](https://github.com/cardosofelipe/pragma-stack/discussions)

---

## ⭐ Star This Repo

If this template saves you time, consider giving it a star! It helps others discover the project and motivates continued development.

**Happy coding! 🚀**

---

<div align="center">
Made with ❤️ by a developer who got tired of rebuilding the same boilerplate
</div>
- Built on [PragmaStack](https://gitea.pragmazest.com/cardosofelipe/fast-next-template)
- Powered by Claude and the Anthropic API

@@ -11,7 +11,7 @@ omit =
    app/utils/auth_test_utils.py

    # Async implementations not yet in use
    app/repositories/base_async.py
    app/crud/base_async.py
    app/core/database_async.py

    # CLI scripts - run manually, not tested
@@ -23,7 +23,7 @@ omit =
    app/api/routes/__init__.py
    app/api/dependencies/__init__.py
    app/core/__init__.py
    app/repositories/__init__.py
    app/crud/__init__.py
    app/models/__init__.py
    app/schemas/__init__.py
    app/services/__init__.py

@@ -1,44 +0,0 @@
# Pre-commit hooks for backend quality and security checks.
#
# Install:
#   cd backend && uv run pre-commit install
#
# Run manually on all files:
#   cd backend && uv run pre-commit run --all-files
#
# Skip hooks temporarily:
#   git commit --no-verify
#
repos:
  # ── Code Quality ──────────────────────────────────────────────────────────
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.14.4
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
      - id: ruff-format

  # ── General File Hygiene ──────────────────────────────────────────────────
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-toml
      - id: check-merge-conflict
      - id: check-added-large-files
        args: [--maxkb=500]
      - id: debug-statements

  # ── Security ──────────────────────────────────────────────────────────────
  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:
      - id: detect-secrets
        args: ['--baseline', '.secrets.baseline']
        exclude: |
          (?x)^(
            .*\.lock$|
            .*\.svg$
          )$
(File diff suppressed because it is too large.)
@@ -7,7 +7,10 @@ ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONPATH=/app \
    UV_COMPILE_BYTECODE=1 \
    UV_LINK_MODE=copy \
    UV_NO_CACHE=1
    UV_NO_CACHE=1 \
    UV_PROJECT_ENVIRONMENT=/opt/venv \
    VIRTUAL_ENV=/opt/venv \
    PATH="/opt/venv/bin:$PATH"

# Install system dependencies and uv
RUN apt-get update && \
@@ -20,7 +23,7 @@ RUN apt-get update && \
# Copy dependency files
COPY pyproject.toml uv.lock ./

# Install dependencies using uv (development mode with dev dependencies)
# Install dependencies using uv into /opt/venv (outside /app to survive bind mounts)
RUN uv sync --extra dev --frozen

# Copy application code
@@ -33,11 +36,11 @@ RUN chmod +x /usr/local/bin/entrypoint.sh

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]

# Production stage — Alpine eliminates glibc CVEs (e.g. CVE-2026-0861)
FROM python:3.12-alpine AS production
# Production stage
FROM python:3.12-slim AS production

# Create non-root user
RUN addgroup -S appuser && adduser -S -G appuser appuser
RUN groupadd -r appuser && useradd -r -g appuser appuser

WORKDIR /app
ENV PYTHONDONTWRITEBYTECODE=1 \
@@ -45,21 +48,24 @@ ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONPATH=/app \
    UV_COMPILE_BYTECODE=1 \
    UV_LINK_MODE=copy \
    UV_NO_CACHE=1
    UV_NO_CACHE=1 \
    UV_PROJECT_ENVIRONMENT=/opt/venv \
    VIRTUAL_ENV=/opt/venv \
    PATH="/opt/venv/bin:$PATH"

# Install system dependencies and uv
RUN apk add --no-cache postgresql-client curl ca-certificates && \
RUN apt-get update && \
    apt-get install -y --no-install-recommends postgresql-client curl ca-certificates && \
    curl -LsSf https://astral.sh/uv/install.sh | sh && \
    mv /root/.local/bin/uv* /usr/local/bin/
    mv /root/.local/bin/uv* /usr/local/bin/ && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Copy dependency files
COPY pyproject.toml uv.lock ./

# Install build dependencies, compile Python packages, then remove build deps
RUN apk add --no-cache --virtual .build-deps \
    gcc g++ musl-dev python3-dev linux-headers libffi-dev openssl-dev && \
    uv sync --frozen --no-dev && \
    apk del .build-deps
# Install only production dependencies using uv into /opt/venv
RUN uv sync --frozen --no-dev

# Copy application code
COPY . .
@@ -67,7 +73,7 @@ COPY entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/entrypoint.sh

# Set ownership to non-root user
RUN chown -R appuser:appuser /app
RUN chown -R appuser:appuser /app /opt/venv

# Switch to non-root user
USER appuser
@@ -77,4 +83,4 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["uv", "run", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
108	backend/Makefile
@@ -1,7 +1,4 @@
.PHONY: help lint lint-fix format format-check type-check test test-cov validate clean install-dev sync check-docker install-e2e test-e2e test-e2e-schema test-all dep-audit license-check audit validate-all check benchmark benchmark-check benchmark-save scan-image test-api-security

# Prevent a stale VIRTUAL_ENV in the caller's shell from confusing uv
unexport VIRTUAL_ENV
.PHONY: help lint lint-fix format format-check type-check test test-cov validate clean install-dev sync check-docker install-e2e test-e2e test-e2e-schema test-all test-integration

# Default target
help:
@@ -17,30 +14,17 @@ help:
	@echo "  make lint-fix         - Run Ruff linter with auto-fix"
	@echo "  make format           - Format code with Ruff"
	@echo "  make format-check     - Check if code is formatted"
	@echo "  make type-check       - Run pyright type checking"
	@echo "  make validate         - Run all checks (lint + format + types + schema fuzz)"
	@echo ""
	@echo "Performance:"
	@echo "  make benchmark        - Run performance benchmarks"
	@echo "  make benchmark-save   - Run benchmarks and save as baseline"
	@echo "  make benchmark-check  - Run benchmarks and compare against baseline"
	@echo ""
	@echo "Security & Audit:"
	@echo "  make dep-audit        - Scan dependencies for known vulnerabilities"
	@echo "  make license-check    - Check dependency license compliance"
	@echo "  make audit            - Run all security audits (deps + licenses)"
	@echo "  make scan-image       - Scan Docker image for CVEs (requires trivy)"
	@echo "  make validate-all     - Run all quality + security checks"
	@echo "  make check            - Full pipeline: quality + security + tests"
	@echo "  make type-check       - Run mypy type checking"
	@echo "  make validate         - Run all checks (lint + format + types)"
	@echo ""
	@echo "Testing:"
	@echo "  make test             - Run pytest (unit/integration, SQLite)"
	@echo "  make test-cov         - Run pytest with coverage report"
	@echo "  make test-e2e         - Run E2E tests (PostgreSQL, requires Docker)"
	@echo "  make test-e2e-schema  - Run Schemathesis API schema tests"
	@echo "  make test-integration - Run MCP integration tests (requires running stack)"
	@echo "  make test-all         - Run all tests (unit + E2E)"
	@echo "  make check-docker     - Check if Docker is available"
	@echo "  make check            - Full pipeline: quality + security + tests"
	@echo ""
	@echo "Cleanup:"
	@echo "  make clean            - Remove cache and build artifacts"
@@ -80,52 +64,12 @@ format-check:
	@uv run ruff format --check app/ tests/

type-check:
	@echo "🔎 Running pyright type checking..."
	@uv run pyright app/
	@echo "🔎 Running mypy type checking..."
	@uv run mypy app/

validate: lint format-check type-check test-api-security
validate: lint format-check type-check
	@echo "✅ All quality checks passed!"

# API Security Testing (Schemathesis property-based fuzzing)
test-api-security: check-docker
	@echo "🔐 Running Schemathesis API security fuzzing..."
	@IS_TEST=True PYTHONPATH=. uv run pytest tests/e2e/ -v -m "schemathesis" --tb=short -n 0
	@echo "✅ API schema security tests passed!"

# ============================================================================
# Security & Audit
# ============================================================================

dep-audit:
	@echo "🔒 Scanning dependencies for known vulnerabilities..."
	@uv run pip-audit --desc --skip-editable
	@echo "✅ No known vulnerabilities found!"

license-check:
	@echo "📜 Checking dependency license compliance..."
	@uv run pip-licenses --fail-on="GPL-3.0-or-later;AGPL-3.0-or-later" --format=plain > /dev/null
	@echo "✅ All dependency licenses are compliant!"

audit: dep-audit license-check
	@echo "✅ All security audits passed!"

scan-image: check-docker
	@echo "🐳 Scanning Docker image for OS-level CVEs with Trivy..."
	@docker build -t pragma-backend:scan -q --target production .
	@if command -v trivy > /dev/null 2>&1; then \
		trivy image --severity HIGH,CRITICAL --exit-code 1 pragma-backend:scan; \
	else \
		echo "ℹ️  Trivy not found locally, using Docker to run Trivy..."; \
		docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy image --severity HIGH,CRITICAL --exit-code 1 pragma-backend:scan; \
	fi
	@echo "✅ No HIGH/CRITICAL CVEs found in Docker image!"

validate-all: validate audit
	@echo "✅ All quality + security checks passed!"

check: validate-all test
	@echo "✅ Full validation pipeline complete!"

# ============================================================================
# Testing
# ============================================================================
@@ -136,9 +80,18 @@ test:

test-cov:
	@echo "🧪 Running tests with coverage..."
	@IS_TEST=True PYTHONPATH=. uv run pytest --cov=app --cov-report=term-missing --cov-report=html -n 16
	@IS_TEST=True PYTHONPATH=. uv run pytest --cov=app --cov-report=term-missing --cov-report=html -n 20
	@echo "📊 Coverage report generated in htmlcov/index.html"

# ============================================================================
# Integration Testing (requires running stack: make dev)
# ============================================================================

test-integration:
	@echo "🧪 Running MCP integration tests..."
	@echo "Note: Requires running stack (make dev from project root)"
	@RUN_INTEGRATION_TESTS=true IS_TEST=True PYTHONPATH=. uv run pytest tests/integration/ -v

# ============================================================================
# E2E Testing (requires Docker)
# ============================================================================
@@ -171,31 +124,6 @@ test-e2e-schema: check-docker
	@echo "🧪 Running Schemathesis API schema tests..."
	@IS_TEST=True PYTHONPATH=. uv run pytest tests/e2e/ -v -m "schemathesis" --tb=short -n 0

# ============================================================================
# Performance Benchmarks
# ============================================================================

benchmark:
	@echo "⏱️  Running performance benchmarks..."
	@IS_TEST=True PYTHONPATH=. uv run pytest tests/benchmarks/ -v --benchmark-only --benchmark-sort=mean -p no:xdist --override-ini='addopts='

benchmark-save:
	@echo "⏱️  Running benchmarks and saving baseline..."
	@IS_TEST=True PYTHONPATH=. uv run pytest tests/benchmarks/ -v --benchmark-only --benchmark-save=baseline --benchmark-sort=mean -p no:xdist --override-ini='addopts='
	@echo "✅ Benchmark baseline saved to .benchmarks/"

benchmark-check:
	@echo "⏱️  Running benchmarks and comparing against baseline..."
	@if find .benchmarks -name '*_baseline*' -print -quit 2>/dev/null | grep -q .; then \
		IS_TEST=True PYTHONPATH=. uv run pytest tests/benchmarks/ -v --benchmark-only --benchmark-compare=0001_baseline --benchmark-sort=mean --benchmark-compare-fail=mean:200% -p no:xdist --override-ini='addopts='; \
		echo "✅ No performance regressions detected!"; \
	else \
		echo "⚠️  No benchmark baseline found. Run 'make benchmark-save' first to create one."; \
		echo "   Running benchmarks without comparison..."; \
		IS_TEST=True PYTHONPATH=. uv run pytest tests/benchmarks/ -v --benchmark-only --benchmark-save=baseline --benchmark-sort=mean -p no:xdist --override-ini='addopts='; \
		echo "✅ Benchmark baseline created. Future runs of 'make benchmark-check' will compare against it."; \
	fi

test-all:
	@echo "🧪 Running ALL tests (unit + E2E)..."
	@$(MAKE) test
@@ -209,7 +137,7 @@ clean:
	@echo "🧹 Cleaning up..."
	@find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
	@find . -type d -name ".pytest_cache" -exec rm -rf {} + 2>/dev/null || true
	@find . -type d -name ".pyright" -exec rm -rf {} + 2>/dev/null || true
	@find . -type d -name ".mypy_cache" -exec rm -rf {} + 2>/dev/null || true
	@find . -type d -name ".ruff_cache" -exec rm -rf {} + 2>/dev/null || true
	@find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
	@find . -type d -name "htmlcov" -exec rm -rf {} + 2>/dev/null || true

@@ -1,6 +1,6 @@
# PragmaStack Backend API
# Syndarix Backend API

> The pragmatic, production-ready FastAPI backend for PragmaStack.
> The pragmatic, production-ready FastAPI backend for Syndarix.

## Overview

@@ -14,9 +14,7 @@ Features:
- **Multi-tenancy**: Organization-based access control with roles (Owner/Admin/Member)
- **Testing**: 97%+ coverage with security-focused test suite
- **Performance**: Async throughout, connection pooling, optimized queries
- **Modern Tooling**: uv for dependencies, Ruff for linting/formatting, Pyright for type checking
- **Security Auditing**: Automated dependency vulnerability scanning, license compliance, secrets detection
- **Pre-commit Hooks**: Ruff, detect-secrets, and standard checks on every commit
- **Modern Tooling**: uv for dependencies, Ruff for linting/formatting, mypy for type checking

## Quick Start

@@ -151,7 +149,7 @@ uv pip list --outdated
# Run any Python command via uv (no activation needed)
uv run python script.py
uv run pytest
uv run pyright app/
uv run mypy app/

# Or activate the virtual environment
source .venv/bin/activate
@@ -173,22 +171,12 @@ make lint # Run Ruff linter (check only)
make lint-fix          # Run Ruff with auto-fix
make format            # Format code with Ruff
make format-check      # Check if code is formatted
make type-check        # Run Pyright type checking
make type-check        # Run mypy type checking
make validate          # Run all checks (lint + format + types)

# Security & Audit
make dep-audit         # Scan dependencies for known vulnerabilities (CVEs)
make license-check     # Check dependency license compliance
make audit             # Run all security audits (deps + licenses)
make validate-all      # Run all quality + security checks
make check             # Full pipeline: quality + security + tests

# Testing
make test              # Run all tests
make test-cov          # Run tests with coverage report
make test-e2e          # Run E2E tests (PostgreSQL, requires Docker)
make test-e2e-schema   # Run Schemathesis API schema tests
make test-all          # Run all tests (unit + E2E)

# Utilities
make clean             # Remove cache and build artifacts
@@ -264,7 +252,7 @@ app/
│   ├── database.py      # Database engine setup
│   ├── auth.py          # JWT token handling
│   └── exceptions.py    # Custom exceptions
├── repositories/        # Repository pattern (database operations)
├── crud/                # Database operations
├── models/              # SQLAlchemy ORM models
├── schemas/             # Pydantic request/response schemas
├── services/            # Business logic layer
@@ -364,29 +352,18 @@ open htmlcov/index.html
# Using Makefile (recommended)
make lint              # Ruff linting
make format            # Ruff formatting
make type-check        # Pyright type checking
make type-check        # mypy type checking
make validate          # All checks at once

# Security audits
make dep-audit         # Scan dependencies for CVEs
make license-check     # Check license compliance
make audit             # All security audits
make validate-all      # Quality + security checks
make check             # Full pipeline: quality + security + tests

# Using uv directly
uv run ruff check app/ tests/
uv run ruff format app/ tests/
uv run pyright app/
uv run mypy app/
```

**Tools:**
- **Ruff**: All-in-one linting, formatting, and import sorting (replaces Black, Flake8, isort)
- **Pyright**: Static type checking (strict mode)
- **pip-audit**: Dependency vulnerability scanning against the OSV database
- **pip-licenses**: Dependency license compliance checking
- **detect-secrets**: Hardcoded secrets/credentials detection
- **pre-commit**: Git hook framework for automated checks on every commit
- **mypy**: Static type checking with Pydantic plugin

All configurations are in `pyproject.toml`.

@@ -462,7 +439,7 @@ See [docs/FEATURE_EXAMPLE.md](docs/FEATURE_EXAMPLE.md) for step-by-step guide.

Quick overview:
1. Create Pydantic schemas in `app/schemas/`
2. Create repository in `app/repositories/`
2. Create CRUD operations in `app/crud/`
3. Create route in `app/api/routes/`
4. Register router in `app/api/main.py`
5. Write tests in `tests/api/`
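
A compressed sketch of what steps 1-2 typically look like in this layout; the feature, names, and base classes are illustrative (the real repository base classes live in `app/repositories/` and are not shown in this diff):

```python
# Hypothetical "widget" feature spanning app/models, app/schemas, app/repositories.
import uuid

from pydantic import BaseModel
from sqlalchemy import String, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Widget(Base):  # app/models/widget.py
    __tablename__ = "widgets"
    id: Mapped[uuid.UUID] = mapped_column(primary_key=True, default=uuid.uuid4)
    name: Mapped[str] = mapped_column(String(255), unique=True)


class WidgetCreate(BaseModel):  # app/schemas/widget.py
    name: str


class WidgetRepository:  # app/repositories/widget.py
    """All database access for Widget lives behind this class."""

    def __init__(self, session: AsyncSession) -> None:
        self.session = session

    async def create(self, data: WidgetCreate) -> Widget:
        widget = Widget(name=data.name)
        self.session.add(widget)
        await self.session.flush()  # the caller (service layer) owns the commit
        return widget

    async def get_by_name(self, name: str) -> Widget | None:
        result = await self.session.execute(
            select(Widget).where(Widget.name == name)
        )
        return result.scalar_one_or_none()
```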
@@ -612,42 +589,13 @@ Configured in `app/core/config.py`:
- **Security Headers**: CSP, HSTS, X-Frame-Options, etc.
- **Input Validation**: Pydantic schemas, SQL injection prevention (ORM)

### Security Auditing

Automated, deterministic security checks are built into the development workflow:

```bash
# Scan dependencies for known vulnerabilities (CVEs)
make dep-audit

# Check dependency license compliance (blocks GPL-3.0/AGPL)
make license-check

# Run all security audits
make audit

# Full pipeline: quality + security + tests
make check
```

**Pre-commit hooks** automatically run on every commit:
- **Ruff** lint + format checks
- **detect-secrets** blocks commits containing hardcoded secrets
- **Standard checks**: trailing whitespace, YAML/TOML validation, merge conflict detection, large file prevention

Set up pre-commit hooks:
```bash
uv run pre-commit install
```

### Security Best Practices

1. **Never commit secrets**: Use `.env` files (git-ignored), enforced by detect-secrets pre-commit hook
1. **Never commit secrets**: Use `.env` files (git-ignored)
2. **Strong SECRET_KEY**: Min 32 chars, cryptographically random
3. **HTTPS in production**: Required for token security
4. **Regular updates**: Keep dependencies current (`uv sync --upgrade`), run `make dep-audit` to check for CVEs
4. **Regular updates**: Keep dependencies current (`uv sync --upgrade`)
5. **Audit logs**: Monitor authentication events
6. **Run `make check` before pushing**: Validates quality, security, and tests in one command
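
For item 2, one stdlib way to generate such a key (any cryptographically random source of 32+ characters works):

```python
# Generate a SECRET_KEY candidate for .env; run once and paste the output.
import secrets

print(secrets.token_urlsafe(48))  # 64 URL-safe chars, well above the 32-char minimum
```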

---

@@ -697,11 +645,7 @@ logging.basicConfig(level=logging.INFO)
**Built with modern Python tooling:**
- 🚀 **uv** - 10-100x faster dependency management
- ⚡ **Ruff** - 10-100x faster linting & formatting
- 🔍 **Pyright** - Static type checking (strict mode)
- 🔍 **mypy** - Static type checking
- ✅ **pytest** - Comprehensive test suite
- 🔒 **pip-audit** - Dependency vulnerability scanning
- 🔑 **detect-secrets** - Hardcoded secrets detection
- 📜 **pip-licenses** - License compliance checking
- 🪝 **pre-commit** - Automated git hooks

**All configured in a single `pyproject.toml` file!**

@@ -0,0 +1,66 @@
"""Enable pgvector extension

Revision ID: 0003
Revises: 0002
Create Date: 2025-12-30

This migration enables the pgvector extension for PostgreSQL, which provides
vector similarity search capabilities required for the RAG (Retrieval-Augmented
Generation) knowledge base system.

Vector Dimension Reference (per ADR-008 and SPIKE-006):
---------------------------------------------------------
The dimension size depends on the embedding model used:

| Model                      | Dimensions | Use Case                     |
|----------------------------|------------|------------------------------|
| text-embedding-3-small     | 1536       | General docs, conversations  |
| text-embedding-3-large     | 256-3072   | High accuracy (configurable) |
| voyage-code-3              | 1024       | Code files (Python, JS, etc) |
| voyage-3-large             | 1024       | High quality general purpose |
| nomic-embed-text (Ollama)  | 768        | Local/fallback embedding     |

Recommended defaults for Syndarix:
- Documentation/conversations: 1536 (text-embedding-3-small)
- Code files: 1024 (voyage-code-3)

Prerequisites:
--------------
This migration requires PostgreSQL with the pgvector extension installed.
The Docker Compose configuration uses `pgvector/pgvector:pg17` which includes
the extension pre-installed.

References:
-----------
- ADR-008: Knowledge Base and RAG Architecture
- SPIKE-006: Knowledge Base with pgvector for RAG System
- https://github.com/pgvector/pgvector
"""

from collections.abc import Sequence

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0003"
down_revision: str | None = "0002"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Enable the pgvector extension.

    The CREATE EXTENSION IF NOT EXISTS statement is idempotent - it will
    succeed whether the extension already exists or not.
    """
    op.execute("CREATE EXTENSION IF NOT EXISTS vector")


def downgrade() -> None:
    """Drop the pgvector extension.

    Note: This will fail if any tables with vector columns exist.
    Future migrations that create vector columns should be downgraded first.
    """
    op.execute("DROP EXTENSION IF EXISTS vector")
@@ -1,35 +0,0 @@
"""rename oauth account token fields drop encrypted suffix

Revision ID: 0003
Revises: 0002
Create Date: 2026-02-27 01:03:18.869178

"""

from collections.abc import Sequence

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0003"
down_revision: str | None = "0002"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    op.alter_column(
        "oauth_accounts", "access_token_encrypted", new_column_name="access_token"
    )
    op.alter_column(
        "oauth_accounts", "refresh_token_encrypted", new_column_name="refresh_token"
    )


def downgrade() -> None:
    op.alter_column(
        "oauth_accounts", "access_token", new_column_name="access_token_encrypted"
    )
    op.alter_column(
        "oauth_accounts", "refresh_token", new_column_name="refresh_token_encrypted"
    )
507	backend/app/alembic/versions/0004_add_syndarix_models.py (new file)
@@ -0,0 +1,507 @@
"""Add Syndarix models

Revision ID: 0004
Revises: 0003
Create Date: 2025-12-31

This migration creates the core Syndarix domain tables:
- projects: Client engagement projects
- agent_types: Agent template configurations
- agent_instances: Spawned agent instances assigned to projects
- sprints: Sprint containers for issues
- issues: Work items (epics, stories, tasks, bugs)
"""

from collections.abc import Sequence

import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision: str = "0004"
down_revision: str | None = "0003"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Create Syndarix domain tables."""

    # =========================================================================
    # Create projects table
    # Note: ENUM types are created automatically by sa.Enum() during table creation
    # =========================================================================
    op.create_table(
        "projects",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("slug", sa.String(255), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column(
            "autonomy_level",
            sa.Enum(
                "full_control",
                "milestone",
                "autonomous",
                name="autonomy_level",
            ),
            nullable=False,
            server_default="milestone",
        ),
        sa.Column(
            "status",
            sa.Enum(
                "active",
                "paused",
                "completed",
                "archived",
                name="project_status",
            ),
            nullable=False,
            server_default="active",
        ),
        sa.Column(
            "complexity",
            sa.Enum(
                "script",
                "simple",
                "medium",
                "complex",
                name="project_complexity",
            ),
            nullable=False,
            server_default="medium",
        ),
        sa.Column(
            "client_mode",
            sa.Enum("technical", "auto", name="client_mode"),
            nullable=False,
            server_default="auto",
        ),
        sa.Column(
            "settings",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="{}",
        ),
        sa.Column("owner_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["owner_id"], ["users.id"], ondelete="SET NULL"),
        sa.UniqueConstraint("slug"),
    )
    # Single column indexes
    op.create_index("ix_projects_name", "projects", ["name"])
    op.create_index("ix_projects_slug", "projects", ["slug"])
    op.create_index("ix_projects_status", "projects", ["status"])
    op.create_index("ix_projects_autonomy_level", "projects", ["autonomy_level"])
    op.create_index("ix_projects_complexity", "projects", ["complexity"])
    op.create_index("ix_projects_client_mode", "projects", ["client_mode"])
    op.create_index("ix_projects_owner_id", "projects", ["owner_id"])
    # Composite indexes
    op.create_index("ix_projects_slug_status", "projects", ["slug", "status"])
    op.create_index("ix_projects_owner_status", "projects", ["owner_id", "status"])
    op.create_index(
        "ix_projects_autonomy_status", "projects", ["autonomy_level", "status"]
    )
    op.create_index(
        "ix_projects_complexity_status", "projects", ["complexity", "status"]
    )

    # =========================================================================
    # Create agent_types table
    # =========================================================================
    op.create_table(
        "agent_types",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("slug", sa.String(255), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        # Areas of expertise (e.g., ["python", "fastapi", "databases"])
        sa.Column(
            "expertise",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        # System prompt defining personality and behavior (required)
        sa.Column("personality_prompt", sa.Text(), nullable=False),
        # LLM model configuration
        sa.Column("primary_model", sa.String(100), nullable=False),
        sa.Column(
            "fallback_models",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        # Model parameters (temperature, max_tokens, etc.)
        sa.Column(
            "model_params",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="{}",
        ),
        # MCP servers this agent can connect to
        sa.Column(
            "mcp_servers",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        # Tool permissions configuration
        sa.Column(
            "tool_permissions",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="{}",
        ),
        sa.Column("is_active", sa.Boolean(), nullable=False, server_default="true"),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("slug"),
    )
    # Single column indexes
    op.create_index("ix_agent_types_name", "agent_types", ["name"])
    op.create_index("ix_agent_types_slug", "agent_types", ["slug"])
    op.create_index("ix_agent_types_is_active", "agent_types", ["is_active"])
    # Composite indexes
    op.create_index("ix_agent_types_slug_active", "agent_types", ["slug", "is_active"])
    op.create_index("ix_agent_types_name_active", "agent_types", ["name", "is_active"])

    # =========================================================================
    # Create agent_instances table
    # =========================================================================
    op.create_table(
        "agent_instances",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("agent_type_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("name", sa.String(100), nullable=False),
        sa.Column(
            "status",
            sa.Enum(
                "idle",
                "working",
                "waiting",
                "paused",
                "terminated",
                name="agent_status",
            ),
            nullable=False,
            server_default="idle",
        ),
        sa.Column("current_task", sa.Text(), nullable=True),
        # Short-term memory (conversation context, recent decisions)
        sa.Column(
            "short_term_memory",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="{}",
        ),
        # Reference to long-term memory in vector store
        sa.Column("long_term_memory_ref", sa.String(500), nullable=True),
        # Session ID for active MCP connections
        sa.Column("session_id", sa.String(255), nullable=True),
        # Activity tracking
        sa.Column("last_activity_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("terminated_at", sa.DateTime(timezone=True), nullable=True),
        # Usage metrics
        sa.Column("tasks_completed", sa.Integer(), nullable=False, server_default="0"),
        sa.Column("tokens_used", sa.BigInteger(), nullable=False, server_default="0"),
        sa.Column(
            "cost_incurred",
            sa.Numeric(precision=10, scale=4),
            nullable=False,
            server_default="0",
        ),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(
            ["agent_type_id"], ["agent_types.id"], ondelete="RESTRICT"
        ),
        sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
    )
    # Single column indexes
    op.create_index("ix_agent_instances_name", "agent_instances", ["name"])
    op.create_index("ix_agent_instances_status", "agent_instances", ["status"])
    op.create_index(
        "ix_agent_instances_agent_type_id", "agent_instances", ["agent_type_id"]
    )
    op.create_index("ix_agent_instances_project_id", "agent_instances", ["project_id"])
    op.create_index("ix_agent_instances_session_id", "agent_instances", ["session_id"])
    op.create_index(
        "ix_agent_instances_last_activity_at", "agent_instances", ["last_activity_at"]
    )
    op.create_index(
        "ix_agent_instances_terminated_at", "agent_instances", ["terminated_at"]
    )
    # Composite indexes
    op.create_index(
        "ix_agent_instances_project_status",
        "agent_instances",
        ["project_id", "status"],
    )
    op.create_index(
        "ix_agent_instances_type_status",
        "agent_instances",
        ["agent_type_id", "status"],
    )
    op.create_index(
        "ix_agent_instances_project_type",
        "agent_instances",
        ["project_id", "agent_type_id"],
    )

    # =========================================================================
    # Create sprints table (before issues for FK reference)
    # =========================================================================
    op.create_table(
        "sprints",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("number", sa.Integer(), nullable=False),
        sa.Column("goal", sa.Text(), nullable=True),
        sa.Column("start_date", sa.Date(), nullable=False),
        sa.Column("end_date", sa.Date(), nullable=False),
        sa.Column(
            "status",
            sa.Enum(
                "planned",
                "active",
                "in_review",
                "completed",
                "cancelled",
                name="sprint_status",
            ),
            nullable=False,
            server_default="planned",
        ),
        sa.Column("planned_points", sa.Integer(), nullable=True),
        sa.Column("velocity", sa.Integer(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
        sa.UniqueConstraint("project_id", "number", name="uq_sprint_project_number"),
    )
    # Single column indexes
    op.create_index("ix_sprints_project_id", "sprints", ["project_id"])
    op.create_index("ix_sprints_status", "sprints", ["status"])
    op.create_index("ix_sprints_start_date", "sprints", ["start_date"])
    op.create_index("ix_sprints_end_date", "sprints", ["end_date"])
    # Composite indexes
    op.create_index("ix_sprints_project_status", "sprints", ["project_id", "status"])
    op.create_index("ix_sprints_project_number", "sprints", ["project_id", "number"])
    op.create_index("ix_sprints_date_range", "sprints", ["start_date", "end_date"])

    # =========================================================================
    # Create issues table
    # =========================================================================
    op.create_table(
        "issues",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False),
        # Parent issue for hierarchy (Epic -> Story -> Task)
        sa.Column("parent_id", postgresql.UUID(as_uuid=True), nullable=True),
        # Issue type (epic, story, task, bug)
        sa.Column(
            "type",
            sa.Enum(
                "epic",
                "story",
                "task",
                "bug",
                name="issue_type",
            ),
            nullable=False,
            server_default="task",
        ),
        # Reporter (who created this issue)
        sa.Column("reporter_id", postgresql.UUID(as_uuid=True), nullable=True),
        # Issue content
        sa.Column("title", sa.String(500), nullable=False),
        sa.Column("body", sa.Text(), nullable=False, server_default=""),
        # Status and priority
        sa.Column(
            "status",
            sa.Enum(
                "open",
                "in_progress",
                "in_review",
                "blocked",
                "closed",
                name="issue_status",
            ),
            nullable=False,
            server_default="open",
        ),
        sa.Column(
            "priority",
            sa.Enum(
                "low",
                "medium",
                "high",
                "critical",
                name="issue_priority",
            ),
            nullable=False,
            server_default="medium",
        ),
        # Labels for categorization
        sa.Column(
            "labels",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        # Assignment - agent or human (mutually exclusive)
        sa.Column("assigned_agent_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("human_assignee", sa.String(255), nullable=True),
        # Sprint association
        sa.Column("sprint_id", postgresql.UUID(as_uuid=True), nullable=True),
        # Estimation
        sa.Column("story_points", sa.Integer(), nullable=True),
        sa.Column("due_date", sa.Date(), nullable=True),
        # External tracker integration (String for flexibility)
        sa.Column("external_tracker_type", sa.String(50), nullable=True),
        sa.Column("external_issue_id", sa.String(255), nullable=True),
        sa.Column("remote_url", sa.String(1000), nullable=True),
        sa.Column("external_issue_number", sa.Integer(), nullable=True),
        # Sync status
        sa.Column(
            "sync_status",
            sa.Enum(
                "synced",
                "pending",
                "conflict",
                "error",
                name="sync_status",
            ),
            nullable=False,
            server_default="synced",
        ),
        sa.Column("last_synced_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("external_updated_at", sa.DateTime(timezone=True), nullable=True),
        # Lifecycle
        sa.Column("closed_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["parent_id"], ["issues.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["sprint_id"], ["sprints.id"], ondelete="SET NULL"),
        sa.ForeignKeyConstraint(
            ["assigned_agent_id"], ["agent_instances.id"], ondelete="SET NULL"
        ),
    )
    # Single column indexes
    op.create_index("ix_issues_project_id", "issues", ["project_id"])
    op.create_index("ix_issues_parent_id", "issues", ["parent_id"])
    op.create_index("ix_issues_type", "issues", ["type"])
    op.create_index("ix_issues_reporter_id", "issues", ["reporter_id"])
    op.create_index("ix_issues_status", "issues", ["status"])
    op.create_index("ix_issues_priority", "issues", ["priority"])
    op.create_index("ix_issues_assigned_agent_id", "issues", ["assigned_agent_id"])
    op.create_index("ix_issues_human_assignee", "issues", ["human_assignee"])
    op.create_index("ix_issues_sprint_id", "issues", ["sprint_id"])
    op.create_index("ix_issues_due_date", "issues", ["due_date"])
    op.create_index(
        "ix_issues_external_tracker_type", "issues", ["external_tracker_type"]
    )
    op.create_index("ix_issues_sync_status", "issues", ["sync_status"])
    op.create_index("ix_issues_closed_at", "issues", ["closed_at"])
    # Composite indexes
    op.create_index("ix_issues_project_status", "issues", ["project_id", "status"])
    op.create_index("ix_issues_project_priority", "issues", ["project_id", "priority"])
    op.create_index("ix_issues_project_sprint", "issues", ["project_id", "sprint_id"])
    op.create_index("ix_issues_project_type", "issues", ["project_id", "type"])
    op.create_index(
        "ix_issues_project_agent", "issues", ["project_id", "assigned_agent_id"]
    )
    op.create_index(
        "ix_issues_project_status_priority",
        "issues",
        ["project_id", "status", "priority"],
    )
    op.create_index(
        "ix_issues_external_tracker_id",
        "issues",
        ["external_tracker_type", "external_issue_id"],
    )


def downgrade() -> None:
    """Drop Syndarix domain tables."""
    # Drop tables in reverse order (respecting FK constraints)
    op.drop_table("issues")
    op.drop_table("sprints")
    op.drop_table("agent_instances")
    op.drop_table("agent_types")
    op.drop_table("projects")

    # Drop ENUM types
    op.execute("DROP TYPE IF EXISTS sprint_status")
    op.execute("DROP TYPE IF EXISTS sync_status")
    op.execute("DROP TYPE IF EXISTS issue_priority")
    op.execute("DROP TYPE IF EXISTS issue_status")
    op.execute("DROP TYPE IF EXISTS issue_type")
    op.execute("DROP TYPE IF EXISTS agent_status")
    op.execute("DROP TYPE IF EXISTS client_mode")
    op.execute("DROP TYPE IF EXISTS project_complexity")
    op.execute("DROP TYPE IF EXISTS project_status")
    op.execute("DROP TYPE IF EXISTS autonomy_level")
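sa.Enum() creates these ten types implicitly on upgrade, but nothing drops them automatically, hence the manual DROP TYPE list above. A quick hedged sanity check that they all exist after upgrading (the DSN is an assumption):

# Sketch: verify the migration's ENUM types landed in pg_type.
import sqlalchemy as sa

engine = sa.create_engine("postgresql://app:app@localhost/app")  # assumed DSN
expected = {
    "autonomy_level", "project_status", "project_complexity", "client_mode",
    "agent_status", "sprint_status", "issue_type", "issue_status",
    "issue_priority", "sync_status",
}
with engine.connect() as conn:
    rows = conn.execute(
        sa.text("SELECT typname FROM pg_type WHERE typname = ANY(:names)"),
        {"names": list(expected)},
    )
    found = {row.typname for row in rows}
print(sorted(expected - found) or "all enum types present")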
backend/app/alembic/versions/0005_add_memory_system_tables.py (new file, 512 lines)
@@ -0,0 +1,512 @@
"""Add Agent Memory System tables

Revision ID: 0005
Revises: 0004
Create Date: 2025-01-05

This migration creates the Agent Memory System tables:
- working_memory: Key-value storage with TTL for active sessions
- episodes: Experiential memories from task executions
- facts: Semantic knowledge triples with confidence scores
- procedures: Learned skills and procedures
- memory_consolidation_log: Tracks consolidation jobs

See Issue #88: Database Schema & Storage Layer
"""

from collections.abc import Sequence

import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision: str = "0005"
down_revision: str | None = "0004"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Create Agent Memory System tables."""

    # =========================================================================
    # Create ENUM types for memory system
    # =========================================================================

    # Scope type enum
    scope_type_enum = postgresql.ENUM(
        "global",
        "project",
        "agent_type",
        "agent_instance",
        "session",
        name="scope_type",
        create_type=False,
    )
    scope_type_enum.create(op.get_bind(), checkfirst=True)

    # Episode outcome enum
    episode_outcome_enum = postgresql.ENUM(
        "success",
        "failure",
        "partial",
        name="episode_outcome",
        create_type=False,
    )
    episode_outcome_enum.create(op.get_bind(), checkfirst=True)

    # Consolidation type enum
    consolidation_type_enum = postgresql.ENUM(
        "working_to_episodic",
        "episodic_to_semantic",
        "episodic_to_procedural",
        "pruning",
        name="consolidation_type",
        create_type=False,
    )
    consolidation_type_enum.create(op.get_bind(), checkfirst=True)

    # Consolidation status enum
    consolidation_status_enum = postgresql.ENUM(
        "pending",
        "running",
        "completed",
        "failed",
        name="consolidation_status",
        create_type=False,
    )
    consolidation_status_enum.create(op.get_bind(), checkfirst=True)

    # =========================================================================
    # Create working_memory table
    # Key-value storage with TTL for active sessions
    # =========================================================================
    op.create_table(
        "working_memory",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column(
            "scope_type",
            scope_type_enum,
            nullable=False,
        ),
        sa.Column("scope_id", sa.String(255), nullable=False),
        sa.Column("key", sa.String(255), nullable=False),
        sa.Column("value", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
        sa.Column("expires_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
    )

    # Working memory indexes
    op.create_index(
        "ix_working_memory_scope_type",
        "working_memory",
        ["scope_type"],
    )
    op.create_index(
        "ix_working_memory_scope_id",
        "working_memory",
        ["scope_id"],
    )
    op.create_index(
        "ix_working_memory_scope_key",
        "working_memory",
        ["scope_type", "scope_id", "key"],
        unique=True,
    )
    op.create_index(
        "ix_working_memory_expires",
        "working_memory",
        ["expires_at"],
    )
    op.create_index(
        "ix_working_memory_scope_list",
        "working_memory",
        ["scope_type", "scope_id"],
    )

    # =========================================================================
    # Create episodes table
    # Experiential memories from task executions
    # =========================================================================
    op.create_table(
        "episodes",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("agent_instance_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("agent_type_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("session_id", sa.String(255), nullable=False),
        sa.Column("task_type", sa.String(100), nullable=False),
        sa.Column("task_description", sa.Text(), nullable=False),
        sa.Column(
            "actions",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        sa.Column("context_summary", sa.Text(), nullable=False),
        sa.Column(
            "outcome",
            episode_outcome_enum,
            nullable=False,
        ),
        sa.Column("outcome_details", sa.Text(), nullable=True),
        sa.Column("duration_seconds", sa.Float(), nullable=False, server_default="0.0"),
        sa.Column("tokens_used", sa.BigInteger(), nullable=False, server_default="0"),
        sa.Column(
            "lessons_learned",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        sa.Column("importance_score", sa.Float(), nullable=False, server_default="0.5"),
        # Vector embedding - using TEXT as fallback, will be VECTOR(1536) when pgvector is available
        sa.Column("embedding", sa.Text(), nullable=True),
        sa.Column("occurred_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(
            ["project_id"],
            ["projects.id"],
            name="fk_episodes_project",
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["agent_instance_id"],
            ["agent_instances.id"],
            name="fk_episodes_agent_instance",
            ondelete="SET NULL",
        ),
        sa.ForeignKeyConstraint(
            ["agent_type_id"],
            ["agent_types.id"],
            name="fk_episodes_agent_type",
            ondelete="SET NULL",
        ),
    )

    # Episode indexes
    op.create_index("ix_episodes_project_id", "episodes", ["project_id"])
    op.create_index("ix_episodes_agent_instance_id", "episodes", ["agent_instance_id"])
    op.create_index("ix_episodes_agent_type_id", "episodes", ["agent_type_id"])
    op.create_index("ix_episodes_session_id", "episodes", ["session_id"])
    op.create_index("ix_episodes_task_type", "episodes", ["task_type"])
    op.create_index("ix_episodes_outcome", "episodes", ["outcome"])
    op.create_index("ix_episodes_importance_score", "episodes", ["importance_score"])
    op.create_index("ix_episodes_occurred_at", "episodes", ["occurred_at"])
    op.create_index("ix_episodes_project_task", "episodes", ["project_id", "task_type"])
    op.create_index(
        "ix_episodes_project_outcome", "episodes", ["project_id", "outcome"]
    )
    op.create_index(
        "ix_episodes_agent_task", "episodes", ["agent_instance_id", "task_type"]
    )
    op.create_index(
        "ix_episodes_project_time", "episodes", ["project_id", "occurred_at"]
    )
    op.create_index(
        "ix_episodes_importance_time",
        "episodes",
        ["importance_score", "occurred_at"],
    )

    # =========================================================================
    # Create facts table
    # Semantic knowledge triples with confidence scores
    # =========================================================================
    op.create_table(
        "facts",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column(
            "project_id", postgresql.UUID(as_uuid=True), nullable=True
        ),  # NULL for global facts
        sa.Column("subject", sa.String(500), nullable=False),
        sa.Column("predicate", sa.String(255), nullable=False),
        sa.Column("object", sa.Text(), nullable=False),
        sa.Column("confidence", sa.Float(), nullable=False, server_default="0.8"),
        # Source episode IDs stored as JSON array of UUID strings for cross-db compatibility
        sa.Column(
            "source_episode_ids",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        sa.Column("first_learned", sa.DateTime(timezone=True), nullable=False),
        sa.Column("last_reinforced", sa.DateTime(timezone=True), nullable=False),
        sa.Column(
            "reinforcement_count", sa.Integer(), nullable=False, server_default="1"
        ),
        # Vector embedding
        sa.Column("embedding", sa.Text(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(
            ["project_id"],
            ["projects.id"],
            name="fk_facts_project",
            ondelete="CASCADE",
        ),
    )

    # Fact indexes
    op.create_index("ix_facts_project_id", "facts", ["project_id"])
    op.create_index("ix_facts_subject", "facts", ["subject"])
    op.create_index("ix_facts_predicate", "facts", ["predicate"])
    op.create_index("ix_facts_confidence", "facts", ["confidence"])
    op.create_index("ix_facts_subject_predicate", "facts", ["subject", "predicate"])
    op.create_index("ix_facts_project_subject", "facts", ["project_id", "subject"])
    op.create_index(
        "ix_facts_confidence_time", "facts", ["confidence", "last_reinforced"]
    )
    # Unique constraint for triples within project scope
    op.create_index(
        "ix_facts_unique_triple",
        "facts",
        ["project_id", "subject", "predicate", "object"],
        unique=True,
        postgresql_where=sa.text("project_id IS NOT NULL"),
    )
    # Unique constraint for global facts (project_id IS NULL)
    op.create_index(
        "ix_facts_unique_triple_global",
        "facts",
        ["subject", "predicate", "object"],
        unique=True,
        postgresql_where=sa.text("project_id IS NULL"),
    )
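    # Why two partial unique indexes: PostgreSQL treats NULLs as distinct in
    # unique indexes, so one unique index over (project_id, subject, predicate,
    # object) would never reject duplicate global facts (project_id IS NULL).
    # Splitting on IS NULL / IS NOT NULL enforces uniqueness in both scopes.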

    # =========================================================================
    # Create procedures table
    # Learned skills and procedures
    # =========================================================================
    op.create_table(
        "procedures",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("agent_type_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("trigger_pattern", sa.Text(), nullable=False),
        sa.Column(
            "steps",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
        sa.Column("success_count", sa.Integer(), nullable=False, server_default="0"),
        sa.Column("failure_count", sa.Integer(), nullable=False, server_default="0"),
        sa.Column("last_used", sa.DateTime(timezone=True), nullable=True),
        # Vector embedding
        sa.Column("embedding", sa.Text(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(
            ["project_id"],
            ["projects.id"],
            name="fk_procedures_project",
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["agent_type_id"],
            ["agent_types.id"],
            name="fk_procedures_agent_type",
            ondelete="SET NULL",
        ),
    )

    # Procedure indexes
    op.create_index("ix_procedures_project_id", "procedures", ["project_id"])
    op.create_index("ix_procedures_agent_type_id", "procedures", ["agent_type_id"])
    op.create_index("ix_procedures_name", "procedures", ["name"])
    op.create_index("ix_procedures_last_used", "procedures", ["last_used"])
    op.create_index(
        "ix_procedures_unique_name",
        "procedures",
        ["project_id", "agent_type_id", "name"],
        unique=True,
    )
    op.create_index("ix_procedures_project_name", "procedures", ["project_id", "name"])
    # Note: agent_type_id already indexed via ix_procedures_agent_type_id (line 354)
    op.create_index(
        "ix_procedures_success_rate",
        "procedures",
        ["success_count", "failure_count"],
    )

    # =========================================================================
    # Add check constraints for data integrity
    # =========================================================================

    # Episode constraints
    op.create_check_constraint(
        "ck_episodes_importance_range",
        "episodes",
        "importance_score >= 0.0 AND importance_score <= 1.0",
    )
    op.create_check_constraint(
        "ck_episodes_duration_positive",
        "episodes",
        "duration_seconds >= 0.0",
    )
    op.create_check_constraint(
        "ck_episodes_tokens_positive",
        "episodes",
        "tokens_used >= 0",
    )

    # Fact constraints
    op.create_check_constraint(
        "ck_facts_confidence_range",
        "facts",
        "confidence >= 0.0 AND confidence <= 1.0",
    )
    op.create_check_constraint(
        "ck_facts_reinforcement_positive",
        "facts",
        "reinforcement_count >= 1",
    )

    # Procedure constraints
    op.create_check_constraint(
        "ck_procedures_success_positive",
        "procedures",
        "success_count >= 0",
    )
    op.create_check_constraint(
        "ck_procedures_failure_positive",
        "procedures",
        "failure_count >= 0",
    )

    # =========================================================================
    # Create memory_consolidation_log table
    # Tracks consolidation jobs
    # =========================================================================
    op.create_table(
        "memory_consolidation_log",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column(
            "consolidation_type",
            consolidation_type_enum,
            nullable=False,
        ),
        sa.Column("source_count", sa.Integer(), nullable=False, server_default="0"),
        sa.Column("result_count", sa.Integer(), nullable=False, server_default="0"),
        sa.Column("started_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "status",
            consolidation_status_enum,
            nullable=False,
            server_default="pending",
        ),
        sa.Column("error", sa.Text(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.text("now()"),
        ),
        sa.PrimaryKeyConstraint("id"),
    )

    # Consolidation log indexes
    op.create_index(
        "ix_consolidation_type",
        "memory_consolidation_log",
        ["consolidation_type"],
    )
    op.create_index(
        "ix_consolidation_status",
        "memory_consolidation_log",
        ["status"],
    )
    op.create_index(
        "ix_consolidation_type_status",
        "memory_consolidation_log",
        ["consolidation_type", "status"],
    )
    op.create_index(
        "ix_consolidation_started",
        "memory_consolidation_log",
        ["started_at"],
    )


def downgrade() -> None:
    """Drop Agent Memory System tables."""

    # Drop check constraints first
    op.drop_constraint("ck_procedures_failure_positive", "procedures", type_="check")
    op.drop_constraint("ck_procedures_success_positive", "procedures", type_="check")
    op.drop_constraint("ck_facts_reinforcement_positive", "facts", type_="check")
    op.drop_constraint("ck_facts_confidence_range", "facts", type_="check")
    op.drop_constraint("ck_episodes_tokens_positive", "episodes", type_="check")
    op.drop_constraint("ck_episodes_duration_positive", "episodes", type_="check")
    op.drop_constraint("ck_episodes_importance_range", "episodes", type_="check")

    # Drop unique indexes for global facts
    op.drop_index("ix_facts_unique_triple_global", "facts")

    # Drop tables in reverse order (dependencies first)
    op.drop_table("memory_consolidation_log")
    op.drop_table("procedures")
    op.drop_table("facts")
    op.drop_table("episodes")
    op.drop_table("working_memory")

    # Drop ENUM types
    op.execute("DROP TYPE IF EXISTS consolidation_status")
    op.execute("DROP TYPE IF EXISTS consolidation_type")
    op.execute("DROP TYPE IF EXISTS episode_outcome")
    op.execute("DROP TYPE IF EXISTS scope_type")
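The expires_at column plus ix_working_memory_expires implies a periodic TTL sweep; a minimal sketch of one (the session and scheduling wiring is assumed to live elsewhere):

# Sketch: purge expired working_memory rows; intended to run periodically.
import sqlalchemy as sa

async def purge_expired_working_memory(session) -> int:
    """Delete rows whose TTL has lapsed; returns the number removed."""
    result = await session.execute(
        sa.text(
            "DELETE FROM working_memory "
            "WHERE expires_at IS NOT NULL AND expires_at < now()"
        )
    )
    await session.commit()
    return result.rowcount or 0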
backend/app/alembic/versions/0006_add_abandoned_outcome.py (new file, 52 lines)
@@ -0,0 +1,52 @@
"""Add ABANDONED to episode_outcome enum

Revision ID: 0006
Revises: 0005
Create Date: 2025-01-06

This migration adds the 'abandoned' value to the episode_outcome enum type.
This allows episodes to track when a task was abandoned (not completed,
but not necessarily a failure either - e.g., user cancelled, session timeout).
"""

from collections.abc import Sequence

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0006"
down_revision: str | None = "0005"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Add 'abandoned' value to episode_outcome enum."""
    # PostgreSQL ALTER TYPE ADD VALUE is safe and non-blocking
    op.execute("ALTER TYPE episode_outcome ADD VALUE IF NOT EXISTS 'abandoned'")


def downgrade() -> None:
    """Remove 'abandoned' from episode_outcome enum.

    Note: PostgreSQL doesn't support removing values from enums directly.
    This downgrade converts any 'abandoned' episodes to 'failure' and
    recreates the enum without 'abandoned'.
    """
    # Convert any abandoned episodes to failure first
    op.execute("""
        UPDATE episodes
        SET outcome = 'failure'
        WHERE outcome = 'abandoned'
    """)

    # Recreate the enum without abandoned
    # This is complex in PostgreSQL - requires creating new type, updating columns, dropping old
    op.execute("ALTER TYPE episode_outcome RENAME TO episode_outcome_old")
    op.execute("CREATE TYPE episode_outcome AS ENUM ('success', 'failure', 'partial')")
    op.execute("""
        ALTER TABLE episodes
        ALTER COLUMN outcome TYPE episode_outcome
        USING outcome::text::episode_outcome
    """)
    op.execute("DROP TYPE episode_outcome_old")
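One hedge on the "safe and non-blocking" comment in upgrade(): on PostgreSQL 12+ the statement can run inside a transaction, but the new value is not usable until that transaction commits (and before 12 it could not run in a transaction block at all). A sketch, with an assumed DSN:

# Sketch: add the enum value in one transaction, use it only in the next.
import sqlalchemy as sa

engine = sa.create_engine("postgresql://app:app@localhost/app")  # assumed DSN
with engine.begin() as conn:
    conn.execute(sa.text(
        "ALTER TYPE episode_outcome ADD VALUE IF NOT EXISTS 'abandoned'"
    ))
# Only after the commit above is the value writable/castable:
with engine.begin() as conn:
    conn.execute(sa.text("SELECT 'abandoned'::episode_outcome"))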
@@ -0,0 +1,90 @@
"""Add category and display fields to agent_types table

Revision ID: 0007
Revises: 0006
Create Date: 2026-01-06

This migration adds:
- category: String(50) for grouping agents by role type
- icon: String(50) for Lucide icon identifier
- color: String(7) for hex color code
- sort_order: Integer for display ordering within categories
- typical_tasks: JSONB list of tasks this agent excels at
- collaboration_hints: JSONB list of agent slugs that work well together
"""

from collections.abc import Sequence

import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision: str = "0007"
down_revision: str | None = "0006"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Add category and display fields to agent_types table."""
    # Add new columns
    op.add_column(
        "agent_types",
        sa.Column("category", sa.String(length=50), nullable=True),
    )
    op.add_column(
        "agent_types",
        sa.Column("icon", sa.String(length=50), nullable=True, server_default="bot"),
    )
    op.add_column(
        "agent_types",
        sa.Column(
            "color", sa.String(length=7), nullable=True, server_default="#3B82F6"
        ),
    )
    op.add_column(
        "agent_types",
        sa.Column("sort_order", sa.Integer(), nullable=False, server_default="0"),
    )
    op.add_column(
        "agent_types",
        sa.Column(
            "typical_tasks",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
    )
    op.add_column(
        "agent_types",
        sa.Column(
            "collaboration_hints",
            postgresql.JSONB(astext_type=sa.Text()),
            nullable=False,
            server_default="[]",
        ),
    )

    # Add indexes for category and sort_order
    op.create_index("ix_agent_types_category", "agent_types", ["category"])
    op.create_index("ix_agent_types_sort_order", "agent_types", ["sort_order"])
    op.create_index(
        "ix_agent_types_category_sort", "agent_types", ["category", "sort_order"]
    )


def downgrade() -> None:
    """Remove category and display fields from agent_types table."""
    # Drop indexes
    op.drop_index("ix_agent_types_category_sort", table_name="agent_types")
    op.drop_index("ix_agent_types_sort_order", table_name="agent_types")
    op.drop_index("ix_agent_types_category", table_name="agent_types")

    # Drop columns
    op.drop_column("agent_types", "collaboration_hints")
    op.drop_column("agent_types", "typical_tasks")
    op.drop_column("agent_types", "sort_order")
    op.drop_column("agent_types", "color")
    op.drop_column("agent_types", "icon")
    op.drop_column("agent_types", "category")
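How these display fields are consumed is not part of this diff; a hedged sketch of the obvious ordering query (column names come from the migration, the query itself is an assumption):

# Sketch: list active agent types grouped for a UI, category then sort_order.
import sqlalchemy as sa

metadata = sa.MetaData()
agent_types = sa.Table(
    "agent_types", metadata,
    sa.Column("slug", sa.String(255)),
    sa.Column("category", sa.String(50)),
    sa.Column("icon", sa.String(50)),
    sa.Column("color", sa.String(7)),
    sa.Column("sort_order", sa.Integer),
    sa.Column("is_active", sa.Boolean),
)

stmt = (
    sa.select(agent_types.c.slug, agent_types.c.category, agent_types.c.icon,
              agent_types.c.color)
    .where(agent_types.c.is_active)
    .order_by(sa.nullslast(agent_types.c.category), agent_types.c.sort_order)
)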
@@ -1,12 +1,12 @@
 from fastapi import Depends, Header, HTTPException, status
 from fastapi.security import OAuth2PasswordBearer
 from fastapi.security.utils import get_authorization_scheme_param
+from sqlalchemy import select
 from sqlalchemy.ext.asyncio import AsyncSession

 from app.core.auth import TokenExpiredError, TokenInvalidError, get_token_data
 from app.core.database import get_db
 from app.models.user import User
-from app.repositories.user import user_repo

 # OAuth2 configuration
 oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login")

@@ -32,8 +32,9 @@ async def get_current_user(
     # Decode token and get user ID
     token_data = get_token_data(token)

-    # Get user from database via repository
-    user = await user_repo.get(db, id=str(token_data.user_id))
+    # Get user from database
+    result = await db.execute(select(User).where(User.id == token_data.user_id))
+    user = result.scalar_one_or_none()

     if not user:
         raise HTTPException(

@@ -143,9 +144,90 @@ async def get_optional_current_user(

     try:
         token_data = get_token_data(token)
-        user = await user_repo.get(db, id=str(token_data.user_id))
+        result = await db.execute(select(User).where(User.id == token_data.user_id))
+        user = result.scalar_one_or_none()
         if not user or not user.is_active:
             return None
         return user
     except (TokenExpiredError, TokenInvalidError):
         return None
+
+
+async def get_current_user_sse(
+    db: AsyncSession = Depends(get_db),
+    authorization: str | None = Header(None),
+    token: str | None = None,  # Query parameter - passed directly from route
+) -> User:
+    """
+    Get the current authenticated user for SSE endpoints.
+
+    SSE (Server-Sent Events) via EventSource API doesn't support custom headers,
+    so this dependency accepts tokens from either:
+    1. Authorization header (preferred, for non-EventSource clients)
+    2. Query parameter 'token' (fallback for EventSource compatibility)
+
+    Security note: Query parameter tokens appear in server logs and browser history.
+    Consider implementing short-lived SSE-specific tokens for production if this
+    is a concern. The current approach is acceptable for internal/trusted networks.
+
+    Args:
+        db: Database session
+        authorization: Authorization header (Bearer token)
+        token: Query parameter token (fallback for EventSource)
+
+    Returns:
+        User: The authenticated user
+
+    Raises:
+        HTTPException: If authentication fails
+    """
+    # Try Authorization header first (preferred)
+    auth_token = None
+    if authorization:
+        scheme, param = get_authorization_scheme_param(authorization)
+        if scheme.lower() == "bearer" and param:
+            auth_token = param
+
+    # Fall back to query parameter if no header token
+    if not auth_token and token:
+        auth_token = token
+
+    if not auth_token:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Not authenticated",
+            headers={"WWW-Authenticate": "Bearer"},
+        )
+
+    try:
+        # Decode token and get user ID
+        token_data = get_token_data(auth_token)
+
+        # Get user from database
+        result = await db.execute(select(User).where(User.id == token_data.user_id))
+        user = result.scalar_one_or_none()
+
+        if not user:
+            raise HTTPException(
+                status_code=status.HTTP_404_NOT_FOUND, detail="User not found"
+            )
+
+        if not user.is_active:
+            raise HTTPException(
+                status_code=status.HTTP_403_FORBIDDEN, detail="Inactive user"
+            )
+
+        return user
+
+    except TokenExpiredError:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Token expired",
+            headers={"WWW-Authenticate": "Bearer"},
+        )
+    except TokenInvalidError:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Could not validate credentials",
+            headers={"WWW-Authenticate": "Bearer"},
+        )
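For completeness, a hedged client-side sketch of the query-parameter fallback path (the endpoint path is an assumption; EventSource itself cannot set an Authorization header, which is the whole reason for the fallback):

# Sketch: consume the SSE stream with httpx, passing the token as ?token=.
import asyncio
import httpx

async def consume(token: str) -> None:
    url = f"http://localhost:8000/api/v1/events/stream?token={token}"  # assumed path
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("GET", url) as response:
            async for line in response.aiter_lines():
                if line.startswith("data: "):
                    print(line[len("data: "):])

# asyncio.run(consume("eyJ..."))  # hypothetical JWT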
backend/app/api/dependencies/event_bus.py (new file, 36 lines)
@@ -0,0 +1,36 @@
"""
Event bus dependency for FastAPI routes.

This module provides the FastAPI dependency for injecting the EventBus
into route handlers. The event bus is a singleton that maintains
Redis pub/sub connections for real-time event streaming.
"""

from app.services.event_bus import (
    EventBus,
    get_connected_event_bus as _get_connected_event_bus,
)


async def get_event_bus() -> EventBus:
    """
    FastAPI dependency that provides a connected EventBus instance.

    The EventBus is a singleton that maintains Redis pub/sub connections.
    It's lazily initialized and connected on first access, and should be
    closed during application shutdown via close_event_bus().

    Usage:
        @router.get("/events/stream")
        async def stream_events(
            event_bus: EventBus = Depends(get_event_bus)
        ):
            ...

    Returns:
        EventBus: The global connected event bus instance

    Raises:
        EventBusConnectionError: If connection to Redis fails
    """
    return await _get_connected_event_bus()
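A hedged sketch of a consumer for this dependency; EventBus.subscribe() and the channel name are assumptions, since the service's API is not part of this diff:

# Sketch: an SSE route wired through get_event_bus (subscribe API assumed).
from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse

from app.api.dependencies.event_bus import get_event_bus
from app.services.event_bus import EventBus

router = APIRouter()

@router.get("/events/stream")
async def stream_events(event_bus: EventBus = Depends(get_event_bus)):
    async def event_source():
        async for event in event_bus.subscribe("events"):  # assumed method
            yield f"data: {event}\n\n"
    return StreamingResponse(event_source(), media_type="text/event-stream")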
@@ -15,9 +15,9 @@ from sqlalchemy.ext.asyncio import AsyncSession

 from app.api.dependencies.auth import get_current_user
 from app.core.database import get_db
+from app.crud.organization import organization as organization_crud
 from app.models.user import User
 from app.models.user_organization import OrganizationRole
-from app.services.organization_service import organization_service


 def require_superuser(current_user: User = Depends(get_current_user)) -> User:

@@ -81,7 +81,7 @@ class OrganizationPermission:
         return current_user

         # Get user's role in organization
-        user_role = await organization_service.get_user_role_in_org(
+        user_role = await organization_crud.get_user_role_in_org(
             db, user_id=current_user.id, organization_id=organization_id
         )

@@ -123,7 +123,7 @@ async def require_org_membership(
     if current_user.is_superuser:
         return current_user

-    user_role = await organization_service.get_user_role_in_org(
+    user_role = await organization_crud.get_user_role_in_org(
        db, user_id=current_user.id, organization_id=organization_id
    )

@@ -1,41 +0,0 @@ (file deleted)
# app/api/dependencies/services.py
"""FastAPI dependency functions for service singletons."""

from app.services import oauth_provider_service
from app.services.auth_service import AuthService
from app.services.oauth_service import OAuthService
from app.services.organization_service import OrganizationService, organization_service
from app.services.session_service import SessionService, session_service
from app.services.user_service import UserService, user_service


def get_auth_service() -> AuthService:
    """Return the AuthService singleton for dependency injection."""
    from app.services.auth_service import AuthService as _AuthService

    return _AuthService()


def get_user_service() -> UserService:
    """Return the UserService singleton for dependency injection."""
    return user_service


def get_organization_service() -> OrganizationService:
    """Return the OrganizationService singleton for dependency injection."""
    return organization_service


def get_session_service() -> SessionService:
    """Return the SessionService singleton for dependency injection."""
    return session_service


def get_oauth_service() -> OAuthService:
    """Return OAuthService for dependency injection."""
    return OAuthService()


def get_oauth_provider_service():
    """Return the oauth_provider_service module for dependency injection."""
    return oauth_provider_service
@@ -2,11 +2,19 @@ from fastapi import APIRouter

 from app.api.routes import (
     admin,
+    agent_types,
+    agents,
     auth,
+    context,
+    events,
+    issues,
+    mcp,
     oauth,
     oauth_provider,
     organizations,
+    projects,
     sessions,
+    sprints,
     users,
 )

@@ -22,3 +30,25 @@ api_router.include_router(admin.router, prefix="/admin", tags=["Admin"])
 api_router.include_router(
     organizations.router, prefix="/organizations", tags=["Organizations"]
 )
+# SSE events router - no prefix, routes define full paths
+api_router.include_router(events.router, tags=["Events"])
+
+# MCP (Model Context Protocol) router
+api_router.include_router(mcp.router, prefix="/mcp", tags=["MCP"])
+
+# Context Management Engine router
+api_router.include_router(context.router, prefix="/context", tags=["Context"])
+
+# Syndarix domain routers
+api_router.include_router(projects.router, prefix="/projects", tags=["Projects"])
+api_router.include_router(
+    agent_types.router, prefix="/agent-types", tags=["Agent Types"]
+)
+# Issues router - routes include /projects/{project_id}/issues paths
+api_router.include_router(issues.router, tags=["Issues"])
+# Agents router - routes include /projects/{project_id}/agents paths
+api_router.include_router(agents.router, tags=["Agents"])
+# Sprints router - routes need prefix as they use /projects/{project_id}/sprints paths
+api_router.include_router(
+    sprints.router, prefix="/projects/{project_id}/sprints", tags=["Sprints"]
+)
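Note that the sprints prefix embeds a path parameter: FastAPI resolves {project_id} from an include_router prefix just as it would from a route path. A sketch of the receiving side (handler names are assumptions):

# Sketch: a handler under prefix /projects/{project_id}/sprints can declare
# project_id directly, even though its own path omits it.
from uuid import UUID
from fastapi import APIRouter

router = APIRouter()

@router.get("")  # final path: /projects/{project_id}/sprints
async def list_sprints(project_id: UUID):
    return {"project_id": str(project_id), "sprints": []}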
@@ -14,6 +14,7 @@ from uuid import UUID

 from fastapi import APIRouter, Depends, Query, status
 from pydantic import BaseModel, Field
+from sqlalchemy import func, select
 from sqlalchemy.ext.asyncio import AsyncSession

 from app.api.dependencies.permissions import require_superuser

@@ -24,9 +25,12 @@ from app.core.exceptions import (
     ErrorCode,
     NotFoundError,
 )
-from app.core.repository_exceptions import DuplicateEntryError
+from app.crud.organization import organization as organization_crud
+from app.crud.session import session as session_crud
+from app.crud.user import user as user_crud
+from app.models.organization import Organization
 from app.models.user import User
-from app.models.user_organization import OrganizationRole
+from app.models.user_organization import OrganizationRole, UserOrganization
 from app.schemas.common import (
     MessageResponse,
     PaginatedResponse,

@@ -42,9 +46,6 @@ from app.schemas.organizations import (
 )
 from app.schemas.sessions import AdminSessionResponse
 from app.schemas.users import UserCreate, UserResponse, UserUpdate
-from app.services.organization_service import organization_service
-from app.services.session_service import session_service
-from app.services.user_service import user_service

 logger = logging.getLogger(__name__)

@@ -65,7 +66,7 @@ class BulkUserAction(BaseModel):

     action: BulkAction = Field(..., description="Action to perform on selected users")
     user_ids: list[UUID] = Field(
-        ..., min_length=1, max_length=100, description="List of user IDs (max 100)"
+        ..., min_items=1, max_items=100, description="List of user IDs (max 100)"
     )

@@ -177,29 +178,38 @@ async def admin_get_stats(
     """Get admin dashboard statistics with real data from database."""
     from app.core.config import settings

-    stats = await user_service.get_stats(db)
-    total_users = stats["total_users"]
-    active_count = stats["active_count"]
-    inactive_count = stats["inactive_count"]
-    all_users = stats["all_users"]
+    # Check if we have any data
+    total_users_query = select(func.count()).select_from(User)
+    total_users = (await db.execute(total_users_query)).scalar() or 0

     # If database is essentially empty (only admin user), return demo data
     if total_users <= 1 and settings.DEMO_MODE:  # pragma: no cover
         logger.info("Returning demo stats data (empty database in demo mode)")
         return _generate_demo_stats()

-    # 1. User Growth (Last 30 days)
+    # 1. User Growth (Last 30 days) - Improved calculation
     datetime.now(UTC) - timedelta(days=30)

+    # Get all users with their creation dates
+    all_users_query = select(User).order_by(User.created_at)
+    result = await db.execute(all_users_query)
+    all_users = result.scalars().all()
+
     # Build cumulative counts per day
     user_growth = []
     for i in range(29, -1, -1):
         date = datetime.now(UTC) - timedelta(days=i)
         date_start = date.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=UTC)
         date_end = date_start + timedelta(days=1)

         # Count all users created before end of this day
         # Make comparison timezone-aware
         total_users_on_date = sum(
             1
             for u in all_users
             if u.created_at and u.created_at.replace(tzinfo=UTC) < date_end
         )
         # Count active users created before end of this day
         active_users_on_date = sum(
             1
             for u in all_users

@@ -217,16 +227,27 @@ async def admin_get_stats(
     )

     # 2. Organization Distribution - Top 6 organizations by member count
-    org_rows = await organization_service.get_org_distribution(db, limit=6)
-    org_dist = [OrgDistributionData(name=r["name"], value=r["value"]) for r in org_rows]
+    org_query = (
+        select(Organization.name, func.count(UserOrganization.user_id).label("count"))
+        .join(UserOrganization, Organization.id == UserOrganization.organization_id)
+        .group_by(Organization.name)
+        .order_by(func.count(UserOrganization.user_id).desc())
+        .limit(6)
+    )
+    result = await db.execute(org_query)
+    org_dist = [
+        OrgDistributionData(name=row.name, value=row.count) for row in result.all()
+    ]

-    # 3. User Registration Activity (Last 14 days)
+    # 3. User Registration Activity (Last 14 days) - NEW
     registration_activity = []
     for i in range(13, -1, -1):
         date = datetime.now(UTC) - timedelta(days=i)
         date_start = date.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=UTC)
         date_end = date_start + timedelta(days=1)

         # Count users created on this specific day
         # Make comparison timezone-aware
         day_registrations = sum(
             1
             for u in all_users

@@ -242,8 +263,16 @@ async def admin_get_stats(
     )

     # 4. User Status - Active vs Inactive
+    active_query = select(func.count()).select_from(User).where(User.is_active)
+    inactive_query = (
+        select(func.count()).select_from(User).where(User.is_active.is_(False))
+    )
+
+    active_count = (await db.execute(active_query)).scalar() or 0
+    inactive_count = (await db.execute(inactive_query)).scalar() or 0
+
     logger.info(
-        "User status counts - Active: %s, Inactive: %s", active_count, inactive_count
+        f"User status counts - Active: {active_count}, Inactive: {inactive_count}"
     )

     user_status = [
@@ -292,7 +321,7 @@ async def admin_list_users(
            filters["is_superuser"] = is_superuser

        # Get users with search
-        users, total = await user_service.list_users(
+        users, total = await user_crud.get_multi_with_total(
            db,
            skip=pagination.offset,
            limit=pagination.limit,
@@ -312,7 +341,7 @@ async def admin_list_users(
        return PaginatedResponse(data=users, pagination=pagination_meta)

    except Exception as e:
-        logger.exception("Error listing users (admin): %s", e)
+        logger.error(f"Error listing users (admin): {e!s}", exc_info=True)
        raise


@@ -335,14 +364,14 @@ async def admin_create_user(
    Allows setting is_superuser and other fields.
    """
    try:
-        user = await user_service.create_user(db, user_in)
-        logger.info("Admin %s created user %s", admin.email, user.email)
+        user = await user_crud.create(db, obj_in=user_in)
+        logger.info(f"Admin {admin.email} created user {user.email}")
        return user
-    except DuplicateEntryError as e:
-        logger.warning("Failed to create user: %s", e)
-        raise DuplicateError(message=str(e), error_code=ErrorCode.USER_ALREADY_EXISTS)
+    except ValueError as e:
+        logger.warning(f"Failed to create user: {e!s}")
+        raise NotFoundError(message=str(e), error_code=ErrorCode.USER_ALREADY_EXISTS)
    except Exception as e:
-        logger.exception("Error creating user (admin): %s", e)
+        logger.error(f"Error creating user (admin): {e!s}", exc_info=True)
        raise


@@ -359,7 +388,11 @@ async def admin_get_user(
    db: AsyncSession = Depends(get_db),
) -> Any:
    """Get detailed information about a specific user."""
-    user = await user_service.get_user(db, str(user_id))
+    user = await user_crud.get(db, id=user_id)
+    if not user:
+        raise NotFoundError(
+            message=f"User {user_id} not found", error_code=ErrorCode.USER_NOT_FOUND
+        )
    return user


@@ -378,13 +411,20 @@ async def admin_update_user(
) -> Any:
    """Update user information with admin privileges."""
    try:
-        user = await user_service.get_user(db, str(user_id))
-        updated_user = await user_service.update_user(db, user=user, obj_in=user_in)
-        logger.info("Admin %s updated user %s", admin.email, updated_user.email)
+        user = await user_crud.get(db, id=user_id)
+        if not user:
+            raise NotFoundError(
+                message=f"User {user_id} not found", error_code=ErrorCode.USER_NOT_FOUND
+            )
+
+        updated_user = await user_crud.update(db, db_obj=user, obj_in=user_in)
+        logger.info(f"Admin {admin.email} updated user {updated_user.email}")
        return updated_user

    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error updating user (admin): %s", e)
+        logger.error(f"Error updating user (admin): {e!s}", exc_info=True)
        raise


@@ -402,7 +442,11 @@ async def admin_delete_user(
) -> Any:
    """Soft delete a user (sets deleted_at timestamp)."""
    try:
-        user = await user_service.get_user(db, str(user_id))
+        user = await user_crud.get(db, id=user_id)
+        if not user:
+            raise NotFoundError(
+                message=f"User {user_id} not found", error_code=ErrorCode.USER_NOT_FOUND
+            )

        # Prevent deleting yourself
        if user.id == admin.id:
@@ -412,15 +456,17 @@ async def admin_delete_user(
                error_code=ErrorCode.OPERATION_FORBIDDEN,
            )

-        await user_service.soft_delete_user(db, str(user_id))
-        logger.info("Admin %s deleted user %s", admin.email, user.email)
+        await user_crud.soft_delete(db, id=user_id)
+        logger.info(f"Admin {admin.email} deleted user {user.email}")

        return MessageResponse(
            success=True, message=f"User {user.email} has been deleted"
        )

    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error deleting user (admin): %s", e)
+        logger.error(f"Error deleting user (admin): {e!s}", exc_info=True)
        raise


@@ -438,16 +484,23 @@ async def admin_activate_user(
) -> Any:
    """Activate a user account."""
    try:
-        user = await user_service.get_user(db, str(user_id))
-        await user_service.update_user(db, user=user, obj_in={"is_active": True})
-        logger.info("Admin %s activated user %s", admin.email, user.email)
+        user = await user_crud.get(db, id=user_id)
+        if not user:
+            raise NotFoundError(
+                message=f"User {user_id} not found", error_code=ErrorCode.USER_NOT_FOUND
+            )
+
+        await user_crud.update(db, db_obj=user, obj_in={"is_active": True})
+        logger.info(f"Admin {admin.email} activated user {user.email}")

        return MessageResponse(
            success=True, message=f"User {user.email} has been activated"
        )

    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error activating user (admin): %s", e)
+        logger.error(f"Error activating user (admin): {e!s}", exc_info=True)
        raise


@@ -465,7 +518,11 @@ async def admin_deactivate_user(
) -> Any:
    """Deactivate a user account."""
    try:
-        user = await user_service.get_user(db, str(user_id))
+        user = await user_crud.get(db, id=user_id)
+        if not user:
+            raise NotFoundError(
+                message=f"User {user_id} not found", error_code=ErrorCode.USER_NOT_FOUND
+            )

        # Prevent deactivating yourself
        if user.id == admin.id:
@@ -475,15 +532,17 @@ async def admin_deactivate_user(
                error_code=ErrorCode.OPERATION_FORBIDDEN,
            )

-        await user_service.update_user(db, user=user, obj_in={"is_active": False})
-        logger.info("Admin %s deactivated user %s", admin.email, user.email)
+        await user_crud.update(db, db_obj=user, obj_in={"is_active": False})
+        logger.info(f"Admin {admin.email} deactivated user {user.email}")

        return MessageResponse(
            success=True, message=f"User {user.email} has been deactivated"
        )

    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error deactivating user (admin): %s", e)
+        logger.error(f"Error deactivating user (admin): {e!s}", exc_info=True)
        raise

@@ -508,16 +567,16 @@ async def admin_bulk_user_action(
    try:
        # Use efficient bulk operations instead of loop
        if bulk_action.action == BulkAction.ACTIVATE:
-            affected_count = await user_service.bulk_update_status(
+            affected_count = await user_crud.bulk_update_status(
                db, user_ids=bulk_action.user_ids, is_active=True
            )
        elif bulk_action.action == BulkAction.DEACTIVATE:
-            affected_count = await user_service.bulk_update_status(
+            affected_count = await user_crud.bulk_update_status(
                db, user_ids=bulk_action.user_ids, is_active=False
            )
        elif bulk_action.action == BulkAction.DELETE:
            # bulk_soft_delete automatically excludes the admin user
-            affected_count = await user_service.bulk_soft_delete(
+            affected_count = await user_crud.bulk_soft_delete(
                db, user_ids=bulk_action.user_ids, exclude_user_id=admin.id
            )
        else:  # pragma: no cover
@@ -528,11 +587,8 @@ async def admin_bulk_user_action(
        failed_count = requested_count - affected_count

        logger.info(
-            "Admin %s performed bulk %s on %s users (%s skipped/failed)",
-            admin.email,
-            bulk_action.action.value,
-            affected_count,
-            failed_count,
+            f"Admin {admin.email} performed bulk {bulk_action.action.value} "
+            f"on {affected_count} users ({failed_count} skipped/failed)"
        )

        return BulkActionResult(
@@ -544,7 +600,7 @@ async def admin_bulk_user_action(
        )

    except Exception as e:  # pragma: no cover
-        logger.exception("Error in bulk user action: %s", e)
+        logger.error(f"Error in bulk user action: {e!s}", exc_info=True)
        raise


@@ -568,7 +624,7 @@ async def admin_list_organizations(
    """List all organizations with filtering and search."""
    try:
        # Use optimized method that gets member counts in single query (no N+1)
-        orgs_with_data, total = await organization_service.get_multi_with_member_counts(
+        orgs_with_data, total = await organization_crud.get_multi_with_member_counts(
            db,
            skip=pagination.offset,
            limit=pagination.limit,
@@ -605,7 +661,7 @@ async def admin_list_organizations(
        return PaginatedResponse(data=orgs_with_count, pagination=pagination_meta)

    except Exception as e:
-        logger.exception("Error listing organizations (admin): %s", e)
+        logger.error(f"Error listing organizations (admin): {e!s}", exc_info=True)
        raise


@@ -624,8 +680,8 @@ async def admin_create_organization(
) -> Any:
    """Create a new organization."""
    try:
-        org = await organization_service.create_organization(db, obj_in=org_in)
-        logger.info("Admin %s created organization %s", admin.email, org.name)
+        org = await organization_crud.create(db, obj_in=org_in)
+        logger.info(f"Admin {admin.email} created organization {org.name}")

        # Add member count
        org_dict = {
@@ -641,11 +697,11 @@ async def admin_create_organization(
        }
        return OrganizationResponse(**org_dict)

-    except DuplicateEntryError as e:
-        logger.warning("Failed to create organization: %s", e)
-        raise DuplicateError(message=str(e), error_code=ErrorCode.ALREADY_EXISTS)
+    except ValueError as e:
+        logger.warning(f"Failed to create organization: {e!s}")
+        raise NotFoundError(message=str(e), error_code=ErrorCode.ALREADY_EXISTS)
    except Exception as e:
-        logger.exception("Error creating organization (admin): %s", e)
+        logger.error(f"Error creating organization (admin): {e!s}", exc_info=True)
        raise


@@ -662,7 +718,12 @@ async def admin_get_organization(
    db: AsyncSession = Depends(get_db),
) -> Any:
    """Get detailed information about a specific organization."""
-    org = await organization_service.get_organization(db, str(org_id))
+    org = await organization_crud.get(db, id=org_id)
+    if not org:
+        raise NotFoundError(
+            message=f"Organization {org_id} not found", error_code=ErrorCode.NOT_FOUND
+        )

    org_dict = {
        "id": org.id,
        "name": org.name,
@@ -672,7 +733,7 @@ async def admin_get_organization(
        "settings": org.settings,
        "created_at": org.created_at,
        "updated_at": org.updated_at,
-        "member_count": await organization_service.get_member_count(
+        "member_count": await organization_crud.get_member_count(
            db, organization_id=org.id
        ),
    }
@@ -694,11 +755,15 @@ async def admin_update_organization(
) -> Any:
    """Update organization information."""
    try:
-        org = await organization_service.get_organization(db, str(org_id))
-        updated_org = await organization_service.update_organization(
-            db, org=org, obj_in=org_in
-        )
-        logger.info("Admin %s updated organization %s", admin.email, updated_org.name)
+        org = await organization_crud.get(db, id=org_id)
+        if not org:
+            raise NotFoundError(
+                message=f"Organization {org_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )
+
+        updated_org = await organization_crud.update(db, db_obj=org, obj_in=org_in)
+        logger.info(f"Admin {admin.email} updated organization {updated_org.name}")

        org_dict = {
            "id": updated_org.id,
@@ -709,14 +774,16 @@ async def admin_update_organization(
            "settings": updated_org.settings,
            "created_at": updated_org.created_at,
            "updated_at": updated_org.updated_at,
-            "member_count": await organization_service.get_member_count(
+            "member_count": await organization_crud.get_member_count(
                db, organization_id=updated_org.id
            ),
        }
        return OrganizationResponse(**org_dict)

    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error updating organization (admin): %s", e)
+        logger.error(f"Error updating organization (admin): {e!s}", exc_info=True)
        raise


@@ -734,16 +801,24 @@ async def admin_delete_organization(
) -> Any:
    """Delete an organization and all its relationships."""
    try:
-        org = await organization_service.get_organization(db, str(org_id))
-        await organization_service.remove_organization(db, str(org_id))
-        logger.info("Admin %s deleted organization %s", admin.email, org.name)
+        org = await organization_crud.get(db, id=org_id)
+        if not org:
+            raise NotFoundError(
+                message=f"Organization {org_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )
+
+        await organization_crud.remove(db, id=org_id)
+        logger.info(f"Admin {admin.email} deleted organization {org.name}")

        return MessageResponse(
            success=True, message=f"Organization {org.name} has been deleted"
        )

    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error deleting organization (admin): %s", e)
+        logger.error(f"Error deleting organization (admin): {e!s}", exc_info=True)
        raise

@@ -763,8 +838,14 @@ async def admin_list_organization_members(
) -> Any:
    """List all members of an organization."""
    try:
-        await organization_service.get_organization(db, str(org_id))  # validates exists
-        members, total = await organization_service.get_organization_members(
+        org = await organization_crud.get(db, id=org_id)
+        if not org:
+            raise NotFoundError(
+                message=f"Organization {org_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )
+
+        members, total = await organization_crud.get_organization_members(
            db,
            organization_id=org_id,
            skip=pagination.offset,
@@ -787,7 +868,9 @@ async def admin_list_organization_members(
    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error listing organization members (admin): %s", e)
+        logger.error(
+            f"Error listing organization members (admin): {e!s}", exc_info=True
+        )
        raise


@@ -815,32 +898,45 @@ async def admin_add_organization_member(
) -> Any:
    """Add a user to an organization."""
    try:
-        org = await organization_service.get_organization(db, str(org_id))
-        user = await user_service.get_user(db, str(request.user_id))
+        org = await organization_crud.get(db, id=org_id)
+        if not org:
+            raise NotFoundError(
+                message=f"Organization {org_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )

-        await organization_service.add_member(
+        user = await user_crud.get(db, id=request.user_id)
+        if not user:
+            raise NotFoundError(
+                message=f"User {request.user_id} not found",
+                error_code=ErrorCode.USER_NOT_FOUND,
+            )
+
+        await organization_crud.add_user(
            db, organization_id=org_id, user_id=request.user_id, role=request.role
        )

        logger.info(
-            "Admin %s added user %s to organization %s with role %s",
-            admin.email,
-            user.email,
-            org.name,
-            request.role.value,
+            f"Admin {admin.email} added user {user.email} to organization {org.name} "
+            f"with role {request.role.value}"
        )

        return MessageResponse(
            success=True, message=f"User {user.email} added to organization {org.name}"
        )

-    except DuplicateEntryError as e:
-        logger.warning("Failed to add user to organization: %s", e)
+    except ValueError as e:
+        logger.warning(f"Failed to add user to organization: {e!s}")
+        # Use DuplicateError for "already exists" scenarios
        raise DuplicateError(
            message=str(e), error_code=ErrorCode.USER_ALREADY_EXISTS, field="user_id"
        )
    except NotFoundError:
        raise
    except Exception as e:
-        logger.exception("Error adding member to organization (admin): %s", e)
+        logger.error(
+            f"Error adding member to organization (admin): {e!s}", exc_info=True
+        )
        raise


@@ -859,10 +955,20 @@ async def admin_remove_organization_member(
) -> Any:
    """Remove a user from an organization."""
    try:
-        org = await organization_service.get_organization(db, str(org_id))
-        user = await user_service.get_user(db, str(user_id))
+        org = await organization_crud.get(db, id=org_id)
+        if not org:
+            raise NotFoundError(
+                message=f"Organization {org_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )

-        success = await organization_service.remove_member(
+        user = await user_crud.get(db, id=user_id)
+        if not user:
+            raise NotFoundError(
+                message=f"User {user_id} not found", error_code=ErrorCode.USER_NOT_FOUND
+            )
+
+        success = await organization_crud.remove_user(
            db, organization_id=org_id, user_id=user_id
        )

@@ -873,10 +979,7 @@ async def admin_remove_organization_member(
            )

        logger.info(
-            "Admin %s removed user %s from organization %s",
-            admin.email,
-            user.email,
-            org.name,
+            f"Admin {admin.email} removed user {user.email} from organization {org.name}"
        )

        return MessageResponse(
@@ -887,7 +990,9 @@ async def admin_remove_organization_member(
    except NotFoundError:
        raise
    except Exception as e:  # pragma: no cover
-        logger.exception("Error removing member from organization (admin): %s", e)
+        logger.error(
+            f"Error removing member from organization (admin): {e!s}", exc_info=True
+        )
        raise


@@ -917,7 +1022,7 @@ async def admin_list_sessions(
    """List all sessions across all users with filtering and pagination."""
    try:
        # Get sessions with user info (eager loaded to prevent N+1)
-        sessions, total = await session_service.get_all_sessions(
+        sessions, total = await session_crud.get_all_sessions(
            db,
            skip=pagination.offset,
            limit=pagination.limit,
@@ -956,10 +1061,7 @@ async def admin_list_sessions(
            session_responses.append(session_response)

        logger.info(
-            "Admin %s listed %s sessions (total: %s)",
-            admin.email,
-            len(session_responses),
-            total,
+            f"Admin {admin.email} listed {len(session_responses)} sessions (total: {total})"
        )

        pagination_meta = create_pagination_meta(
@@ -972,5 +1074,5 @@ async def admin_list_sessions(
        return PaginatedResponse(data=session_responses, pagination=pagination_meta)

    except Exception as e:  # pragma: no cover
-        logger.exception("Error listing sessions (admin): %s", e)
+        logger.error(f"Error listing sessions (admin): {e!s}", exc_info=True)
        raise

backend/app/api/routes/agent_types.py (new file, 517 lines)
@@ -0,0 +1,517 @@
# app/api/routes/agent_types.py
"""
AgentType configuration API endpoints.

Provides CRUD operations for managing AI agent type templates.
Agent types define the base configuration (model, personality, expertise)
from which agent instances are spawned for projects.

Authorization:
- Read endpoints: Any authenticated user
- Write endpoints (create, update, delete): Superusers only
"""

import logging
import os
from typing import Any
from uuid import UUID

from fastapi import APIRouter, Depends, Query, Request, status
from slowapi import Limiter
from slowapi.util import get_remote_address
from sqlalchemy.ext.asyncio import AsyncSession

from app.api.dependencies.auth import get_current_user
from app.api.dependencies.permissions import require_superuser
from app.core.database import get_db
from app.core.exceptions import (
    DuplicateError,
    ErrorCode,
    NotFoundError,
)
from app.crud.syndarix.agent_type import agent_type as agent_type_crud
from app.models.user import User
from app.schemas.common import (
    MessageResponse,
    PaginatedResponse,
    PaginationParams,
    create_pagination_meta,
)
from app.schemas.syndarix import (
    AgentTypeCreate,
    AgentTypeResponse,
    AgentTypeUpdate,
)

router = APIRouter()
logger = logging.getLogger(__name__)

# Initialize limiter for this router
limiter = Limiter(key_func=get_remote_address)

# Use higher rate limits in test environment
IS_TEST = os.getenv("IS_TEST", "False") == "True"
RATE_MULTIPLIER = 100 if IS_TEST else 1

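# Illustrative expansion of the limit strings used on the routes below:
# with the default environment, a decorator written as
# @limiter.limit(f"{20 * RATE_MULTIPLIER}/minute") evaluates to "20/minute";
# with IS_TEST=True it becomes "2000/minute", which in effect disables
# throttling under test.

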
def _build_agent_type_response(
    agent_type: Any,
    instance_count: int = 0,
) -> AgentTypeResponse:
    """
    Build an AgentTypeResponse from a database model.

    Args:
        agent_type: AgentType model instance
        instance_count: Number of agent instances for this type

    Returns:
        AgentTypeResponse schema
    """
    return AgentTypeResponse(
        id=agent_type.id,
        name=agent_type.name,
        slug=agent_type.slug,
        description=agent_type.description,
        expertise=agent_type.expertise,
        personality_prompt=agent_type.personality_prompt,
        primary_model=agent_type.primary_model,
        fallback_models=agent_type.fallback_models,
        model_params=agent_type.model_params,
        mcp_servers=agent_type.mcp_servers,
        tool_permissions=agent_type.tool_permissions,
        is_active=agent_type.is_active,
        # Category and display fields
        category=agent_type.category,
        icon=agent_type.icon,
        color=agent_type.color,
        sort_order=agent_type.sort_order,
        typical_tasks=agent_type.typical_tasks or [],
        collaboration_hints=agent_type.collaboration_hints or [],
        created_at=agent_type.created_at,
        updated_at=agent_type.updated_at,
        instance_count=instance_count,
    )


# ===== Write Endpoints (Admin Only) =====


@router.post(
    "",
    response_model=AgentTypeResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create Agent Type",
    description="Create a new agent type configuration (admin only)",
    operation_id="create_agent_type",
)
@limiter.limit(f"{20 * RATE_MULTIPLIER}/minute")
async def create_agent_type(
    request: Request,
    agent_type_in: AgentTypeCreate,
    admin: User = Depends(require_superuser),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Create a new agent type configuration.

    Agent types define templates for AI agents including:
    - Model configuration (primary model, fallback models, parameters)
    - Personality and expertise areas
    - MCP server integrations and tool permissions

    Requires superuser privileges.

    Args:
        request: FastAPI request object
        agent_type_in: Agent type creation data
        admin: Authenticated superuser
        db: Database session

    Returns:
        The created agent type configuration

    Raises:
        DuplicateError: If slug already exists
    """
    try:
        agent_type = await agent_type_crud.create(db, obj_in=agent_type_in)
        logger.info(
            f"Admin {admin.email} created agent type: {agent_type.name} "
            f"(slug: {agent_type.slug})"
        )
        return _build_agent_type_response(agent_type, instance_count=0)

    except ValueError as e:
        logger.warning(f"Failed to create agent type: {e!s}")
        raise DuplicateError(
            message=str(e),
            error_code=ErrorCode.ALREADY_EXISTS,
            field="slug",
        )
    except Exception as e:
        logger.error(f"Error creating agent type: {e!s}", exc_info=True)
        raise


@router.patch(
    "/{agent_type_id}",
    response_model=AgentTypeResponse,
    summary="Update Agent Type",
    description="Update an existing agent type configuration (admin only)",
    operation_id="update_agent_type",
)
@limiter.limit(f"{30 * RATE_MULTIPLIER}/minute")
async def update_agent_type(
    request: Request,
    agent_type_id: UUID,
    agent_type_in: AgentTypeUpdate,
    admin: User = Depends(require_superuser),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Update an existing agent type configuration.

    Partial updates are supported - only provided fields will be updated.

    Requires superuser privileges.

    Args:
        request: FastAPI request object
        agent_type_id: UUID of the agent type to update
        agent_type_in: Agent type update data
        admin: Authenticated superuser
        db: Database session

    Returns:
        The updated agent type configuration

    Raises:
        NotFoundError: If agent type not found
        DuplicateError: If new slug already exists
    """
    try:
        # Verify agent type exists
        result = await agent_type_crud.get_with_instance_count(
            db, agent_type_id=agent_type_id
        )
        if not result:
            raise NotFoundError(
                message=f"Agent type {agent_type_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        existing_type = result["agent_type"]
        instance_count = result["instance_count"]

        # Perform update
        updated_type = await agent_type_crud.update(
            db, db_obj=existing_type, obj_in=agent_type_in
        )

        logger.info(
            f"Admin {admin.email} updated agent type: {updated_type.name} "
            f"(id: {agent_type_id})"
        )

        return _build_agent_type_response(updated_type, instance_count=instance_count)

    except NotFoundError:
        raise
    except ValueError as e:
        logger.warning(f"Failed to update agent type {agent_type_id}: {e!s}")
        raise DuplicateError(
            message=str(e),
            error_code=ErrorCode.ALREADY_EXISTS,
            field="slug",
        )
    except Exception as e:
        logger.error(f"Error updating agent type {agent_type_id}: {e!s}", exc_info=True)
        raise


@router.delete(
    "/{agent_type_id}",
    response_model=MessageResponse,
    summary="Deactivate Agent Type",
    description="Deactivate an agent type (soft delete, admin only)",
    operation_id="deactivate_agent_type",
)
@limiter.limit(f"{10 * RATE_MULTIPLIER}/minute")
async def deactivate_agent_type(
    request: Request,
    agent_type_id: UUID,
    admin: User = Depends(require_superuser),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Deactivate an agent type (soft delete).

    This sets is_active=False rather than deleting the record,
    preserving referential integrity with existing agent instances.

    Requires superuser privileges.

    Args:
        request: FastAPI request object
        agent_type_id: UUID of the agent type to deactivate
        admin: Authenticated superuser
        db: Database session

    Returns:
        Success message

    Raises:
        NotFoundError: If agent type not found
    """
    try:
        deactivated = await agent_type_crud.deactivate(db, agent_type_id=agent_type_id)

        if not deactivated:
            raise NotFoundError(
                message=f"Agent type {agent_type_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        logger.info(
            f"Admin {admin.email} deactivated agent type: {deactivated.name} "
            f"(id: {agent_type_id})"
        )

        return MessageResponse(
            success=True,
            message=f"Agent type '{deactivated.name}' has been deactivated",
        )

    except NotFoundError:
        raise
    except Exception as e:
        logger.error(
            f"Error deactivating agent type {agent_type_id}: {e!s}", exc_info=True
        )
        raise


# ===== Read Endpoints (Authenticated Users) =====


@router.get(
    "",
    response_model=PaginatedResponse[AgentTypeResponse],
    summary="List Agent Types",
    description="Get paginated list of active agent types",
    operation_id="list_agent_types",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def list_agent_types(
    request: Request,
    pagination: PaginationParams = Depends(),
    is_active: bool = Query(True, description="Filter by active status"),
    category: str | None = Query(None, description="Filter by category"),
    search: str | None = Query(None, description="Search by name, slug, description"),
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    List all agent types with pagination and filtering.

    By default, returns only active agent types. Set is_active=false
    to include deactivated types (useful for admin views).

    Args:
        request: FastAPI request object
        pagination: Pagination parameters (page, limit)
        is_active: Filter by active status (default: True)
        category: Filter by category (e.g., "development", "design")
        search: Optional search term for name, slug, description
        current_user: Authenticated user
        db: Database session

    Returns:
        Paginated list of agent types with instance counts
    """
    try:
        # Get agent types with instance counts
        results, total = await agent_type_crud.get_multi_with_instance_counts(
            db,
            skip=pagination.offset,
            limit=pagination.limit,
            is_active=is_active,
            category=category,
            search=search,
        )

        # Build response objects
        agent_types_response = [
            _build_agent_type_response(
                item["agent_type"],
                instance_count=item["instance_count"],
            )
            for item in results
        ]

        pagination_meta = create_pagination_meta(
            total=total,
            page=pagination.page,
            limit=pagination.limit,
            items_count=len(agent_types_response),
        )

        return PaginatedResponse(data=agent_types_response, pagination=pagination_meta)

    except Exception as e:
        logger.error(f"Error listing agent types: {e!s}", exc_info=True)
        raise


@router.get(
    "/grouped",
    response_model=dict[str, list[AgentTypeResponse]],
    summary="List Agent Types Grouped by Category",
    description="Get all agent types organized by category",
    operation_id="list_agent_types_grouped",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def list_agent_types_grouped(
    request: Request,
    is_active: bool = Query(True, description="Filter by active status"),
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get agent types grouped by category.

    Returns a dictionary where keys are category names and values
    are lists of agent types, sorted by sort_order within each category.

    Args:
        request: FastAPI request object
        is_active: Filter by active status (default: True)
        current_user: Authenticated user
        db: Database session

    Returns:
        Dictionary mapping category to list of agent types
    """
    try:
        grouped = await agent_type_crud.get_grouped_by_category(db, is_active=is_active)

        # Transform to response objects
        result: dict[str, list[AgentTypeResponse]] = {}
        for category, types in grouped.items():
            result[category] = [
                _build_agent_type_response(t, instance_count=0) for t in types
            ]

        return result
    except Exception as e:
        logger.error(f"Error getting grouped agent types: {e!s}", exc_info=True)
        raise


@router.get(
    "/{agent_type_id}",
    response_model=AgentTypeResponse,
    summary="Get Agent Type",
    description="Get agent type details by ID",
    operation_id="get_agent_type",
)
@limiter.limit(f"{100 * RATE_MULTIPLIER}/minute")
async def get_agent_type(
    request: Request,
    agent_type_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get detailed information about a specific agent type.

    Args:
        request: FastAPI request object
        agent_type_id: UUID of the agent type
        current_user: Authenticated user
        db: Database session

    Returns:
        Agent type details with instance count

    Raises:
        NotFoundError: If agent type not found
    """
    try:
        result = await agent_type_crud.get_with_instance_count(
            db, agent_type_id=agent_type_id
        )

        if not result:
            raise NotFoundError(
                message=f"Agent type {agent_type_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        return _build_agent_type_response(
            result["agent_type"],
            instance_count=result["instance_count"],
        )

    except NotFoundError:
        raise
    except Exception as e:
        logger.error(f"Error getting agent type {agent_type_id}: {e!s}", exc_info=True)
        raise


@router.get(
    "/slug/{slug}",
    response_model=AgentTypeResponse,
    summary="Get Agent Type by Slug",
    description="Get agent type details by slug",
    operation_id="get_agent_type_by_slug",
)
@limiter.limit(f"{100 * RATE_MULTIPLIER}/minute")
async def get_agent_type_by_slug(
    request: Request,
    slug: str,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get detailed information about an agent type by its slug.

    Slugs are human-readable identifiers like "product-owner" or "backend-engineer".
    Useful for referencing agent types in configuration files or APIs.

    Args:
        request: FastAPI request object
        slug: Slug identifier of the agent type
        current_user: Authenticated user
        db: Database session

    Returns:
        Agent type details with instance count

    Raises:
        NotFoundError: If agent type not found
    """
    try:
        agent_type = await agent_type_crud.get_by_slug(db, slug=slug)

        if not agent_type:
            raise NotFoundError(
                message=f"Agent type with slug '{slug}' not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        # Get instance count separately
        result = await agent_type_crud.get_with_instance_count(
            db, agent_type_id=agent_type.id
        )
        instance_count = result["instance_count"] if result else 0

        return _build_agent_type_response(agent_type, instance_count=instance_count)

    except NotFoundError:
        raise
    except Exception as e:
        logger.error(f"Error getting agent type by slug '{slug}': {e!s}", exc_info=True)
        raise
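
# Usage sketch for the slug lookup (the path prefix depends on how this
# router is mounted in the app and is assumed here):
#   GET <mount>/slug/backend-engineer -> 200 with AgentTypeResponse
#   GET <mount>/slug/no-such-slug     -> NotFoundError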
backend/app/api/routes/agents.py (new file, 984 lines)
@@ -0,0 +1,984 @@
# app/api/routes/agents.py
"""
Agent Instance management endpoints for Syndarix projects.

These endpoints allow project owners and superusers to manage AI agent instances
within their projects, including spawning, pausing, resuming, and terminating agents.
"""

import logging
import os
from typing import Any
from uuid import UUID

from fastapi import APIRouter, Depends, Query, Request, status
from slowapi import Limiter
from slowapi.util import get_remote_address
from sqlalchemy.ext.asyncio import AsyncSession

from app.api.dependencies.auth import get_current_user
from app.core.database import get_db
from app.core.exceptions import (
    AuthorizationError,
    NotFoundError,
    ValidationException,
)
from app.crud.syndarix.agent_instance import agent_instance as agent_instance_crud
from app.crud.syndarix.agent_type import agent_type as agent_type_crud
from app.crud.syndarix.project import project as project_crud
from app.models.syndarix import AgentInstance, Project
from app.models.syndarix.enums import AgentStatus
from app.models.user import User
from app.schemas.common import (
    MessageResponse,
    PaginatedResponse,
    PaginationParams,
    create_pagination_meta,
)
from app.schemas.errors import ErrorCode
from app.schemas.syndarix.agent_instance import (
    AgentInstanceCreate,
    AgentInstanceMetrics,
    AgentInstanceResponse,
    AgentInstanceUpdate,
)

router = APIRouter()
logger = logging.getLogger(__name__)

# Initialize limiter for this router
limiter = Limiter(key_func=get_remote_address)

# Use higher rate limits in test environment
IS_TEST = os.getenv("IS_TEST", "False") == "True"
RATE_MULTIPLIER = 100 if IS_TEST else 1


# Valid status transitions for agent lifecycle management
VALID_STATUS_TRANSITIONS: dict[AgentStatus, set[AgentStatus]] = {
    AgentStatus.IDLE: {AgentStatus.WORKING, AgentStatus.PAUSED, AgentStatus.TERMINATED},
    AgentStatus.WORKING: {
        AgentStatus.IDLE,
        AgentStatus.WAITING,
        AgentStatus.PAUSED,
        AgentStatus.TERMINATED,
    },
    AgentStatus.WAITING: {
        AgentStatus.IDLE,
        AgentStatus.WORKING,
        AgentStatus.PAUSED,
        AgentStatus.TERMINATED,
    },
    AgentStatus.PAUSED: {AgentStatus.IDLE, AgentStatus.TERMINATED},
    AgentStatus.TERMINATED: set(),  # Terminal state, no transitions allowed
}


async def verify_project_access(
    db: AsyncSession,
    project_id: UUID,
    user: User,
) -> Project:
    """
    Verify user has access to a project.

    Args:
        db: Database session
        project_id: UUID of the project to verify
        user: Current authenticated user

    Returns:
        Project: The project if access is granted

    Raises:
        NotFoundError: If the project does not exist
        AuthorizationError: If the user does not have access to the project
    """
    project = await project_crud.get(db, id=project_id)
    if not project:
        raise NotFoundError(
            message=f"Project {project_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )
    if not user.is_superuser and project.owner_id != user.id:
        raise AuthorizationError(
            message="You do not have access to this project",
            error_code=ErrorCode.INSUFFICIENT_PERMISSIONS,
        )
    return project


def validate_status_transition(
    current_status: AgentStatus,
    target_status: AgentStatus,
) -> None:
    """
    Validate that a status transition is allowed.

    Args:
        current_status: The agent's current status
        target_status: The desired target status

    Raises:
        ValidationException: If the transition is not allowed
    """
    valid_targets = VALID_STATUS_TRANSITIONS.get(current_status, set())
    if target_status not in valid_targets:
        raise ValidationException(
            message=f"Cannot transition from {current_status.value} to {target_status.value}",
            error_code=ErrorCode.VALIDATION_ERROR,
            field="status",
        )

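# Worked example of the state machine above (behavior follows
# VALID_STATUS_TRANSITIONS as defined):
#
#   validate_status_transition(AgentStatus.IDLE, AgentStatus.WORKING)
#   # -> returns None; IDLE may move to WORKING, PAUSED, or TERMINATED
#
#   validate_status_transition(AgentStatus.PAUSED, AgentStatus.WORKING)
#   # -> raises ValidationException; a paused agent must go back through
#   #    IDLE (or be TERMINATED) before it can work again
#
#   validate_status_transition(AgentStatus.TERMINATED, AgentStatus.IDLE)
#   # -> raises ValidationException; TERMINATED is terminal

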
def build_agent_response(
    agent: AgentInstance,
    agent_type_name: str | None = None,
    agent_type_slug: str | None = None,
    project_name: str | None = None,
    project_slug: str | None = None,
    assigned_issues_count: int = 0,
) -> AgentInstanceResponse:
    """
    Build an AgentInstanceResponse from an AgentInstance model.

    Args:
        agent: The agent instance model
        agent_type_name: Name of the agent type
        agent_type_slug: Slug of the agent type
        project_name: Name of the project
        project_slug: Slug of the project
        assigned_issues_count: Number of issues assigned to this agent

    Returns:
        AgentInstanceResponse: The response schema
    """
    return AgentInstanceResponse(
        id=agent.id,
        agent_type_id=agent.agent_type_id,
        project_id=agent.project_id,
        name=agent.name,
        status=agent.status,
        current_task=agent.current_task,
        short_term_memory=agent.short_term_memory or {},
        long_term_memory_ref=agent.long_term_memory_ref,
        session_id=agent.session_id,
        last_activity_at=agent.last_activity_at,
        terminated_at=agent.terminated_at,
        tasks_completed=agent.tasks_completed,
        tokens_used=agent.tokens_used,
        cost_incurred=agent.cost_incurred,
        created_at=agent.created_at,
        updated_at=agent.updated_at,
        agent_type_name=agent_type_name,
        agent_type_slug=agent_type_slug,
        project_name=project_name,
        project_slug=project_slug,
        assigned_issues_count=assigned_issues_count,
    )


# ===== Agent Instance Management Endpoints =====


@router.post(
    "/projects/{project_id}/agents",
    response_model=AgentInstanceResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Spawn Agent Instance",
    description="Spawn a new agent instance in a project. Requires project ownership or superuser.",
    operation_id="spawn_agent",
)
@limiter.limit(f"{20 * RATE_MULTIPLIER}/minute")
async def spawn_agent(
    request: Request,
    project_id: UUID,
    agent_in: AgentInstanceCreate,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Spawn a new agent instance in a project.

    Creates a new agent instance from an agent type template and assigns it
    to the specified project. The agent starts in IDLE status by default.

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project to spawn the agent in
        agent_in: Agent instance creation data
        current_user: Current authenticated user
        db: Database session

    Returns:
        AgentInstanceResponse: The newly created agent instance

    Raises:
        NotFoundError: If the project is not found
        AuthorizationError: If the user lacks access to the project
        ValidationException: If the agent creation data is invalid
    """
    try:
        # Verify project access
        project = await verify_project_access(db, project_id, current_user)

        # Ensure the agent is being created for the correct project
        if agent_in.project_id != project_id:
            raise ValidationException(
                message="Agent project_id must match the URL project_id",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="project_id",
            )

        # Validate that the agent type exists and is active
        agent_type = await agent_type_crud.get(db, id=agent_in.agent_type_id)
        if not agent_type:
            raise NotFoundError(
                message=f"Agent type {agent_in.agent_type_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )
        if not agent_type.is_active:
            raise ValidationException(
                message=f"Agent type '{agent_type.name}' is inactive and cannot be used",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="agent_type_id",
            )

        # Create the agent instance
        agent = await agent_instance_crud.create(db, obj_in=agent_in)

        logger.info(
            f"User {current_user.email} spawned agent '{agent.name}' "
            f"(id={agent.id}) in project {project.slug}"
        )

        # Get agent details for response
        details = await agent_instance_crud.get_with_details(db, instance_id=agent.id)
        if details:
            return build_agent_response(
                agent=details["instance"],
                agent_type_name=details.get("agent_type_name"),
                agent_type_slug=details.get("agent_type_slug"),
                project_name=details.get("project_name"),
                project_slug=details.get("project_slug"),
                assigned_issues_count=details.get("assigned_issues_count", 0),
            )

        return build_agent_response(agent)

    except (NotFoundError, AuthorizationError, ValidationException):
        raise
    except ValueError as e:
        logger.warning(f"Failed to spawn agent: {e!s}")
        raise ValidationException(
            message=str(e),
            error_code=ErrorCode.VALIDATION_ERROR,
        )
    except Exception as e:
        logger.error(f"Error spawning agent: {e!s}", exc_info=True)
        raise


@router.get(
    "/projects/{project_id}/agents",
    response_model=PaginatedResponse[AgentInstanceResponse],
    summary="List Project Agents",
    description="List all agent instances in a project with optional filtering.",
    operation_id="list_project_agents",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def list_project_agents(
    request: Request,
    project_id: UUID,
    pagination: PaginationParams = Depends(),
    status_filter: AgentStatus | None = Query(
        None, alias="status", description="Filter by agent status"
    ),
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    List all agent instances in a project.

    Returns a paginated list of agents with optional status filtering.
    Results are ordered by creation date (newest first).

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project
        pagination: Pagination parameters
        status_filter: Optional filter by agent status
        current_user: Current authenticated user
        db: Database session

    Returns:
        PaginatedResponse[AgentInstanceResponse]: Paginated list of agents

    Raises:
        NotFoundError: If the project is not found
        AuthorizationError: If the user lacks access to the project
    """
    try:
        # Verify project access
        project = await verify_project_access(db, project_id, current_user)

        # Get agents for the project
        agents, total = await agent_instance_crud.get_by_project(
            db,
            project_id=project_id,
            status=status_filter,
            skip=pagination.offset,
            limit=pagination.limit,
        )

        # Build response objects
        agent_responses = []
        for agent in agents:
            # Get details for each agent (could be optimized with bulk query)
            details = await agent_instance_crud.get_with_details(
                db, instance_id=agent.id
            )
            if details:
                agent_responses.append(
                    build_agent_response(
                        agent=details["instance"],
                        agent_type_name=details.get("agent_type_name"),
                        agent_type_slug=details.get("agent_type_slug"),
                        project_name=details.get("project_name"),
                        project_slug=details.get("project_slug"),
                        assigned_issues_count=details.get("assigned_issues_count", 0),
                    )
                )
            else:
                agent_responses.append(build_agent_response(agent))

        pagination_meta = create_pagination_meta(
            total=total,
            page=pagination.page,
            limit=pagination.limit,
            items_count=len(agent_responses),
        )

        logger.debug(
            f"User {current_user.email} listed {len(agent_responses)} agents "
            f"in project {project.slug}"
        )

        return PaginatedResponse(data=agent_responses, pagination=pagination_meta)

    except (NotFoundError, AuthorizationError):
        raise
    except Exception as e:
        logger.error(f"Error listing project agents: {e!s}", exc_info=True)
        raise


# ===== Project Agent Metrics Endpoint =====
# NOTE: This endpoint MUST be defined before /{agent_id} routes
# to prevent FastAPI from trying to parse "metrics" as a UUID

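# Sketch of the failure mode this ordering avoids: if the /{agent_id}
# route were registered first, GET .../agents/metrics would match it,
# FastAPI would attempt UUID("metrics") while validating the path
# parameter, and the request would fail validation instead of reaching
# the metrics handler below.

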
@router.get(
    "/projects/{project_id}/agents/metrics",
    response_model=AgentInstanceMetrics,
    summary="Get Project Agent Metrics",
    description="Get aggregated usage metrics for all agents in a project.",
    operation_id="get_project_agent_metrics",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def get_project_agent_metrics(
    request: Request,
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get aggregated usage metrics for all agents in a project.

    Returns aggregated metrics across all agents including total
    tasks completed, tokens used, and cost incurred.

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project
        current_user: Current authenticated user
        db: Database session

    Returns:
        AgentInstanceMetrics: Aggregated project agent metrics

    Raises:
        NotFoundError: If the project is not found
        AuthorizationError: If the user lacks access to the project
    """
    try:
        # Verify project access
        project = await verify_project_access(db, project_id, current_user)

        # Get aggregated metrics for the project
        metrics = await agent_instance_crud.get_project_metrics(
            db, project_id=project_id
        )

        logger.debug(
            f"User {current_user.email} retrieved project metrics for {project.slug}"
        )

        return AgentInstanceMetrics(
            total_instances=metrics["total_instances"],
            active_instances=metrics["active_instances"],
            idle_instances=metrics["idle_instances"],
            total_tasks_completed=metrics["total_tasks_completed"],
            total_tokens_used=metrics["total_tokens_used"],
            total_cost_incurred=metrics["total_cost_incurred"],
        )

    except (NotFoundError, AuthorizationError):
        raise
    except Exception as e:
        logger.error(f"Error getting project agent metrics: {e!s}", exc_info=True)
        raise


@router.get(
    "/projects/{project_id}/agents/{agent_id}",
    response_model=AgentInstanceResponse,
    summary="Get Agent Details",
    description="Get detailed information about a specific agent instance.",
    operation_id="get_agent",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def get_agent(
    request: Request,
    project_id: UUID,
    agent_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get detailed information about a specific agent instance.

    Returns full agent details including related entity information
    (agent type name, project name) and assigned issues count.

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project
        agent_id: UUID of the agent instance
        current_user: Current authenticated user
        db: Database session

    Returns:
        AgentInstanceResponse: The agent instance details

    Raises:
        NotFoundError: If the project or agent is not found
        AuthorizationError: If the user lacks access to the project
    """
    try:
        # Verify project access
        await verify_project_access(db, project_id, current_user)

        # Get agent with full details
        details = await agent_instance_crud.get_with_details(db, instance_id=agent_id)

        if not details:
            raise NotFoundError(
                message=f"Agent {agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        agent = details["instance"]

        # Verify agent belongs to the specified project
        if agent.project_id != project_id:
            raise NotFoundError(
                message=f"Agent {agent_id} not found in project {project_id}",
                error_code=ErrorCode.NOT_FOUND,
            )

        logger.debug(
            f"User {current_user.email} retrieved agent {agent.name} (id={agent_id})"
        )

        return build_agent_response(
            agent=agent,
            agent_type_name=details.get("agent_type_name"),
            agent_type_slug=details.get("agent_type_slug"),
            project_name=details.get("project_name"),
            project_slug=details.get("project_slug"),
            assigned_issues_count=details.get("assigned_issues_count", 0),
        )

    except (NotFoundError, AuthorizationError):
        raise
    except Exception as e:
        logger.error(f"Error getting agent details: {e!s}", exc_info=True)
        raise


@router.patch(
    "/projects/{project_id}/agents/{agent_id}",
    response_model=AgentInstanceResponse,
    summary="Update Agent",
    description="Update an agent instance's configuration and state.",
    operation_id="update_agent",
)
@limiter.limit(f"{30 * RATE_MULTIPLIER}/minute")
async def update_agent(
    request: Request,
    project_id: UUID,
    agent_id: UUID,
    agent_in: AgentInstanceUpdate,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Update an agent instance's configuration and state.

    Allows updating agent status, current task, memory, and other
    configurable fields. Status transitions are validated according
    to the agent lifecycle state machine.

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project
        agent_id: UUID of the agent instance
        agent_in: Agent update data
        current_user: Current authenticated user
        db: Database session

    Returns:
        AgentInstanceResponse: The updated agent instance

    Raises:
        NotFoundError: If the project or agent is not found
        AuthorizationError: If the user lacks access to the project
        ValidationException: If the status transition is invalid
    """
    try:
        # Verify project access
        await verify_project_access(db, project_id, current_user)

        # Get current agent
        agent = await agent_instance_crud.get(db, id=agent_id)
        if not agent:
            raise NotFoundError(
                message=f"Agent {agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        # Verify agent belongs to the specified project
        if agent.project_id != project_id:
            raise NotFoundError(
                message=f"Agent {agent_id} not found in project {project_id}",
                error_code=ErrorCode.NOT_FOUND,
            )

        # Validate status transition if status is being changed
        if agent_in.status is not None and agent_in.status != agent.status:
            validate_status_transition(agent.status, agent_in.status)

        # Update the agent
        updated_agent = await agent_instance_crud.update(
            db, db_obj=agent, obj_in=agent_in
        )

        logger.info(
            f"User {current_user.email} updated agent {updated_agent.name} "
            f"(id={agent_id})"
        )

        # Get updated details
        details = await agent_instance_crud.get_with_details(
            db, instance_id=updated_agent.id
        )
        if details:
            return build_agent_response(
                agent=details["instance"],
                agent_type_name=details.get("agent_type_name"),
                agent_type_slug=details.get("agent_type_slug"),
                project_name=details.get("project_name"),
                project_slug=details.get("project_slug"),
                assigned_issues_count=details.get("assigned_issues_count", 0),
            )

        return build_agent_response(updated_agent)

    except (NotFoundError, AuthorizationError, ValidationException):
        raise
    except Exception as e:
        logger.error(f"Error updating agent: {e!s}", exc_info=True)
        raise


@router.post(
    "/projects/{project_id}/agents/{agent_id}/pause",
    response_model=AgentInstanceResponse,
    summary="Pause Agent",
    description="Pause an agent instance, temporarily stopping its work.",
    operation_id="pause_agent",
)
@limiter.limit(f"{20 * RATE_MULTIPLIER}/minute")
async def pause_agent(
    request: Request,
    project_id: UUID,
    agent_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Pause an agent instance.

    Transitions the agent to PAUSED status, temporarily stopping
    its work. The agent can be resumed later with the resume endpoint.

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project
        agent_id: UUID of the agent instance
        current_user: Current authenticated user
        db: Database session

    Returns:
        AgentInstanceResponse: The paused agent instance

    Raises:
        NotFoundError: If the project or agent is not found
        AuthorizationError: If the user lacks access to the project
        ValidationException: If the agent cannot be paused from its current state
    """
    try:
        # Verify project access
        await verify_project_access(db, project_id, current_user)

        # Get current agent
        agent = await agent_instance_crud.get(db, id=agent_id)
        if not agent:
            raise NotFoundError(
                message=f"Agent {agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        # Verify agent belongs to the specified project
        if agent.project_id != project_id:
            raise NotFoundError(
                message=f"Agent {agent_id} not found in project {project_id}",
                error_code=ErrorCode.NOT_FOUND,
            )

        # Validate the transition to PAUSED
        validate_status_transition(agent.status, AgentStatus.PAUSED)

        # Update status to PAUSED
        paused_agent = await agent_instance_crud.update_status(
            db,
            instance_id=agent_id,
            status=AgentStatus.PAUSED,
        )

        if not paused_agent:
            raise NotFoundError(
                message=f"Agent {agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        logger.info(
            f"User {current_user.email} paused agent {paused_agent.name} "
            f"(id={agent_id})"
        )

        # Get updated details
        details = await agent_instance_crud.get_with_details(
            db, instance_id=paused_agent.id
        )
        if details:
            return build_agent_response(
                agent=details["instance"],
                agent_type_name=details.get("agent_type_name"),
                agent_type_slug=details.get("agent_type_slug"),
                project_name=details.get("project_name"),
                project_slug=details.get("project_slug"),
                assigned_issues_count=details.get("assigned_issues_count", 0),
            )

        return build_agent_response(paused_agent)

    except (NotFoundError, AuthorizationError, ValidationException):
        raise
    except Exception as e:
        logger.error(f"Error pausing agent: {e!s}", exc_info=True)
        raise


@router.post(
    "/projects/{project_id}/agents/{agent_id}/resume",
    response_model=AgentInstanceResponse,
    summary="Resume Agent",
    description="Resume a paused agent instance.",
    operation_id="resume_agent",
)
@limiter.limit(f"{20 * RATE_MULTIPLIER}/minute")
|
||||
async def resume_agent(
|
||||
request: Request,
|
||||
project_id: UUID,
|
||||
agent_id: UUID,
|
||||
current_user: User = Depends(get_current_user),
|
||||
db: AsyncSession = Depends(get_db),
|
||||
) -> Any:
|
||||
"""
|
||||
Resume a paused agent instance.
|
||||
|
||||
Transitions the agent from PAUSED back to IDLE status,
|
||||
allowing it to accept new work.
|
||||
|
||||
Args:
|
||||
request: FastAPI request object (for rate limiting)
|
||||
project_id: UUID of the project
|
||||
agent_id: UUID of the agent instance
|
||||
current_user: Current authenticated user
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
AgentInstanceResponse: The resumed agent instance
|
||||
|
||||
Raises:
|
||||
NotFoundError: If the project or agent is not found
|
||||
AuthorizationError: If the user lacks access to the project
|
||||
ValidationException: If the agent cannot be resumed from its current state
|
||||
"""
|
||||
try:
|
||||
# Verify project access
|
||||
await verify_project_access(db, project_id, current_user)
|
||||
|
||||
# Get current agent
|
||||
agent = await agent_instance_crud.get(db, id=agent_id)
|
||||
if not agent:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
# Verify agent belongs to the specified project
|
||||
if agent.project_id != project_id:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found in project {project_id}",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
# Validate the transition to IDLE (resume)
|
||||
validate_status_transition(agent.status, AgentStatus.IDLE)
|
||||
|
||||
# Update status to IDLE
|
||||
resumed_agent = await agent_instance_crud.update_status(
|
||||
db,
|
||||
instance_id=agent_id,
|
||||
status=AgentStatus.IDLE,
|
||||
)
|
||||
|
||||
if not resumed_agent:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"User {current_user.email} resumed agent {resumed_agent.name} "
|
||||
f"(id={agent_id})"
|
||||
)
|
||||
|
||||
# Get updated details
|
||||
details = await agent_instance_crud.get_with_details(
|
||||
db, instance_id=resumed_agent.id
|
||||
)
|
||||
if details:
|
||||
return build_agent_response(
|
||||
agent=details["instance"],
|
||||
agent_type_name=details.get("agent_type_name"),
|
||||
agent_type_slug=details.get("agent_type_slug"),
|
||||
project_name=details.get("project_name"),
|
||||
project_slug=details.get("project_slug"),
|
||||
assigned_issues_count=details.get("assigned_issues_count", 0),
|
||||
)
|
||||
|
||||
return build_agent_response(resumed_agent)
|
||||
|
||||
except (NotFoundError, AuthorizationError, ValidationException):
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error resuming agent: {e!s}", exc_info=True)
|
||||
raise
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/projects/{project_id}/agents/{agent_id}",
|
||||
response_model=MessageResponse,
|
||||
summary="Terminate Agent",
|
||||
description="Terminate an agent instance, permanently stopping it.",
|
||||
operation_id="terminate_agent",
|
||||
)
|
||||
@limiter.limit(f"{10 * RATE_MULTIPLIER}/minute")
|
||||
async def terminate_agent(
|
||||
request: Request,
|
||||
project_id: UUID,
|
||||
agent_id: UUID,
|
||||
current_user: User = Depends(get_current_user),
|
||||
db: AsyncSession = Depends(get_db),
|
||||
) -> Any:
|
||||
"""
|
||||
Terminate an agent instance.
|
||||
|
||||
Permanently terminates the agent, setting its status to TERMINATED.
|
||||
This action cannot be undone - a new agent must be spawned if needed.
|
||||
The agent's session and current task are cleared.
|
||||
|
||||
Args:
|
||||
request: FastAPI request object (for rate limiting)
|
||||
project_id: UUID of the project
|
||||
agent_id: UUID of the agent instance
|
||||
current_user: Current authenticated user
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
MessageResponse: Confirmation message
|
||||
|
||||
Raises:
|
||||
NotFoundError: If the project or agent is not found
|
||||
AuthorizationError: If the user lacks access to the project
|
||||
ValidationException: If the agent is already terminated
|
||||
"""
|
||||
try:
|
||||
# Verify project access
|
||||
await verify_project_access(db, project_id, current_user)
|
||||
|
||||
# Get current agent
|
||||
agent = await agent_instance_crud.get(db, id=agent_id)
|
||||
if not agent:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
# Verify agent belongs to the specified project
|
||||
if agent.project_id != project_id:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found in project {project_id}",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
# Check if already terminated
|
||||
if agent.status == AgentStatus.TERMINATED:
|
||||
raise ValidationException(
|
||||
message="Agent is already terminated",
|
||||
error_code=ErrorCode.VALIDATION_ERROR,
|
||||
field="status",
|
||||
)
|
||||
|
||||
# Validate the transition to TERMINATED
|
||||
validate_status_transition(agent.status, AgentStatus.TERMINATED)
|
||||
|
||||
agent_name = agent.name
|
||||
|
||||
# Terminate the agent
|
||||
terminated_agent = await agent_instance_crud.terminate(db, instance_id=agent_id)
|
||||
|
||||
if not terminated_agent:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"User {current_user.email} terminated agent {agent_name} (id={agent_id})"
|
||||
)
|
||||
|
||||
return MessageResponse(
|
||||
success=True,
|
||||
message=f"Agent '{agent_name}' has been terminated",
|
||||
)
|
||||
|
||||
except (NotFoundError, AuthorizationError, ValidationException):
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error terminating agent: {e!s}", exc_info=True)
|
||||
raise
|
||||
|
||||
|
||||
@router.get(
|
||||
"/projects/{project_id}/agents/{agent_id}/metrics",
|
||||
response_model=AgentInstanceMetrics,
|
||||
summary="Get Agent Metrics",
|
||||
description="Get usage metrics for a specific agent instance.",
|
||||
operation_id="get_agent_metrics",
|
||||
)
|
||||
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
|
||||
async def get_agent_metrics(
|
||||
request: Request,
|
||||
project_id: UUID,
|
||||
agent_id: UUID,
|
||||
current_user: User = Depends(get_current_user),
|
||||
db: AsyncSession = Depends(get_db),
|
||||
) -> Any:
|
||||
"""
|
||||
Get usage metrics for a specific agent instance.
|
||||
|
||||
Returns metrics including tasks completed, tokens used,
|
||||
and cost incurred for the specified agent.
|
||||
|
||||
Args:
|
||||
request: FastAPI request object (for rate limiting)
|
||||
project_id: UUID of the project
|
||||
agent_id: UUID of the agent instance
|
||||
current_user: Current authenticated user
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
AgentInstanceMetrics: Agent usage metrics
|
||||
|
||||
Raises:
|
||||
NotFoundError: If the project or agent is not found
|
||||
AuthorizationError: If the user lacks access to the project
|
||||
"""
|
||||
try:
|
||||
# Verify project access
|
||||
await verify_project_access(db, project_id, current_user)
|
||||
|
||||
# Get agent
|
||||
agent = await agent_instance_crud.get(db, id=agent_id)
|
||||
if not agent:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
# Verify agent belongs to the specified project
|
||||
if agent.project_id != project_id:
|
||||
raise NotFoundError(
|
||||
message=f"Agent {agent_id} not found in project {project_id}",
|
||||
error_code=ErrorCode.NOT_FOUND,
|
||||
)
|
||||
|
||||
# Calculate metrics for this single agent
|
||||
# For a single agent, we report its individual metrics
|
||||
is_active = agent.status == AgentStatus.WORKING
|
||||
is_idle = agent.status == AgentStatus.IDLE
|
||||
|
||||
logger.debug(
|
||||
f"User {current_user.email} retrieved metrics for agent {agent.name} "
|
||||
f"(id={agent_id})"
|
||||
)
|
||||
|
||||
return AgentInstanceMetrics(
|
||||
total_instances=1,
|
||||
active_instances=1 if is_active else 0,
|
||||
idle_instances=1 if is_idle else 0,
|
||||
total_tasks_completed=agent.tasks_completed,
|
||||
total_tokens_used=agent.tokens_used,
|
||||
total_cost_incurred=agent.cost_incurred,
|
||||
)
|
||||
|
||||
except (NotFoundError, AuthorizationError):
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting agent metrics: {e!s}", exc_info=True)
|
||||
raise
|
||||
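Taken together, the pause/resume/terminate routes walk an agent through its lifecycle state machine, with every transition validated server-side before the status is written. A minimal client-side sketch of one pause/resume cycle, assuming a local deployment URL, an `/api/v1` mount prefix, and a placeholder bearer token (none of which appear in this file):

```python
import httpx

BASE_URL = "http://localhost:8000/api/v1"      # hypothetical deployment
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder token


def pause_then_resume(project_id: str, agent_id: str) -> None:
    """Drive one pause/resume cycle against the endpoints defined above."""
    with httpx.Client(base_url=BASE_URL, headers=HEADERS) as client:
        # PAUSED must be reachable from the agent's current status, or the
        # server rejects the call via validate_status_transition.
        resp = client.post(f"/projects/{project_id}/agents/{agent_id}/pause")
        resp.raise_for_status()
        print(resp.json().get("status"))  # expected: the PAUSED status value

        # Resume transitions the agent back to IDLE so it can take new work.
        resp = client.post(f"/projects/{project_id}/agents/{agent_id}/resume")
        resp.raise_for_status()
        print(resp.json().get("status"))  # expected: the IDLE status value
```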
@@ -15,14 +15,16 @@ from app.core.auth import (
     TokenExpiredError,
     TokenInvalidError,
     decode_token,
+    get_password_hash,
 )
 from app.core.database import get_db
 from app.core.exceptions import (
     AuthenticationError as AuthError,
     DatabaseError,
-    DuplicateError,
     ErrorCode,
 )
+from app.crud.session import session as session_crud
+from app.crud.user import user as user_crud
 from app.models.user import User
 from app.schemas.common import MessageResponse
 from app.schemas.sessions import LogoutRequest, SessionCreate
@@ -37,8 +39,6 @@ from app.schemas.users import (
 )
 from app.services.auth_service import AuthenticationError, AuthService
 from app.services.email_service import email_service
-from app.services.session_service import session_service
-from app.services.user_service import user_service
 from app.utils.device import extract_device_info
 from app.utils.security import create_password_reset_token, verify_password_reset_token
 
@@ -91,18 +91,17 @@ async def _create_login_session(
             location_country=device_info.location_country,
         )
 
-        await session_service.create_session(db, obj_in=session_data)
+        await session_crud.create_session(db, obj_in=session_data)
 
         logger.info(
-            "%s successful: %s from %s (IP: %s)",
-            login_type.capitalize(),
-            user.email,
-            device_info.device_name,
-            device_info.ip_address,
+            f"{login_type.capitalize()} successful: {user.email} from {device_info.device_name} "
+            f"(IP: {device_info.ip_address})"
         )
     except Exception as session_err:
         # Log but don't fail login if session creation fails
-        logger.exception("Failed to create session for %s: %s", user.email, session_err)
+        logger.error(
+            f"Failed to create session for {user.email}: {session_err!s}", exc_info=True
+        )
 
 
 @router.post(
@@ -124,21 +123,15 @@ async def register_user(
     try:
         user = await AuthService.create_user(db, user_data)
         return user
-    except DuplicateError:
+    except AuthenticationError as e:
         # SECURITY: Don't reveal if email exists - generic error message
-        logger.warning("Registration failed: duplicate email %s", user_data.email)
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="Registration failed. Please check your information and try again.",
-        )
-    except AuthError as e:
-        logger.warning("Registration failed: %s", e)
+        logger.warning(f"Registration failed: {e!s}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail="Registration failed. Please check your information and try again.",
         )
     except Exception as e:
-        logger.exception("Unexpected error during registration: %s", e)
+        logger.error(f"Unexpected error during registration: {e!s}", exc_info=True)
         raise DatabaseError(
             message="An unexpected error occurred. Please try again later.",
             error_code=ErrorCode.INTERNAL_ERROR,
@@ -166,7 +159,7 @@ async def login(
 
     # Explicitly check for None result and raise correct exception
     if user is None:
-        logger.warning("Invalid login attempt for: %s", login_data.email)
+        logger.warning(f"Invalid login attempt for: {login_data.email}")
         raise AuthError(
             message="Invalid email or password",
             error_code=ErrorCode.INVALID_CREDENTIALS,
@@ -182,11 +175,14 @@ async def login(
 
     except AuthenticationError as e:
         # Handle specific authentication errors like inactive accounts
-        logger.warning("Authentication failed: %s", e)
+        logger.warning(f"Authentication failed: {e!s}")
         raise AuthError(message=str(e), error_code=ErrorCode.INVALID_CREDENTIALS)
     except AuthError:
         # Re-raise custom auth exceptions without modification
         raise
     except Exception as e:
         # Handle unexpected errors
-        logger.exception("Unexpected error during login: %s", e)
+        logger.error(f"Unexpected error during login: {e!s}", exc_info=True)
         raise DatabaseError(
             message="An unexpected error occurred. Please try again later.",
             error_code=ErrorCode.INTERNAL_ERROR,
@@ -228,10 +224,13 @@ async def login_oauth(
         # Return full token response with user data
         return tokens
     except AuthenticationError as e:
-        logger.warning("OAuth authentication failed: %s", e)
+        logger.warning(f"OAuth authentication failed: {e!s}")
         raise AuthError(message=str(e), error_code=ErrorCode.INVALID_CREDENTIALS)
     except AuthError:
         # Re-raise custom auth exceptions without modification
         raise
     except Exception as e:
-        logger.exception("Unexpected error during OAuth login: %s", e)
+        logger.error(f"Unexpected error during OAuth login: {e!s}", exc_info=True)
         raise DatabaseError(
             message="An unexpected error occurred. Please try again later.",
             error_code=ErrorCode.INTERNAL_ERROR,
@@ -260,12 +259,11 @@ async def refresh_token(
         )
 
         # Check if session exists and is active
-        session = await session_service.get_active_by_jti(db, jti=refresh_payload.jti)
+        session = await session_crud.get_active_by_jti(db, jti=refresh_payload.jti)
 
         if not session:
             logger.warning(
-                "Refresh token used for inactive or non-existent session: %s",
-                refresh_payload.jti,
+                f"Refresh token used for inactive or non-existent session: {refresh_payload.jti}"
             )
             raise HTTPException(
                 status_code=status.HTTP_401_UNAUTHORIZED,
@@ -281,14 +279,16 @@ async def refresh_token(
 
         # Update session with new refresh token JTI and expiration
         try:
-            await session_service.update_refresh_token(
+            await session_crud.update_refresh_token(
                 db,
                 session=session,
                 new_jti=new_refresh_payload.jti,
                 new_expires_at=datetime.fromtimestamp(new_refresh_payload.exp, tz=UTC),
             )
         except Exception as session_err:
-            logger.exception("Failed to update session %s: %s", session.id, session_err)
+            logger.error(
+                f"Failed to update session {session.id}: {session_err!s}", exc_info=True
+            )
             # Continue anyway - tokens are already issued
 
         return tokens
@@ -311,7 +311,7 @@ async def refresh_token(
         # Re-raise HTTP exceptions (like session revoked)
         raise
     except Exception as e:
-        logger.error("Unexpected error during token refresh: %s", e)
+        logger.error(f"Unexpected error during token refresh: {e!s}")
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="An unexpected error occurred. Please try again later.",
@@ -347,7 +347,7 @@ async def request_password_reset(
     """
     try:
         # Look up user by email
-        user = await user_service.get_by_email(db, email=reset_request.email)
+        user = await user_crud.get_by_email(db, email=reset_request.email)
 
         # Only send email if user exists and is active
        if user and user.is_active:
@@ -358,12 +358,11 @@ async def request_password_reset(
             await email_service.send_password_reset_email(
                 to_email=user.email, reset_token=reset_token, user_name=user.first_name
             )
-            logger.info("Password reset requested for %s", user.email)
+            logger.info(f"Password reset requested for {user.email}")
         else:
             # Log attempt but don't reveal if email exists
             logger.warning(
-                "Password reset requested for non-existent or inactive email: %s",
-                reset_request.email,
+                f"Password reset requested for non-existent or inactive email: {reset_request.email}"
             )
 
         # Always return success to prevent email enumeration
@@ -372,7 +371,7 @@ async def request_password_reset(
             message="If your email is registered, you will receive a password reset link shortly",
         )
     except Exception as e:
-        logger.exception("Error processing password reset request: %s", e)
+        logger.error(f"Error processing password reset request: {e!s}", exc_info=True)
         # Still return success to prevent information leakage
         return MessageResponse(
             success=True,
@@ -413,34 +412,40 @@ async def confirm_password_reset(
             detail="Invalid or expired password reset token",
         )
 
-    # Reset password via service (validates user exists and is active)
-    try:
-        user = await AuthService.reset_password(
-            db, email=email, new_password=reset_confirm.new_password
+    # Look up user
+    user = await user_crud.get_by_email(db, email=email)
+
+    if not user:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND, detail="User not found"
         )
-    except AuthenticationError as e:
-        err_msg = str(e)
-        if "inactive" in err_msg.lower():
-            raise HTTPException(
-                status_code=status.HTTP_400_BAD_REQUEST, detail=err_msg
-            )
-        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=err_msg)
+
+    if not user.is_active:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="User account is inactive",
+        )
+
+    # Update password
+    user.password_hash = get_password_hash(reset_confirm.new_password)
+    db.add(user)
+    await db.commit()
 
     # SECURITY: Invalidate all existing sessions after password reset
     # This prevents stolen sessions from being used after password change
-    from app.crud.session import session as session_crud
-
     try:
-        deactivated_count = await session_service.deactivate_all_user_sessions(
+        deactivated_count = await session_crud.deactivate_all_user_sessions(
             db, user_id=str(user.id)
         )
         logger.info(
-            "Password reset successful for %s, invalidated %s sessions",
-            user.email,
-            deactivated_count,
+            f"Password reset successful for {user.email}, invalidated {deactivated_count} sessions"
         )
     except Exception as session_error:
         # Log but don't fail password reset if session invalidation fails
         logger.error(
-            "Failed to invalidate sessions after password reset: %s", session_error
+            f"Failed to invalidate sessions after password reset: {session_error!s}"
        )
 
     return MessageResponse(
@@ -451,7 +456,7 @@ async def confirm_password_reset(
     except HTTPException:
         raise
     except Exception as e:
-        logger.exception("Error confirming password reset: %s", e)
+        logger.error(f"Error confirming password reset: {e!s}", exc_info=True)
         await db.rollback()
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
@@ -501,21 +506,19 @@ async def logout(
         )
     except (TokenExpiredError, TokenInvalidError) as e:
         # Even if token is expired/invalid, try to deactivate session
-        logger.warning("Logout with invalid/expired token: %s", e)
+        logger.warning(f"Logout with invalid/expired token: {e!s}")
         # Don't fail - return success anyway
         return MessageResponse(success=True, message="Logged out successfully")
 
     # Find the session by JTI
-    session = await session_service.get_by_jti(db, jti=refresh_payload.jti)
+    session = await session_crud.get_by_jti(db, jti=refresh_payload.jti)
 
     if session:
         # Verify session belongs to current user (security check)
         if str(session.user_id) != str(current_user.id):
             logger.warning(
-                "User %s attempted to logout session %s belonging to user %s",
-                current_user.id,
-                session.id,
-                session.user_id,
+                f"User {current_user.id} attempted to logout session {session.id} "
+                f"belonging to user {session.user_id}"
             )
             raise HTTPException(
                 status_code=status.HTTP_403_FORBIDDEN,
@@ -523,20 +526,17 @@ async def logout(
             )
 
         # Deactivate the session
-        await session_service.deactivate(db, session_id=str(session.id))
+        await session_crud.deactivate(db, session_id=str(session.id))
 
         logger.info(
-            "User %s logged out from %s (session %s)",
-            current_user.id,
-            session.device_name,
-            session.id,
+            f"User {current_user.id} logged out from {session.device_name} "
+            f"(session {session.id})"
         )
     else:
         # Session not found - maybe already deleted or never existed
         # Return success anyway (idempotent)
         logger.info(
-            "Logout requested for non-existent session (JTI: %s)",
-            refresh_payload.jti,
+            f"Logout requested for non-existent session (JTI: {refresh_payload.jti})"
         )
 
     return MessageResponse(success=True, message="Logged out successfully")
@@ -544,7 +544,9 @@ async def logout(
     except HTTPException:
         raise
     except Exception as e:
-        logger.exception("Error during logout for user %s: %s", current_user.id, e)
+        logger.error(
+            f"Error during logout for user {current_user.id}: {e!s}", exc_info=True
+        )
         # Don't expose error details
         return MessageResponse(success=True, message="Logged out successfully")
 
@@ -582,12 +584,12 @@ async def logout_all(
     """
     try:
         # Deactivate all sessions for this user
-        count = await session_service.deactivate_all_user_sessions(
+        count = await session_crud.deactivate_all_user_sessions(
             db, user_id=str(current_user.id)
         )
 
         logger.info(
-            "User %s logged out from all devices (%s sessions)", current_user.id, count
+            f"User {current_user.id} logged out from all devices ({count} sessions)"
         )
 
         return MessageResponse(
@@ -596,7 +598,9 @@ async def logout_all(
         )
 
     except Exception as e:
-        logger.exception("Error during logout-all for user %s: %s", current_user.id, e)
+        logger.error(
+            f"Error during logout-all for user {current_user.id}: {e!s}", exc_info=True
+        )
         await db.rollback()
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
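One side effect of this file's hunk-by-hunk move from %-style logging to f-strings is that messages are now formatted eagerly, before the logging module checks the level; the old lazy form only built the string when a handler actually emitted the record. A minimal sketch of the two forms side by side (the email value is a placeholder):

```python
import logging

logger = logging.getLogger(__name__)
user_email = "user@example.com"  # placeholder for illustration

# Lazy %-style (the removed form): arguments are stored on the LogRecord
# and only interpolated if the record is actually emitted.
logger.info("Password reset requested for %s", user_email)

# Eager f-string (the added form): the message is built at call time,
# even when INFO is disabled, trading a little work for readability.
logger.info(f"Password reset requested for {user_email}")
```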
411  backend/app/api/routes/context.py  Normal file
@@ -0,0 +1,411 @@
"""
Context Management API Endpoints.

Provides REST endpoints for context assembly and optimization
for LLM requests using the ContextEngine.
"""

import logging
from typing import Annotated, Any

from fastapi import APIRouter, Depends, HTTPException, Query, status
from pydantic import BaseModel, Field

from app.api.dependencies.permissions import require_superuser
from app.models.user import User
from app.services.context import (
    AssemblyTimeoutError,
    BudgetExceededError,
    ContextEngine,
    ContextSettings,
    create_context_engine,
    get_context_settings,
)
from app.services.mcp import MCPClientManager, get_mcp_client

logger = logging.getLogger(__name__)

router = APIRouter()


# ============================================================================
# Singleton Engine Management
# ============================================================================

_context_engine: ContextEngine | None = None


def _get_or_create_engine(
    mcp: MCPClientManager,
    settings: ContextSettings | None = None,
) -> ContextEngine:
    """Get or create the singleton ContextEngine."""
    global _context_engine
    if _context_engine is None:
        _context_engine = create_context_engine(
            mcp_manager=mcp,
            redis=None,  # Optional: add Redis caching later
            settings=settings or get_context_settings(),
        )
        logger.info("ContextEngine initialized")
    else:
        # Ensure MCP manager is up to date
        _context_engine.set_mcp_manager(mcp)
    return _context_engine


async def get_context_engine(
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> ContextEngine:
    """FastAPI dependency to get the ContextEngine."""
    return _get_or_create_engine(mcp)


# ============================================================================
# Request/Response Schemas
# ============================================================================


class ConversationTurn(BaseModel):
    """A single conversation turn."""

    role: str = Field(..., description="Role: 'user' or 'assistant'")
    content: str = Field(..., description="Message content")


class ToolResult(BaseModel):
    """A tool execution result."""

    tool_name: str = Field(..., description="Name of the tool")
    content: str | dict[str, Any] = Field(..., description="Tool result content")
    status: str = Field(default="success", description="Execution status")


class AssembleContextRequest(BaseModel):
    """Request to assemble context for an LLM request."""

    project_id: str = Field(..., description="Project identifier")
    agent_id: str = Field(..., description="Agent identifier")
    query: str = Field(..., description="User's query or current request")
    model: str = Field(
        default="claude-3-sonnet",
        description="Target model name",
    )
    max_tokens: int | None = Field(
        None,
        description="Maximum context tokens (uses model default if None)",
    )
    system_prompt: str | None = Field(
        None,
        description="System prompt/instructions",
    )
    task_description: str | None = Field(
        None,
        description="Current task description",
    )
    knowledge_query: str | None = Field(
        None,
        description="Query for knowledge base search",
    )
    knowledge_limit: int = Field(
        default=10,
        ge=1,
        le=50,
        description="Max number of knowledge results",
    )
    conversation_history: list[ConversationTurn] | None = Field(
        None,
        description="Previous conversation turns",
    )
    tool_results: list[ToolResult] | None = Field(
        None,
        description="Tool execution results to include",
    )
    compress: bool = Field(
        default=True,
        description="Whether to apply compression",
    )
    use_cache: bool = Field(
        default=True,
        description="Whether to use caching",
    )


class AssembledContextResponse(BaseModel):
    """Response containing assembled context."""

    content: str = Field(..., description="Assembled context content")
    total_tokens: int = Field(..., description="Total token count")
    context_count: int = Field(..., description="Number of context items included")
    compressed: bool = Field(..., description="Whether compression was applied")
    budget_used_percent: float = Field(
        ...,
        description="Percentage of token budget used",
    )
    metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Additional metadata",
    )


class TokenCountRequest(BaseModel):
    """Request to count tokens in content."""

    content: str = Field(..., description="Content to count tokens in")
    model: str | None = Field(
        None,
        description="Model for model-specific tokenization",
    )


class TokenCountResponse(BaseModel):
    """Response containing token count."""

    token_count: int = Field(..., description="Number of tokens")
    model: str | None = Field(None, description="Model used for counting")


class BudgetInfoResponse(BaseModel):
    """Response containing budget information for a model."""

    model: str = Field(..., description="Model name")
    total_tokens: int = Field(..., description="Total token budget")
    system_tokens: int = Field(..., description="Tokens reserved for system")
    knowledge_tokens: int = Field(..., description="Tokens for knowledge")
    conversation_tokens: int = Field(..., description="Tokens for conversation")
    tool_tokens: int = Field(..., description="Tokens for tool results")
    response_reserve: int = Field(..., description="Tokens reserved for response")


class ContextEngineStatsResponse(BaseModel):
    """Response containing engine statistics."""

    cache: dict[str, Any] = Field(..., description="Cache statistics")
    settings: dict[str, Any] = Field(..., description="Current settings")


class HealthResponse(BaseModel):
    """Health check response."""

    status: str = Field(..., description="Health status")
    mcp_connected: bool = Field(..., description="Whether MCP is connected")
    cache_enabled: bool = Field(..., description="Whether caching is enabled")


# ============================================================================
# Endpoints
# ============================================================================


@router.get(
    "/health",
    response_model=HealthResponse,
    summary="Context Engine Health",
    description="Check health status of the context engine.",
)
async def health_check(
    engine: ContextEngine = Depends(get_context_engine),
) -> HealthResponse:
    """Check context engine health."""
    stats = await engine.get_stats()
    return HealthResponse(
        status="healthy",
        mcp_connected=engine._mcp is not None,
        cache_enabled=stats.get("settings", {}).get("cache_enabled", False),
    )


@router.post(
    "/assemble",
    response_model=AssembledContextResponse,
    summary="Assemble Context",
    description="Assemble optimized context for an LLM request.",
)
async def assemble_context(
    request: AssembleContextRequest,
    current_user: User = Depends(require_superuser),
    engine: ContextEngine = Depends(get_context_engine),
) -> AssembledContextResponse:
    """
    Assemble optimized context for an LLM request.

    This endpoint gathers context from various sources, scores and ranks them,
    compresses if needed, and formats for the target model.
    """
    logger.info(
        "Context assembly for project=%s agent=%s by user=%s",
        request.project_id,
        request.agent_id,
        current_user.id,
    )

    # Convert conversation history to dict format
    conversation_history = None
    if request.conversation_history:
        conversation_history = [
            {"role": turn.role, "content": turn.content}
            for turn in request.conversation_history
        ]

    # Convert tool results to dict format
    tool_results = None
    if request.tool_results:
        tool_results = [
            {
                "tool_name": tr.tool_name,
                "content": tr.content,
                "status": tr.status,
            }
            for tr in request.tool_results
        ]

    try:
        result = await engine.assemble_context(
            project_id=request.project_id,
            agent_id=request.agent_id,
            query=request.query,
            model=request.model,
            max_tokens=request.max_tokens,
            system_prompt=request.system_prompt,
            task_description=request.task_description,
            knowledge_query=request.knowledge_query,
            knowledge_limit=request.knowledge_limit,
            conversation_history=conversation_history,
            tool_results=tool_results,
            compress=request.compress,
            use_cache=request.use_cache,
        )

        # Calculate budget usage percentage
        budget = await engine.get_budget_for_model(request.model, request.max_tokens)
        budget_used_percent = (result.total_tokens / budget.total) * 100

        # Check if compression was applied (from metadata if available)
        was_compressed = result.metadata.get("compressed_contexts", 0) > 0

        return AssembledContextResponse(
            content=result.content,
            total_tokens=result.total_tokens,
            context_count=result.context_count,
            compressed=was_compressed,
            budget_used_percent=round(budget_used_percent, 2),
            metadata={
                "model": request.model,
                "query": request.query,
                "knowledge_included": bool(request.knowledge_query),
                "conversation_turns": len(request.conversation_history or []),
                "excluded_count": result.excluded_count,
                "assembly_time_ms": result.assembly_time_ms,
            },
        )

    except AssemblyTimeoutError as e:
        raise HTTPException(
            status_code=status.HTTP_504_GATEWAY_TIMEOUT,
            detail=f"Context assembly timed out: {e}",
        ) from e
    except BudgetExceededError as e:
        raise HTTPException(
            status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail=f"Token budget exceeded: {e}",
        ) from e
    except Exception as e:
        logger.exception("Context assembly failed")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Context assembly failed: {e}",
        ) from e


@router.post(
    "/count-tokens",
    response_model=TokenCountResponse,
    summary="Count Tokens",
    description="Count tokens in content using the LLM Gateway.",
)
async def count_tokens(
    request: TokenCountRequest,
    engine: ContextEngine = Depends(get_context_engine),
) -> TokenCountResponse:
    """Count tokens in content."""
    try:
        count = await engine.count_tokens(
            content=request.content,
            model=request.model,
        )
        return TokenCountResponse(
            token_count=count,
            model=request.model,
        )
    except Exception as e:
        logger.warning(f"Token counting failed: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Token counting failed: {e}",
        ) from e


@router.get(
    "/budget/{model}",
    response_model=BudgetInfoResponse,
    summary="Get Token Budget",
    description="Get token budget allocation for a specific model.",
)
async def get_budget(
    model: str,
    max_tokens: Annotated[int | None, Query(description="Custom max tokens")] = None,
    engine: ContextEngine = Depends(get_context_engine),
) -> BudgetInfoResponse:
    """Get token budget information for a model."""
    budget = await engine.get_budget_for_model(model, max_tokens)
    return BudgetInfoResponse(
        model=model,
        total_tokens=budget.total,
        system_tokens=budget.system,
        knowledge_tokens=budget.knowledge,
        conversation_tokens=budget.conversation,
        tool_tokens=budget.tools,
        response_reserve=budget.response_reserve,
    )


@router.get(
    "/stats",
    response_model=ContextEngineStatsResponse,
    summary="Engine Statistics",
    description="Get context engine statistics and configuration.",
)
async def get_stats(
    current_user: User = Depends(require_superuser),
    engine: ContextEngine = Depends(get_context_engine),
) -> ContextEngineStatsResponse:
    """Get engine statistics."""
    stats = await engine.get_stats()
    return ContextEngineStatsResponse(
        cache=stats.get("cache", {}),
        settings=stats.get("settings", {}),
    )


@router.post(
    "/cache/invalidate",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Invalidate Cache (Admin Only)",
    description="Invalidate context cache entries.",
)
async def invalidate_cache(
    project_id: Annotated[
        str | None, Query(description="Project to invalidate")
    ] = None,
    pattern: Annotated[str | None, Query(description="Pattern to match")] = None,
    current_user: User = Depends(require_superuser),
    engine: ContextEngine = Depends(get_context_engine),
) -> None:
    """Invalidate cache entries."""
    logger.info(
        "Cache invalidation by user %s: project=%s pattern=%s",
        current_user.id,
        project_id,
        pattern,
    )
    await engine.invalidate_cache(project_id=project_id, pattern=pattern)
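For orientation, here is a sketch of how a client might call the `/assemble` endpoint above. The base URL and the prefix the router is mounted under are assumptions (neither is visible in this file); the payload fields mirror `AssembleContextRequest`, and a superuser token is needed because of the `require_superuser` dependency:

```python
import httpx

payload = {
    "project_id": "demo-project",  # hypothetical identifiers
    "agent_id": "demo-agent",
    "query": "Summarize open issues",
    "model": "claude-3-sonnet",
    "knowledge_query": "open issues",
    "conversation_history": [
        {"role": "user", "content": "What changed since yesterday?"},
    ],
}

# "/api/v1/context" is an assumed mount point for this router.
with httpx.Client(base_url="http://localhost:8000/api/v1/context") as client:
    r = client.post(
        "/assemble",
        json=payload,
        headers={"Authorization": "Bearer <superuser-token>"},  # placeholder
    )
    r.raise_for_status()
    body = r.json()
    # budget_used_percent is computed server-side as
    # total_tokens / budget.total * 100, rounded to two decimals.
    print(body["total_tokens"], body["budget_used_percent"])
```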
316  backend/app/api/routes/events.py  Normal file
@@ -0,0 +1,316 @@
"""
SSE endpoint for real-time project event streaming.

This module provides Server-Sent Events (SSE) endpoints for streaming
project events to connected clients. Events are scoped to projects,
with authorization checks to ensure clients only receive events
for projects they have access to.

Features:
- Real-time event streaming via SSE
- Project-scoped authorization
- Automatic reconnection support (Last-Event-ID)
- Keepalive messages every 30 seconds
- Graceful connection cleanup
"""

import asyncio
import json
import logging
from typing import TYPE_CHECKING
from uuid import UUID

from fastapi import APIRouter, Depends, Header, Query, Request
from slowapi import Limiter
from slowapi.util import get_remote_address
from sse_starlette.sse import EventSourceResponse

from app.api.dependencies.auth import get_current_user, get_current_user_sse
from app.api.dependencies.event_bus import get_event_bus
from app.core.database import get_db
from app.core.exceptions import AuthorizationError
from app.models.user import User
from app.schemas.errors import ErrorCode
from app.schemas.events import EventType
from app.services.event_bus import EventBus

if TYPE_CHECKING:
    from sqlalchemy.ext.asyncio import AsyncSession

logger = logging.getLogger(__name__)

router = APIRouter()
limiter = Limiter(key_func=get_remote_address)

# Keepalive interval in seconds
KEEPALIVE_INTERVAL = 30


async def check_project_access(
    project_id: UUID,
    user: User,
    db: "AsyncSession",
) -> bool:
    """
    Check if a user has access to a project's events.

    Authorization rules:
    - Superusers can access all projects
    - Project owners can access their own projects

    Args:
        project_id: The project to check access for
        user: The authenticated user
        db: Database session for project lookup

    Returns:
        bool: True if user has access, False otherwise
    """
    # Superusers can access all projects
    if user.is_superuser:
        logger.debug(
            f"Project access granted for superuser {user.id} on project {project_id}"
        )
        return True

    # Check if user owns the project
    from app.crud.syndarix import project as project_crud

    project = await project_crud.get(db, id=project_id)
    if not project:
        logger.debug(f"Project {project_id} not found for access check")
        return False

    has_access = bool(project.owner_id == user.id)
    logger.debug(
        f"Project access {'granted' if has_access else 'denied'} "
        f"for user {user.id} on project {project_id} (owner: {project.owner_id})"
    )
    return has_access


async def event_generator(
    project_id: UUID,
    event_bus: EventBus,
    last_event_id: str | None = None,
):
    """
    Generate SSE events for a project.

    This async generator yields SSE-formatted events from the event bus,
    including keepalive comments to maintain the connection.

    Args:
        project_id: The project to stream events for
        event_bus: The EventBus instance
        last_event_id: Optional last received event ID for reconnection

    Yields:
        dict: SSE event data with 'event', 'data', and optional 'id' fields
    """
    try:
        async for event_data in event_bus.subscribe_sse(
            project_id=project_id,
            last_event_id=last_event_id,
            keepalive_interval=KEEPALIVE_INTERVAL,
        ):
            if event_data == "":
                # Keepalive - yield SSE comment
                yield {"comment": "keepalive"}
            else:
                # Parse event to extract type and id
                try:
                    event_dict = json.loads(event_data)
                    event_type = event_dict.get("type", "message")
                    event_id = event_dict.get("id")

                    yield {
                        "event": event_type,
                        "data": event_data,
                        "id": event_id,
                    }
                except json.JSONDecodeError:
                    # If we can't parse, send as generic message
                    yield {
                        "event": "message",
                        "data": event_data,
                    }

    except asyncio.CancelledError:
        logger.info(f"Event stream cancelled for project {project_id}")
        raise
    except Exception as e:
        logger.error(f"Error in event stream for project {project_id}: {e}")
        raise


@router.get(
    "/projects/{project_id}/events/stream",
    summary="Stream Project Events",
    description="""
Stream real-time events for a project via Server-Sent Events (SSE).

**Authentication**: Required (Bearer token OR query parameter)
**Authorization**: Must have access to the project

**Authentication Methods**:
- Bearer token in Authorization header (preferred)
- Query parameter `token` (for EventSource compatibility)

Note: EventSource API doesn't support custom headers, so the query parameter
option is provided for browser-based SSE clients.

**SSE Event Format**:
```
event: agent.status_changed
id: 550e8400-e29b-41d4-a716-446655440000
data: {"id": "...", "type": "agent.status_changed", "project_id": "...", ...}

: keepalive

event: issue.created
id: 550e8400-e29b-41d4-a716-446655440001
data: {...}
```

**Reconnection**: Include the `Last-Event-ID` header with the last received
event ID to resume from where you left off.

**Keepalive**: The server sends a comment (`: keepalive`) every 30 seconds
to keep the connection alive.

**Rate Limit**: 10 connections/minute per IP
""",
    response_class=EventSourceResponse,
    responses={
        200: {
            "description": "SSE stream established",
            "content": {"text/event-stream": {}},
        },
        401: {"description": "Not authenticated"},
        403: {"description": "Not authorized to access this project"},
        404: {"description": "Project not found"},
    },
    operation_id="stream_project_events",
)
@limiter.limit("10/minute")
async def stream_project_events(
    request: Request,
    project_id: UUID,
    db: "AsyncSession" = Depends(get_db),
    event_bus: EventBus = Depends(get_event_bus),
    token: str | None = Query(
        None, description="Auth token (for EventSource compatibility)"
    ),
    authorization: str | None = Header(None, alias="Authorization"),
    last_event_id: str | None = Header(None, alias="Last-Event-ID"),
):
    """
    Stream real-time events for a project via SSE.

    This endpoint establishes a persistent SSE connection that streams
    project events to the client in real-time. The connection includes:

    - Event streaming: All project events (agent updates, issues, etc.)
    - Keepalive: Comment every 30 seconds to maintain connection
    - Reconnection: Use Last-Event-ID header to resume after disconnect

    The connection is automatically cleaned up when the client disconnects.
    """
    # Authenticate user (supports both header and query param tokens)
    current_user = await get_current_user_sse(
        db=db, authorization=authorization, token=token
    )

    logger.info(
        f"SSE connection request for project {project_id} "
        f"by user {current_user.id} "
        f"(last_event_id={last_event_id})"
    )

    # Check project access
    has_access = await check_project_access(project_id, current_user, db)
    if not has_access:
        raise AuthorizationError(
            message=f"You don't have access to project {project_id}",
            error_code=ErrorCode.INSUFFICIENT_PERMISSIONS,
        )

    # Return SSE response
    return EventSourceResponse(
        event_generator(
            project_id=project_id,
            event_bus=event_bus,
            last_event_id=last_event_id,
        ),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable nginx buffering
        },
    )


@router.post(
    "/projects/{project_id}/events/test",
    summary="Send Test Event (Development Only)",
    description="""
Send a test event to a project's event stream. This endpoint is
intended for development and testing purposes.

**Authentication**: Required (Bearer token)
**Authorization**: Must have access to the project

**Note**: This endpoint should be disabled or restricted in production.
""",
    response_model=dict,
    responses={
        200: {"description": "Test event sent"},
        401: {"description": "Not authenticated"},
        403: {"description": "Not authorized to access this project"},
    },
    operation_id="send_test_event",
)
async def send_test_event(
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    event_bus: EventBus = Depends(get_event_bus),
    db: "AsyncSession" = Depends(get_db),
):
    """
    Send a test event to the project's event stream.

    This is useful for testing SSE connections during development.
    """
    # Check project access
    has_access = await check_project_access(project_id, current_user, db)
    if not has_access:
        raise AuthorizationError(
            message=f"You don't have access to project {project_id}",
            error_code=ErrorCode.INSUFFICIENT_PERMISSIONS,
        )

    # Create and publish test event using the Event schema
    event = EventBus.create_event(
        event_type=EventType.AGENT_MESSAGE,
        project_id=project_id,
        actor_type="user",
        actor_id=current_user.id,
        payload={
            "message": "Test event from SSE endpoint",
            "message_type": "info",
        },
    )

    channel = event_bus.get_project_channel(project_id)
    await event_bus.publish(channel, event)

    logger.info(f"Test event sent to project {project_id}: {event.id}")

    return {
        "success": True,
        "event_id": event.id,
        "event_type": event.type.value,
        "message": "Test event sent successfully",
    }
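A minimal Python consumer for the stream endpoint above, using the query-parameter token option that the description documents for EventSource-style clients. The URL and token are placeholders; the line-oriented parsing follows the SSE wire format shown in the endpoint description (`event:` / `id:` / `data:` fields plus `: keepalive` comments):

```python
import json

import httpx

# Placeholder URL; substitute a real project id and deployment host.
URL = "http://localhost:8000/api/v1/projects/<project-id>/events/stream"

with httpx.stream("GET", URL, params={"token": "<token>"}, timeout=None) as resp:
    resp.raise_for_status()
    last_event_id = None
    for line in resp.iter_lines():
        if not line or line.startswith(":"):
            continue  # blank separators and ": keepalive" heartbeats
        if line.startswith("id: "):
            # Remember the id; send it back as Last-Event-ID to resume
            # after a disconnect.
            last_event_id = line[4:]
        elif line.startswith("data: "):
            payload = json.loads(line[6:])
            print(payload.get("type"), payload.get("id"))
```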
968  backend/app/api/routes/issues.py  Normal file
@@ -0,0 +1,968 @@
|
||||
# app/api/routes/issues.py
|
||||
"""
|
||||
Issue CRUD API endpoints for Syndarix projects.
|
||||
|
||||
Provides endpoints for managing issues within projects, including:
|
||||
- Create, read, update, delete operations
|
||||
- Filtering by status, priority, labels, sprint, assigned agent
|
||||
- Search across title and body
|
||||
- Assignment to agents
|
||||
- External issue tracker sync triggers
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Any
|
||||
from uuid import UUID
|
||||
|
||||
from fastapi import APIRouter, Depends, Query, Request, status
|
||||
from slowapi import Limiter
|
||||
from slowapi.util import get_remote_address
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.api.dependencies.auth import get_current_user
|
||||
from app.core.database import get_db
|
||||
from app.core.exceptions import (
|
||||
AuthorizationError,
|
||||
NotFoundError,
|
||||
ValidationException,
|
||||
)
|
||||
from app.crud.syndarix.agent_instance import agent_instance as agent_instance_crud
|
||||
from app.crud.syndarix.issue import issue as issue_crud
|
||||
from app.crud.syndarix.project import project as project_crud
|
||||
from app.crud.syndarix.sprint import sprint as sprint_crud
|
||||
from app.models.syndarix.enums import (
|
||||
AgentStatus,
|
||||
IssuePriority,
|
||||
IssueStatus,
|
||||
SprintStatus,
|
||||
SyncStatus,
|
||||
)
|
||||
from app.models.user import User
|
||||
from app.schemas.common import (
|
||||
MessageResponse,
|
||||
PaginatedResponse,
|
||||
PaginationParams,
|
||||
SortOrder,
|
||||
create_pagination_meta,
|
||||
)
|
||||
from app.schemas.errors import ErrorCode
|
||||
from app.schemas.syndarix.issue import (
|
||||
IssueAssign,
|
||||
IssueCreate,
|
||||
IssueResponse,
|
||||
IssueStats,
|
||||
IssueUpdate,
|
||||
)
|
||||
|
||||
router = APIRouter()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Initialize limiter for this router
|
||||
limiter = Limiter(key_func=get_remote_address)
|
||||
|
||||
# Use higher rate limits in test environment
|
||||
IS_TEST = os.getenv("IS_TEST", "False") == "True"
|
||||
RATE_MULTIPLIER = 100 if IS_TEST else 1
|
||||
|
||||
|
||||
async def verify_project_ownership(
    db: AsyncSession,
    project_id: UUID,
    user: User,
) -> None:
    """
    Verify that the user owns the project or is a superuser.

    Args:
        db: Database session
        project_id: Project UUID to verify
        user: Current authenticated user

    Raises:
        NotFoundError: If project does not exist
        AuthorizationError: If user does not own the project
    """
    project = await project_crud.get(db, id=project_id)
    if not project:
        raise NotFoundError(
            message=f"Project {project_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    if not user.is_superuser and project.owner_id != user.id:
        raise AuthorizationError(
            message="You do not have access to this project",
            error_code=ErrorCode.INSUFFICIENT_PERMISSIONS,
        )


def _build_issue_response(
    issue: Any,
    project_name: str | None = None,
    project_slug: str | None = None,
    sprint_name: str | None = None,
    assigned_agent_type_name: str | None = None,
) -> IssueResponse:
    """
    Build an IssueResponse from an Issue model instance.

    Args:
        issue: Issue model instance
        project_name: Optional project name from relationship
        project_slug: Optional project slug from relationship
        sprint_name: Optional sprint name from relationship
        assigned_agent_type_name: Optional agent type name from relationship

    Returns:
        IssueResponse schema instance
    """
    return IssueResponse(
        id=issue.id,
        project_id=issue.project_id,
        title=issue.title,
        body=issue.body,
        status=issue.status,
        priority=issue.priority,
        labels=issue.labels or [],
        assigned_agent_id=issue.assigned_agent_id,
        human_assignee=issue.human_assignee,
        sprint_id=issue.sprint_id,
        story_points=issue.story_points,
        external_tracker_type=issue.external_tracker_type,
        external_issue_id=issue.external_issue_id,
        remote_url=issue.remote_url,
        external_issue_number=issue.external_issue_number,
        sync_status=issue.sync_status,
        last_synced_at=issue.last_synced_at,
        external_updated_at=issue.external_updated_at,
        closed_at=issue.closed_at,
        created_at=issue.created_at,
        updated_at=issue.updated_at,
        project_name=project_name,
        project_slug=project_slug,
        sprint_name=sprint_name,
        assigned_agent_type_name=assigned_agent_type_name,
    )


# ===== Issue CRUD Endpoints =====
@router.post(
    "/projects/{project_id}/issues",
    response_model=IssueResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create Issue",
    description="Create a new issue in a project",
    operation_id="create_issue",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def create_issue(
    request: Request,
    project_id: UUID,
    issue_in: IssueCreate,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Create a new issue within a project.

    The user must own the project or be a superuser.
    The project_id in the path takes precedence over any project_id in the body.

    Args:
        request: FastAPI request object (for rate limiting)
        project_id: UUID of the project to create the issue in
        issue_in: Issue creation data
        current_user: Authenticated user
        db: Database session

    Returns:
        Created issue with full details

    Raises:
        NotFoundError: If project not found
        AuthorizationError: If user lacks access
        ValidationException: If assigned agent not in project
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Override project_id from path
    issue_in.project_id = project_id

    # Validate assigned agent if provided
    if issue_in.assigned_agent_id:
        agent = await agent_instance_crud.get(db, id=issue_in.assigned_agent_id)
        if not agent:
            raise NotFoundError(
                message=f"Agent instance {issue_in.assigned_agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )
        if agent.project_id != project_id:
            raise ValidationException(
                message="Agent instance does not belong to this project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="assigned_agent_id",
            )
        if agent.status == AgentStatus.TERMINATED:
            raise ValidationException(
                message="Cannot assign issue to a terminated agent",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="assigned_agent_id",
            )

    # Validate sprint if provided (IDOR prevention)
    if issue_in.sprint_id:
        sprint = await sprint_crud.get(db, id=issue_in.sprint_id)
        if not sprint:
            raise NotFoundError(
                message=f"Sprint {issue_in.sprint_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )
        if sprint.project_id != project_id:
            raise ValidationException(
                message="Sprint does not belong to this project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="sprint_id",
            )

    try:
        issue = await issue_crud.create(db, obj_in=issue_in)
        logger.info(
            f"User {current_user.email} created issue '{issue.title}' "
            f"in project {project_id}"
        )

        # Get project details for response
        project = await project_crud.get(db, id=project_id)

        return _build_issue_response(
            issue,
            project_name=project.name if project else None,
            project_slug=project.slug if project else None,
        )

    except ValueError as e:
        logger.warning(f"Failed to create issue: {e!s}")
        raise ValidationException(
            message=str(e),
            error_code=ErrorCode.VALIDATION_ERROR,
        )
    except Exception as e:
        logger.error(f"Error creating issue: {e!s}", exc_info=True)
        raise
@router.get(
    "/projects/{project_id}/issues",
    response_model=PaginatedResponse[IssueResponse],
    summary="List Issues",
    description="Get paginated list of issues in a project with filtering",
    operation_id="list_issues",
)
@limiter.limit(f"{120 * RATE_MULTIPLIER}/minute")
async def list_issues(
    request: Request,
    project_id: UUID,
    pagination: PaginationParams = Depends(),
    status_filter: IssueStatus | None = Query(
        None, alias="status", description="Filter by issue status"
    ),
    priority: IssuePriority | None = Query(None, description="Filter by priority"),
    labels: list[str] | None = Query(
        None, description="Filter by labels (comma-separated)"
    ),
    sprint_id: UUID | None = Query(None, description="Filter by sprint ID"),
    assigned_agent_id: UUID | None = Query(
        None, description="Filter by assigned agent ID"
    ),
    sync_status: SyncStatus | None = Query(None, description="Filter by sync status"),
    search: str | None = Query(
        None, min_length=1, max_length=100, description="Search in title and body"
    ),
    sort_by: str = Query(
        "created_at",
        description="Field to sort by (created_at, updated_at, priority, status, title)",
    ),
    sort_order: SortOrder = Query(SortOrder.DESC, description="Sort order"),
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    List issues in a project with comprehensive filtering options.

    Supports filtering by:
    - status: Issue status (open, in_progress, in_review, blocked, closed)
    - priority: Issue priority (low, medium, high, critical)
    - labels: Match issues containing any of the provided labels
    - sprint_id: Issues in a specific sprint
    - assigned_agent_id: Issues assigned to a specific agent
    - sync_status: External tracker sync status
    - search: Full-text search in title and body

    Args:
        request: FastAPI request object
        project_id: Project UUID
        pagination: Pagination parameters
        status_filter: Optional status filter
        priority: Optional priority filter
        labels: Optional labels filter
        sprint_id: Optional sprint filter
        assigned_agent_id: Optional agent assignment filter
        sync_status: Optional sync status filter
        search: Optional search query
        sort_by: Field to sort by
        sort_order: Sort direction
        current_user: Authenticated user
        db: Database session

    Returns:
        Paginated list of issues matching filters
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    try:
        # Get filtered issues
        issues, total = await issue_crud.get_by_project(
            db,
            project_id=project_id,
            status=status_filter,
            priority=priority,
            sprint_id=sprint_id,
            assigned_agent_id=assigned_agent_id,
            labels=labels,
            search=search,
            skip=pagination.offset,
            limit=pagination.limit,
            sort_by=sort_by,
            sort_order=sort_order.value,
        )

        # Build response objects
        issue_responses = [_build_issue_response(issue) for issue in issues]

        pagination_meta = create_pagination_meta(
            total=total,
            page=pagination.page,
            limit=pagination.limit,
            items_count=len(issue_responses),
        )

        return PaginatedResponse(data=issue_responses, pagination=pagination_meta)

    except Exception as e:
        logger.error(
            f"Error listing issues for project {project_id}: {e!s}", exc_info=True
        )
        raise
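A minimal client-side sketch of the list endpoint above. The mount prefix, bearer token, project UUID, and the page/limit query names (inferred from PaginationParams) are illustrative assumptions, not taken from this diff:

# Hypothetical client call for GET /projects/{project_id}/issues.
import httpx

BASE = "http://localhost:8000/api/v1"  # assumed mount prefix
PROJECT = "11111111-1111-1111-1111-111111111111"  # placeholder UUID

resp = httpx.get(
    f"{BASE}/projects/{PROJECT}/issues",
    params={"status": "open", "priority": "high", "page": 1, "limit": 20},
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
body = resp.json()
# Expected shape: {"data": [IssueResponse, ...], "pagination": {...}}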
# ===== Issue Statistics Endpoint =====
# NOTE: This endpoint MUST be defined before /{issue_id} routes
# to prevent FastAPI from trying to parse "stats" as a UUID
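To illustrate the routing note above, a standalone FastAPI sketch (not from this codebase) showing why the literal /stats route must be registered before the parameterized one:

# Standalone sketch: FastAPI matches routes in registration order, so a
# literal segment must be declared before a path parameter that would
# also match it.
from uuid import UUID

from fastapi import FastAPI

app = FastAPI()


@app.get("/issues/stats")  # matched first: the literal path wins
async def stats() -> dict:
    return {"route": "stats"}


@app.get("/issues/{issue_id}")  # declared after, so "stats" never reaches it
async def get_issue(issue_id: UUID) -> dict:
    return {"route": "detail", "id": str(issue_id)}

# If the order were reversed, GET /issues/stats would try to parse "stats"
# as a UUID and fail with a 422 validation error.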
@router.get(
    "/projects/{project_id}/issues/stats",
    response_model=IssueStats,
    summary="Get Issue Statistics",
    description="Get aggregated issue statistics for a project",
    operation_id="get_issue_stats",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def get_issue_stats(
    request: Request,
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get aggregated statistics for issues in a project.

    Returns counts by status and priority, along with story point totals.

    Args:
        request: FastAPI request object
        project_id: Project UUID
        current_user: Authenticated user
        db: Database session

    Returns:
        Issue statistics including counts by status/priority and story points

    Raises:
        NotFoundError: If project not found
        AuthorizationError: If user lacks access
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    try:
        stats = await issue_crud.get_project_stats(db, project_id=project_id)
        return IssueStats(**stats)

    except Exception as e:
        logger.error(
            f"Error getting issue stats for project {project_id}: {e!s}",
            exc_info=True,
        )
        raise
@router.get(
    "/projects/{project_id}/issues/{issue_id}",
    response_model=IssueResponse,
    summary="Get Issue",
    description="Get detailed information about a specific issue",
    operation_id="get_issue",
)
@limiter.limit(f"{120 * RATE_MULTIPLIER}/minute")
async def get_issue(
    request: Request,
    project_id: UUID,
    issue_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get detailed information about a specific issue.

    Returns the issue with expanded relationship data including
    project name, sprint name, and assigned agent type name.

    Args:
        request: FastAPI request object
        project_id: Project UUID
        issue_id: Issue UUID
        current_user: Authenticated user
        db: Database session

    Returns:
        Issue details with relationship data

    Raises:
        NotFoundError: If project or issue not found
        AuthorizationError: If user lacks access
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Get issue with details
    issue_data = await issue_crud.get_with_details(db, issue_id=issue_id)

    if not issue_data:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    issue = issue_data["issue"]

    # Verify issue belongs to the project
    if issue.project_id != project_id:
        raise NotFoundError(
            message=f"Issue {issue_id} not found in project {project_id}",
            error_code=ErrorCode.NOT_FOUND,
        )

    return _build_issue_response(
        issue,
        project_name=issue_data.get("project_name"),
        project_slug=issue_data.get("project_slug"),
        sprint_name=issue_data.get("sprint_name"),
        assigned_agent_type_name=issue_data.get("assigned_agent_type_name"),
    )
@router.patch(
    "/projects/{project_id}/issues/{issue_id}",
    response_model=IssueResponse,
    summary="Update Issue",
    description="Update an existing issue",
    operation_id="update_issue",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def update_issue(
    request: Request,
    project_id: UUID,
    issue_id: UUID,
    issue_in: IssueUpdate,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Update an existing issue.

    All fields are optional - only provided fields will be updated.
    Validates that assigned agent belongs to the same project.

    Args:
        request: FastAPI request object
        project_id: Project UUID
        issue_id: Issue UUID
        issue_in: Fields to update
        current_user: Authenticated user
        db: Database session

    Returns:
        Updated issue details

    Raises:
        NotFoundError: If project or issue not found
        AuthorizationError: If user lacks access
        ValidationException: If validation fails
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Get existing issue
    issue = await issue_crud.get(db, id=issue_id)
    if not issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Verify issue belongs to the project
    if issue.project_id != project_id:
        raise NotFoundError(
            message=f"Issue {issue_id} not found in project {project_id}",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Validate assigned agent if being updated
    if issue_in.assigned_agent_id is not None:
        agent = await agent_instance_crud.get(db, id=issue_in.assigned_agent_id)
        if not agent:
            raise NotFoundError(
                message=f"Agent instance {issue_in.assigned_agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )
        if agent.project_id != project_id:
            raise ValidationException(
                message="Agent instance does not belong to this project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="assigned_agent_id",
            )
        if agent.status == AgentStatus.TERMINATED:
            raise ValidationException(
                message="Cannot assign issue to a terminated agent",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="assigned_agent_id",
            )

    # Validate sprint if being updated (IDOR prevention and status validation)
    if issue_in.sprint_id is not None:
        sprint = await sprint_crud.get(db, id=issue_in.sprint_id)
        if not sprint:
            raise NotFoundError(
                message=f"Sprint {issue_in.sprint_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )
        if sprint.project_id != project_id:
            raise ValidationException(
                message="Sprint does not belong to this project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="sprint_id",
            )
        # Cannot add issues to completed or cancelled sprints
        if sprint.status in [SprintStatus.COMPLETED, SprintStatus.CANCELLED]:
            raise ValidationException(
                message=f"Cannot add issues to sprint with status '{sprint.status.value}'",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="sprint_id",
            )

    try:
        updated_issue = await issue_crud.update(db, db_obj=issue, obj_in=issue_in)
        logger.info(
            f"User {current_user.email} updated issue {issue_id} in project {project_id}"
        )

        # Get full details for response
        issue_data = await issue_crud.get_with_details(db, issue_id=issue_id)

        return _build_issue_response(
            updated_issue,
            project_name=issue_data.get("project_name") if issue_data else None,
            project_slug=issue_data.get("project_slug") if issue_data else None,
            sprint_name=issue_data.get("sprint_name") if issue_data else None,
            assigned_agent_type_name=(
                issue_data.get("assigned_agent_type_name") if issue_data else None
            ),
        )

    except ValueError as e:
        logger.warning(f"Failed to update issue {issue_id}: {e!s}")
        raise ValidationException(
            message=str(e),
            error_code=ErrorCode.VALIDATION_ERROR,
        )
    except Exception as e:
        logger.error(f"Error updating issue {issue_id}: {e!s}", exc_info=True)
        raise
@router.delete(
    "/projects/{project_id}/issues/{issue_id}",
    response_model=MessageResponse,
    summary="Delete Issue",
    description="Delete an issue permanently",
    operation_id="delete_issue",
)
@limiter.limit(f"{30 * RATE_MULTIPLIER}/minute")
async def delete_issue(
    request: Request,
    project_id: UUID,
    issue_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Delete an issue permanently.

    The issue will be permanently removed from the database.

    Args:
        request: FastAPI request object
        project_id: Project UUID
        issue_id: Issue UUID
        current_user: Authenticated user
        db: Database session

    Returns:
        Success message

    Raises:
        NotFoundError: If project or issue not found
        AuthorizationError: If user lacks access
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Get existing issue
    issue = await issue_crud.get(db, id=issue_id)
    if not issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Verify issue belongs to the project
    if issue.project_id != project_id:
        raise NotFoundError(
            message=f"Issue {issue_id} not found in project {project_id}",
            error_code=ErrorCode.NOT_FOUND,
        )

    try:
        issue_title = issue.title
        await issue_crud.remove(db, id=issue_id)
        logger.info(
            f"User {current_user.email} deleted issue {issue_id} "
            f"('{issue_title}') from project {project_id}"
        )

        return MessageResponse(
            success=True,
            message=f"Issue '{issue_title}' has been deleted",
        )

    except Exception as e:
        logger.error(f"Error deleting issue {issue_id}: {e!s}", exc_info=True)
        raise


# ===== Issue Assignment Endpoint =====
@router.post(
    "/projects/{project_id}/issues/{issue_id}/assign",
    response_model=IssueResponse,
    summary="Assign Issue",
    description="Assign an issue to an agent or human",
    operation_id="assign_issue",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def assign_issue(
    request: Request,
    project_id: UUID,
    issue_id: UUID,
    assignment: IssueAssign,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Assign an issue to an agent or human.

    Only one type of assignment is allowed at a time:
    - assigned_agent_id: Assign to an AI agent instance
    - human_assignee: Assign to a human (name/email string)

    To unassign, pass both as null/None.

    Args:
        request: FastAPI request object
        project_id: Project UUID
        issue_id: Issue UUID
        assignment: Assignment data
        current_user: Authenticated user
        db: Database session

    Returns:
        Updated issue with assignment

    Raises:
        NotFoundError: If project, issue, or agent not found
        AuthorizationError: If user lacks access
        ValidationException: If agent not in project
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Get existing issue
    issue = await issue_crud.get(db, id=issue_id)
    if not issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Verify issue belongs to the project
    if issue.project_id != project_id:
        raise NotFoundError(
            message=f"Issue {issue_id} not found in project {project_id}",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Process assignment based on type
    if assignment.assigned_agent_id:
        # Validate agent exists and belongs to project
        agent = await agent_instance_crud.get(db, id=assignment.assigned_agent_id)
        if not agent:
            raise NotFoundError(
                message=f"Agent instance {assignment.assigned_agent_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )
        if agent.project_id != project_id:
            raise ValidationException(
                message="Agent instance does not belong to this project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="assigned_agent_id",
            )
        if agent.status == AgentStatus.TERMINATED:
            raise ValidationException(
                message="Cannot assign issue to a terminated agent",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="assigned_agent_id",
            )

        updated_issue = await issue_crud.assign_to_agent(
            db, issue_id=issue_id, agent_id=assignment.assigned_agent_id
        )
        logger.info(
            f"User {current_user.email} assigned issue {issue_id} to agent {agent.name}"
        )

    elif assignment.human_assignee:
        updated_issue = await issue_crud.assign_to_human(
            db, issue_id=issue_id, human_assignee=assignment.human_assignee
        )
        logger.info(
            f"User {current_user.email} assigned issue {issue_id} "
            f"to human '{assignment.human_assignee}'"
        )

    else:
        # Unassign - clear both agent and human
        updated_issue = await issue_crud.assign_to_agent(
            db, issue_id=issue_id, agent_id=None
        )
        logger.info(f"User {current_user.email} unassigned issue {issue_id}")

    if not updated_issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Get full details for response
    issue_data = await issue_crud.get_with_details(db, issue_id=issue_id)

    return _build_issue_response(
        updated_issue,
        project_name=issue_data.get("project_name") if issue_data else None,
        project_slug=issue_data.get("project_slug") if issue_data else None,
        sprint_name=issue_data.get("sprint_name") if issue_data else None,
        assigned_agent_type_name=(
            issue_data.get("assigned_agent_type_name") if issue_data else None
        ),
    )
@router.delete(
    "/projects/{project_id}/issues/{issue_id}/assignment",
    response_model=IssueResponse,
    summary="Unassign Issue",
    description="""
Remove agent/human assignment from an issue.

**Authentication**: Required (Bearer token)
**Authorization**: Project owner or superuser

This clears both agent and human assignee fields.

**Rate Limit**: 60 requests/minute
""",
    operation_id="unassign_issue",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def unassign_issue(
    request: Request,
    project_id: UUID,
    issue_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Remove assignment from an issue.

    Clears both assigned_agent_id and human_assignee fields.
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Get existing issue
    issue = await issue_crud.get(db, id=issue_id)
    if not issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Verify issue belongs to project (IDOR prevention)
    if issue.project_id != project_id:
        raise NotFoundError(
            message=f"Issue {issue_id} not found in project {project_id}",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Unassign the issue
    updated_issue = await issue_crud.unassign(db, issue_id=issue_id)

    if not updated_issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    logger.info(f"User {current_user.email} unassigned issue {issue_id}")

    # Get full details for response
    issue_data = await issue_crud.get_with_details(db, issue_id=issue_id)

    return _build_issue_response(
        updated_issue,
        project_name=issue_data.get("project_name") if issue_data else None,
        project_slug=issue_data.get("project_slug") if issue_data else None,
        sprint_name=issue_data.get("sprint_name") if issue_data else None,
        assigned_agent_type_name=(
            issue_data.get("assigned_agent_type_name") if issue_data else None
        ),
    )


# ===== Issue Sync Endpoint =====
@router.post(
    "/projects/{project_id}/issues/{issue_id}/sync",
    response_model=MessageResponse,
    summary="Trigger Issue Sync",
    description="Trigger synchronization with external issue tracker",
    operation_id="sync_issue",
)
@limiter.limit(f"{30 * RATE_MULTIPLIER}/minute")
async def sync_issue(
    request: Request,
    project_id: UUID,
    issue_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Trigger synchronization of an issue with its external tracker.

    This endpoint queues a sync task for the issue. The actual synchronization
    happens asynchronously via Celery.

    Prerequisites:
    - Issue must have external_tracker_type configured
    - Project must have integration settings for the tracker

    Args:
        request: FastAPI request object
        project_id: Project UUID
        issue_id: Issue UUID
        current_user: Authenticated user
        db: Database session

    Returns:
        Message indicating sync has been triggered

    Raises:
        NotFoundError: If project or issue not found
        AuthorizationError: If user lacks access
        ValidationException: If issue has no external tracker
    """
    # Verify project access
    await verify_project_ownership(db, project_id, current_user)

    # Get existing issue
    issue = await issue_crud.get(db, id=issue_id)
    if not issue:
        raise NotFoundError(
            message=f"Issue {issue_id} not found",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Verify issue belongs to the project
    if issue.project_id != project_id:
        raise NotFoundError(
            message=f"Issue {issue_id} not found in project {project_id}",
            error_code=ErrorCode.NOT_FOUND,
        )

    # Check if issue has external tracker configured
    if not issue.external_tracker_type:
        raise ValidationException(
            message="Issue does not have an external tracker configured",
            error_code=ErrorCode.VALIDATION_ERROR,
            field="external_tracker_type",
        )

    # Update sync status to pending
    await issue_crud.update_sync_status(
        db,
        issue_id=issue_id,
        sync_status=SyncStatus.PENDING,
    )

    # TODO: Queue Celery task for actual sync
    # When Celery is set up, this will be:
    # from app.tasks.sync import sync_issue_task
    # sync_issue_task.delay(str(issue_id))

    logger.info(
        f"User {current_user.email} triggered sync for issue {issue_id} "
        f"(tracker: {issue.external_tracker_type})"
    )

    return MessageResponse(
        success=True,
        message=f"Sync triggered for issue '{issue.title}'. "
        "Status will update when complete.",
    )
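Under the assumptions in the TODO above (a task module at app.tasks.sync and a configured Celery app), the queued task might look like the following sketch; the retry policy is illustrative, not the shipped implementation:

# Hypothetical Celery task matching the commented-out import in sync_issue.
from celery import shared_task


@shared_task(name="app.tasks.sync.sync_issue_task", bind=True, max_retries=3)
def sync_issue_task(self, issue_id: str) -> None:
    """Push/pull one issue to its external tracker, retrying on failure."""
    try:
        # ... perform the actual tracker round-trip and update sync_status ...
        pass
    except Exception as exc:
        # Exponential backoff between retries: 60s, 120s, 240s.
        raise self.retry(exc=exc, countdown=60 * 2**self.request.retries)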
backend/app/api/routes/mcp.py (new file, 446 lines)
@@ -0,0 +1,446 @@
|
||||
"""
|
||||
MCP (Model Context Protocol) API Endpoints
|
||||
|
||||
Provides REST endpoints for managing MCP server connections
|
||||
and executing tool calls.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import Annotated, Any
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, status
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.api.dependencies.permissions import require_superuser
|
||||
from app.models.user import User
|
||||
from app.services.mcp import (
|
||||
MCPCircuitOpenError,
|
||||
MCPClientManager,
|
||||
MCPConnectionError,
|
||||
MCPError,
|
||||
MCPServerNotFoundError,
|
||||
MCPTimeoutError,
|
||||
MCPToolError,
|
||||
MCPToolNotFoundError,
|
||||
get_mcp_client,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
# Server name validation pattern: alphanumeric, hyphens, underscores, 1-64 chars
|
||||
SERVER_NAME_PATTERN = re.compile(r"^[a-zA-Z0-9_-]{1,64}$")
|
||||
|
||||
# Type alias for validated server name path parameter
|
||||
ServerNamePath = Annotated[
|
||||
str,
|
||||
Path(
|
||||
description="MCP server name",
|
||||
min_length=1,
|
||||
max_length=64,
|
||||
pattern=r"^[a-zA-Z0-9_-]+$",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Request/Response Schemas
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class ServerInfo(BaseModel):
|
||||
"""Information about an MCP server."""
|
||||
|
||||
name: str = Field(..., description="Server name")
|
||||
url: str = Field(..., description="Server URL")
|
||||
enabled: bool = Field(..., description="Whether server is enabled")
|
||||
timeout: int = Field(..., description="Request timeout in seconds")
|
||||
transport: str = Field(..., description="Transport type (http, stdio, sse)")
|
||||
description: str | None = Field(None, description="Server description")
|
||||
|
||||
|
||||
class ServerListResponse(BaseModel):
|
||||
"""Response containing list of MCP servers."""
|
||||
|
||||
servers: list[ServerInfo]
|
||||
total: int
|
||||
|
||||
|
||||
class ToolInfoResponse(BaseModel):
|
||||
"""Information about an MCP tool."""
|
||||
|
||||
name: str = Field(..., description="Tool name")
|
||||
description: str | None = Field(None, description="Tool description")
|
||||
server_name: str | None = Field(None, description="Server providing the tool")
|
||||
input_schema: dict[str, Any] | None = Field(
|
||||
None, description="JSON schema for input"
|
||||
)
|
||||
|
||||
|
||||
class ToolListResponse(BaseModel):
|
||||
"""Response containing list of tools."""
|
||||
|
||||
tools: list[ToolInfoResponse]
|
||||
total: int
|
||||
|
||||
|
||||
class ServerHealthStatus(BaseModel):
|
||||
"""Health status for a server."""
|
||||
|
||||
name: str
|
||||
healthy: bool
|
||||
state: str
|
||||
url: str
|
||||
error: str | None = None
|
||||
tools_count: int = 0
|
||||
|
||||
|
||||
class HealthCheckResponse(BaseModel):
|
||||
"""Response containing health status of all servers."""
|
||||
|
||||
servers: dict[str, ServerHealthStatus]
|
||||
healthy_count: int
|
||||
unhealthy_count: int
|
||||
total: int
|
||||
|
||||
|
||||
class ToolCallRequest(BaseModel):
|
||||
"""Request to execute a tool."""
|
||||
|
||||
server: str = Field(..., description="MCP server name")
|
||||
tool: str = Field(..., description="Tool name to execute")
|
||||
arguments: dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Tool arguments",
|
||||
)
|
||||
timeout: float | None = Field(
|
||||
None,
|
||||
description="Optional timeout override in seconds",
|
||||
)
|
||||
|
||||
|
||||
class ToolCallResponse(BaseModel):
|
||||
"""Response from tool execution."""
|
||||
|
||||
success: bool
|
||||
data: Any | None = None
|
||||
error: str | None = None
|
||||
error_code: str | None = None
|
||||
tool_name: str | None = None
|
||||
server_name: str | None = None
|
||||
execution_time_ms: float = 0.0
|
||||
request_id: str | None = None
|
||||
|
||||
|
||||
class CircuitBreakerStatus(BaseModel):
|
||||
"""Status of a circuit breaker."""
|
||||
|
||||
server_name: str
|
||||
state: str
|
||||
failure_count: int
|
||||
|
||||
|
||||
class CircuitBreakerListResponse(BaseModel):
|
||||
"""Response containing circuit breaker statuses."""
|
||||
|
||||
circuit_breakers: list[CircuitBreakerStatus]
|
||||
|
||||
|
||||
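For orientation, a hypothetical payload for the /call endpoint defined later in this file, built with the ToolCallRequest schema above; the server and tool names are made up for illustration:

# Hypothetical request body for POST .../call, validated by ToolCallRequest.
# "github" and "search_issues" are invented names; arguments must match the
# tool's input_schema.
example_call = ToolCallRequest(
    server="github",
    tool="search_issues",
    arguments={"query": "label:bug state:open"},
    timeout=15.0,  # optional per-call override in seconds
)
print(example_call.model_dump_json(indent=2))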
# ============================================================================
# Endpoints
# ============================================================================
@router.get(
    "/servers",
    response_model=ServerListResponse,
    summary="List MCP Servers",
    description="Get list of all registered MCP servers with their configurations.",
)
async def list_servers(
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> ServerListResponse:
    """List all registered MCP servers."""
    servers = []

    for name in mcp.list_servers():
        try:
            config = mcp.get_server_config(name)
            servers.append(
                ServerInfo(
                    name=name,
                    url=config.url,
                    enabled=config.enabled,
                    timeout=config.timeout,
                    transport=config.transport.value,
                    description=config.description,
                )
            )
        except MCPServerNotFoundError:
            continue

    return ServerListResponse(
        servers=servers,
        total=len(servers),
    )


@router.get(
    "/servers/{server_name}/tools",
    response_model=ToolListResponse,
    summary="List Server Tools",
    description="Get list of tools available on a specific MCP server.",
)
async def list_server_tools(
    server_name: ServerNamePath,
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> ToolListResponse:
    """List all tools available on a specific server."""
    try:
        tools = await mcp.list_tools(server_name)
        return ToolListResponse(
            tools=[
                ToolInfoResponse(
                    name=t.name,
                    description=t.description,
                    server_name=t.server_name,
                    input_schema=t.input_schema,
                )
                for t in tools
            ],
            total=len(tools),
        )
    except MCPServerNotFoundError as e:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Server not found: {server_name}",
        ) from e


@router.get(
    "/tools",
    response_model=ToolListResponse,
    summary="List All Tools",
    description="Get list of all tools from all MCP servers.",
)
async def list_all_tools(
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> ToolListResponse:
    """List all tools from all servers."""
    tools = await mcp.list_all_tools()
    return ToolListResponse(
        tools=[
            ToolInfoResponse(
                name=t.name,
                description=t.description,
                server_name=t.server_name,
                input_schema=t.input_schema,
            )
            for t in tools
        ],
        total=len(tools),
    )


@router.get(
    "/health",
    response_model=HealthCheckResponse,
    summary="Health Check",
    description="Check health status of all MCP servers.",
)
async def health_check(
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> HealthCheckResponse:
    """Perform health check on all MCP servers."""
    health_results = await mcp.health_check()

    servers = {
        name: ServerHealthStatus(
            name=status.name,
            healthy=status.healthy,
            state=status.state,
            url=status.url,
            error=status.error,
            tools_count=status.tools_count,
        )
        for name, status in health_results.items()
    }

    healthy_count = sum(1 for s in servers.values() if s.healthy)
    unhealthy_count = len(servers) - healthy_count

    return HealthCheckResponse(
        servers=servers,
        healthy_count=healthy_count,
        unhealthy_count=unhealthy_count,
        total=len(servers),
    )


@router.post(
    "/call",
    response_model=ToolCallResponse,
    summary="Execute Tool (Admin Only)",
    description="Execute a tool on an MCP server. Requires superuser privileges.",
)
async def call_tool(
    request: ToolCallRequest,
    current_user: User = Depends(require_superuser),
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> ToolCallResponse:
    """
    Execute a tool on an MCP server.

    This endpoint is restricted to superusers for direct tool execution.
    Normal tool execution should go through agent workflows.
    """
    logger.info(
        "Tool call by user %s: %s.%s",
        current_user.id,
        request.server,
        request.tool,
    )

    try:
        result = await mcp.call_tool(
            server=request.server,
            tool=request.tool,
            args=request.arguments,
            timeout=request.timeout,
        )

        return ToolCallResponse(
            success=result.success,
            data=result.data,
            error=result.error,
            error_code=result.error_code,
            tool_name=result.tool_name,
            server_name=result.server_name,
            execution_time_ms=result.execution_time_ms,
            request_id=result.request_id,
        )

    except MCPCircuitOpenError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=f"Server temporarily unavailable: {e.server_name}",
        ) from e
    except MCPToolNotFoundError as e:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Tool not found: {e.tool_name}",
        ) from e
    except MCPServerNotFoundError as e:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Server not found: {e.server_name}",
        ) from e
    except MCPTimeoutError as e:
        raise HTTPException(
            status_code=status.HTTP_504_GATEWAY_TIMEOUT,
            detail=str(e),
        ) from e
    except MCPConnectionError as e:
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=str(e),
        ) from e
    except MCPToolError as e:
        # Tool errors are returned in the response, not as HTTP errors
        return ToolCallResponse(
            success=False,
            error=str(e),
            error_code=e.error_code,
            tool_name=e.tool_name,
            server_name=e.server_name,
        )
    except MCPError as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e),
        ) from e
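A client-side sketch of exercising the admin-only tool call above; the mount prefix and token are assumptions, and the server/tool names are hypothetical:

# Hypothetical client call; adjust the prefix to wherever this router is mounted.
import httpx

resp = httpx.post(
    "http://localhost:8000/api/v1/mcp/call",  # assumed mount prefix
    json={
        "server": "github",           # hypothetical server name
        "tool": "search_issues",      # hypothetical tool name
        "arguments": {"query": "state:open"},
    },
    headers={"Authorization": "Bearer <superuser-token>"},
)
print(resp.status_code, resp.json())
# On tool-level failure the endpoint still returns 200 with success=False;
# transport and circuit problems surface as 404/5xx HTTP errors instead.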
@router.get(
    "/circuit-breakers",
    response_model=CircuitBreakerListResponse,
    summary="List Circuit Breakers",
    description="Get status of all circuit breakers.",
)
async def list_circuit_breakers(
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> CircuitBreakerListResponse:
    """Get status of all circuit breakers."""
    status_dict = mcp.get_circuit_breaker_status()

    return CircuitBreakerListResponse(
        circuit_breakers=[
            CircuitBreakerStatus(
                server_name=name,
                state=info.get("state", "unknown"),
                failure_count=info.get("failure_count", 0),
            )
            for name, info in status_dict.items()
        ]
    )


@router.post(
    "/circuit-breakers/{server_name}/reset",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Reset Circuit Breaker (Admin Only)",
    description="Manually reset a circuit breaker for a server.",
)
async def reset_circuit_breaker(
    server_name: ServerNamePath,
    current_user: User = Depends(require_superuser),
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> None:
    """Manually reset a circuit breaker."""
    logger.info(
        "Circuit breaker reset by user %s for server %s",
        current_user.id,
        server_name,
    )

    success = await mcp.reset_circuit_breaker(server_name)
    if not success:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"No circuit breaker found for server: {server_name}",
        )


@router.post(
    "/servers/{server_name}/reconnect",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Reconnect to Server (Admin Only)",
    description="Force reconnection to an MCP server.",
)
async def reconnect_server(
    server_name: ServerNamePath,
    current_user: User = Depends(require_superuser),
    mcp: MCPClientManager = Depends(get_mcp_client),
) -> None:
    """Force reconnection to an MCP server."""
    logger.info(
        "Reconnect requested by user %s for server %s",
        current_user.id,
        server_name,
    )

    try:
        await mcp.disconnect(server_name)
        await mcp.connect(server_name)
    except MCPServerNotFoundError as e:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Server not found: {server_name}",
        ) from e
    except MCPConnectionError as e:
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"Failed to reconnect: {e}",
        ) from e
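The endpoints above expose each breaker's state and failure_count, but this diff does not include MCPClientManager's internals; as a point of reference, a generic sketch of the closed/open/half-open lifecycle such a breaker typically implements (states and thresholds are assumptions, not taken from this codebase):

# Generic circuit-breaker lifecycle sketch.
import enum
import time


class BreakerState(enum.Enum):
    CLOSED = "closed"        # calls flow normally
    OPEN = "open"            # calls fail fast (cf. MCPCircuitOpenError)
    HALF_OPEN = "half_open"  # one probe call decides recovery


class CircuitBreaker:
    def __init__(self, failure_threshold: int = 5, reset_timeout: float = 30.0):
        self.state = BreakerState.CLOSED
        self.failure_count = 0
        self.failure_threshold = failure_threshold
        self.reset_timeout = reset_timeout
        self.opened_at = 0.0

    def record_failure(self) -> None:
        self.failure_count += 1
        if self.failure_count >= self.failure_threshold:
            self.state = BreakerState.OPEN
            self.opened_at = time.monotonic()

    def allow_request(self) -> bool:
        if self.state is BreakerState.OPEN:
            if time.monotonic() - self.opened_at >= self.reset_timeout:
                self.state = BreakerState.HALF_OPEN  # let one probe through
                return True
            return False
        return True

    def record_success(self) -> None:
        # Same effect as the manual reset endpoint above.
        self.state = BreakerState.CLOSED
        self.failure_count = 0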
@@ -25,6 +25,8 @@ from app.core.auth import decode_token
 from app.core.config import settings
 from app.core.database import get_db
 from app.core.exceptions import AuthenticationError as AuthError
+from app.crud import oauth_account
+from app.crud.session import session as session_crud
 from app.models.user import User
 from app.schemas.oauth import (
     OAuthAccountsListResponse,
@@ -36,7 +38,6 @@ from app.schemas.oauth import (
 from app.schemas.sessions import SessionCreate
 from app.schemas.users import Token
 from app.services.oauth_service import OAuthService
-from app.services.session_service import session_service
 from app.utils.device import extract_device_info

 router = APIRouter()
@@ -81,19 +82,17 @@ async def _create_oauth_login_session(
             location_country=device_info.location_country,
         )

-        await session_service.create_session(db, obj_in=session_data)
+        await session_crud.create_session(db, obj_in=session_data)

         logger.info(
-            "OAuth login successful: %s via %s from %s (IP: %s)",
-            user.email,
-            provider,
-            device_info.device_name,
-            device_info.ip_address,
+            f"OAuth login successful: {user.email} via {provider} "
+            f"from {device_info.device_name} (IP: {device_info.ip_address})"
         )
     except Exception as session_err:
         # Log but don't fail login if session creation fails
-        logger.exception(
-            "Failed to create session for OAuth login %s: %s", user.email, session_err
+        logger.error(
+            f"Failed to create session for OAuth login {user.email}: {session_err!s}",
+            exc_info=True,
         )
@@ -178,13 +177,13 @@ async def get_authorization_url(
         }

     except AuthError as e:
-        logger.warning("OAuth authorization failed: %s", e)
+        logger.warning(f"OAuth authorization failed: {e!s}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=str(e),
         )
     except Exception as e:
-        logger.exception("OAuth authorization error: %s", e)
+        logger.error(f"OAuth authorization error: {e!s}", exc_info=True)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="Failed to create authorization URL",
@@ -252,13 +251,13 @@ async def handle_callback(
         return result

     except AuthError as e:
-        logger.warning("OAuth callback failed: %s", e)
+        logger.warning(f"OAuth callback failed: {e!s}")
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
             detail=str(e),
         )
     except Exception as e:
-        logger.exception("OAuth callback error: %s", e)
+        logger.error(f"OAuth callback error: {e!s}", exc_info=True)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="OAuth authentication failed",
@@ -290,7 +289,7 @@ async def list_accounts(
     Returns:
         List of linked OAuth accounts
     """
-    accounts = await OAuthService.get_user_accounts(db, user_id=current_user.id)
+    accounts = await oauth_account.get_user_accounts(db, user_id=current_user.id)
     return OAuthAccountsListResponse(accounts=accounts)
@@ -339,13 +338,13 @@ async def unlink_account(
         )

     except AuthError as e:
-        logger.warning("OAuth unlink failed for %s: %s", current_user.email, e)
+        logger.warning(f"OAuth unlink failed for {current_user.email}: {e!s}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=str(e),
         )
     except Exception as e:
-        logger.exception("OAuth unlink error: %s", e)
+        logger.error(f"OAuth unlink error: {e!s}", exc_info=True)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="Failed to unlink OAuth account",
@@ -398,7 +397,7 @@ async def start_link(
     )

     # Check if user already has this provider linked
-    existing = await OAuthService.get_user_account_by_provider(
+    existing = await oauth_account.get_user_account_by_provider(
         db, user_id=current_user.id, provider=provider
     )
     if existing:
@@ -421,13 +420,13 @@ async def start_link(
         }

     except AuthError as e:
-        logger.warning("OAuth link authorization failed: %s", e)
+        logger.warning(f"OAuth link authorization failed: {e!s}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=str(e),
         )
     except Exception as e:
-        logger.exception("OAuth link error: %s", e)
+        logger.error(f"OAuth link error: {e!s}", exc_info=True)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="Failed to create authorization URL",
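These hunks consistently swap %-style logging arguments for f-strings. For reference, a standalone sketch of the semantic difference between the two styles (standard library behavior, independent of this codebase):

# Standalone illustration of the two logging styles seen in these hunks.
import logging

logger = logging.getLogger("example")
user_email = "dev@example.com"  # placeholder value

# %-style defers string formatting until the record is actually emitted,
# so a message suppressed by the log level costs almost nothing to skip.
logger.info("OAuth login successful: %s", user_email)

# An f-string is formatted eagerly, before logging decides whether the
# level is enabled; the two produce identical output when emitted.
logger.info(f"OAuth login successful: {user_email}")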
@@ -34,6 +34,7 @@ from app.api.dependencies.auth import (
 )
 from app.core.config import settings
 from app.core.database import get_db
+from app.crud import oauth_client as oauth_client_crud
 from app.models.user import User
 from app.schemas.oauth import (
     OAuthClientCreate,
@@ -452,7 +453,7 @@ async def token(
     except Exception as e:
         # Log malformed Basic auth for security monitoring
         logger.warning(
-            "Malformed Basic auth header in token request: %s", type(e).__name__
+            f"Malformed Basic auth header in token request: {type(e).__name__}"
         )
         # Fall back to form body
@@ -563,8 +564,7 @@ async def revoke(
     except Exception as e:
         # Log malformed Basic auth for security monitoring
         logger.warning(
-            "Malformed Basic auth header in revoke request: %s",
-            type(e).__name__,
+            f"Malformed Basic auth header in revoke request: {type(e).__name__}"
         )
         # Fall back to form body
@@ -586,7 +586,7 @@ async def revoke(
         )
     except Exception as e:
         # Log but don't expose errors per RFC 7009
-        logger.warning("Token revocation error: %s", e)
+        logger.warning(f"Token revocation error: {e}")

     # Always return 200 OK per RFC 7009
     return {"status": "ok"}
@@ -635,8 +635,7 @@ async def introspect(
     except Exception as e:
         # Log malformed Basic auth for security monitoring
         logger.warning(
-            "Malformed Basic auth header in introspect request: %s",
-            type(e).__name__,
+            f"Malformed Basic auth header in introspect request: {type(e).__name__}"
         )
         # Fall back to form body
@@ -656,8 +655,8 @@ async def introspect(
             headers={"WWW-Authenticate": "Basic"},
         )
     except Exception as e:
-        logger.warning("Token introspection error: %s", e)
-        return OAuthTokenIntrospectionResponse(active=False)  # pyright: ignore[reportCallIssue]
+        logger.warning(f"Token introspection error: {e}")
+        return OAuthTokenIntrospectionResponse(active=False)
@@ -713,7 +712,7 @@ async def register_client(
         client_type=client_type,
     )

-    client, secret = await provider_service.register_client(db, client_data)
+    client, secret = await oauth_client_crud.create_client(db, obj_in=client_data)

     # Update MCP server URL if provided
     if mcp_server_url:
@@ -751,7 +750,7 @@ async def list_clients(
     current_user: User = Depends(get_current_superuser),
 ) -> list[OAuthClientResponse]:
     """List all OAuth clients."""
-    clients = await provider_service.list_clients(db)
+    clients = await oauth_client_crud.get_all_clients(db)
     return [OAuthClientResponse.model_validate(c) for c in clients]
@@ -777,7 +776,7 @@ async def delete_client(
             detail="Client not found",
         )

-    await provider_service.delete_client_by_id(db, client_id=client_id)
+    await oauth_client_crud.delete_client(db, client_id=client_id)


 # ============================================================================
@@ -798,7 +797,30 @@ async def list_my_consents(
     current_user: User = Depends(get_current_active_user),
 ) -> list[dict]:
     """List applications the user has authorized."""
-    return await provider_service.list_user_consents(db, user_id=current_user.id)
+    from sqlalchemy import select
+
+    from app.models.oauth_client import OAuthClient
+    from app.models.oauth_provider_token import OAuthConsent
+
+    result = await db.execute(
+        select(OAuthConsent, OAuthClient)
+        .join(OAuthClient, OAuthConsent.client_id == OAuthClient.client_id)
+        .where(OAuthConsent.user_id == current_user.id)
+    )
+    rows = result.all()
+
+    return [
+        {
+            "client_id": consent.client_id,
+            "client_name": client.client_name,
+            "client_description": client.client_description,
+            "granted_scopes": consent.granted_scopes.split()
+            if consent.granted_scopes
+            else [],
+            "granted_at": consent.created_at.isoformat(),
+        }
+        for consent, client in rows
+    ]


 @router.delete(
@@ -15,6 +15,8 @@ from sqlalchemy.ext.asyncio import AsyncSession
 from app.api.dependencies.auth import get_current_user
 from app.api.dependencies.permissions import require_org_admin, require_org_membership
 from app.core.database import get_db
+from app.core.exceptions import ErrorCode, NotFoundError
+from app.crud.organization import organization as organization_crud
 from app.models.user import User
 from app.schemas.common import (
     PaginatedResponse,
@@ -26,7 +28,6 @@ from app.schemas.organizations import (
     OrganizationResponse,
     OrganizationUpdate,
 )
-from app.services.organization_service import organization_service

 logger = logging.getLogger(__name__)
@@ -53,7 +54,7 @@ async def get_my_organizations(
     """
     try:
         # Get all org data in single query with JOIN and subquery
-        orgs_data = await organization_service.get_user_organizations_with_details(
+        orgs_data = await organization_crud.get_user_organizations_with_details(
             db, user_id=current_user.id, is_active=is_active
         )
@@ -77,7 +78,7 @@ async def get_my_organizations(
         return orgs_with_data

     except Exception as e:
-        logger.exception("Error getting user organizations: %s", e)
+        logger.error(f"Error getting user organizations: {e!s}", exc_info=True)
         raise
@@ -99,7 +100,13 @@ async def get_organization(
     User must be a member of the organization.
     """
     try:
-        org = await organization_service.get_organization(db, str(organization_id))
+        org = await organization_crud.get(db, id=organization_id)
+        if not org:  # pragma: no cover - Permission check prevents this (see docs/UNREACHABLE_DEFENSIVE_CODE_ANALYSIS.md)
+            raise NotFoundError(
+                detail=f"Organization {organization_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )

         org_dict = {
             "id": org.id,
             "name": org.name,
@@ -109,14 +116,16 @@ async def get_organization(
             "settings": org.settings,
             "created_at": org.created_at,
             "updated_at": org.updated_at,
-            "member_count": await organization_service.get_member_count(
+            "member_count": await organization_crud.get_member_count(
                 db, organization_id=org.id
             ),
         }
         return OrganizationResponse(**org_dict)

+    except NotFoundError:  # pragma: no cover - See above
+        raise
     except Exception as e:
-        logger.exception("Error getting organization: %s", e)
+        logger.error(f"Error getting organization: {e!s}", exc_info=True)
         raise
@@ -140,7 +149,7 @@ async def get_organization_members(
     User must be a member of the organization to view members.
     """
     try:
-        members, total = await organization_service.get_organization_members(
+        members, total = await organization_crud.get_organization_members(
             db,
             organization_id=organization_id,
             skip=pagination.offset,
@@ -160,7 +169,7 @@ async def get_organization_members(
         return PaginatedResponse(data=member_responses, pagination=pagination_meta)

     except Exception as e:
-        logger.exception("Error getting organization members: %s", e)
+        logger.error(f"Error getting organization members: {e!s}", exc_info=True)
         raise
@@ -183,12 +192,16 @@ async def update_organization(
     Requires owner or admin role in the organization.
     """
     try:
-        org = await organization_service.get_organization(db, str(organization_id))
-        updated_org = await organization_service.update_organization(
-            db, org=org, obj_in=org_in
-        )
+        org = await organization_crud.get(db, id=organization_id)
+        if not org:  # pragma: no cover - Permission check prevents this (see docs/UNREACHABLE_DEFENSIVE_CODE_ANALYSIS.md)
+            raise NotFoundError(
+                detail=f"Organization {organization_id} not found",
+                error_code=ErrorCode.NOT_FOUND,
+            )
+
+        updated_org = await organization_crud.update(db, db_obj=org, obj_in=org_in)
         logger.info(
-            "User %s updated organization %s", current_user.email, updated_org.name
+            f"User {current_user.email} updated organization {updated_org.name}"
        )
@@ -200,12 +213,14 @@ async def update_organization(
             "settings": updated_org.settings,
             "created_at": updated_org.created_at,
             "updated_at": updated_org.updated_at,
-            "member_count": await organization_service.get_member_count(
+            "member_count": await organization_crud.get_member_count(
                 db, organization_id=updated_org.id
             ),
         }
         return OrganizationResponse(**org_dict)

-    except Exception as e:
-        logger.exception("Error updating organization: %s", e)
+    except NotFoundError:  # pragma: no cover - See above
+        raise
+    except Exception as e:
+        logger.error(f"Error updating organization: {e!s}", exc_info=True)
         raise
backend/app/api/routes/projects.py (new file, 659 lines)
@@ -0,0 +1,659 @@
|
||||
# app/api/routes/projects.py
"""
Project management API endpoints for Syndarix.

These endpoints allow users to manage their AI-powered software consulting projects.
Users can create, read, update, and manage the lifecycle of their projects.
"""

import logging
import os
from typing import Any
from uuid import UUID

from fastapi import APIRouter, Depends, Query, Request, status
from slowapi import Limiter
from slowapi.util import get_remote_address
from sqlalchemy.ext.asyncio import AsyncSession

from app.api.dependencies.auth import get_current_user
from app.core.database import get_db
from app.core.exceptions import (
    AuthorizationError,
    DuplicateError,
    ErrorCode,
    NotFoundError,
    ValidationException,
)
from app.crud.syndarix.project import project as project_crud
from app.models.syndarix.enums import ProjectStatus
from app.models.user import User
from app.schemas.common import (
    MessageResponse,
    PaginatedResponse,
    PaginationParams,
    create_pagination_meta,
)
from app.schemas.syndarix.project import (
    ProjectCreate,
    ProjectResponse,
    ProjectUpdate,
)

router = APIRouter()
logger = logging.getLogger(__name__)

# Initialize rate limiter
limiter = Limiter(key_func=get_remote_address)

# Use higher rate limits in test environment
IS_TEST = os.getenv("IS_TEST", "False") == "True"
RATE_MULTIPLIER = 100 if IS_TEST else 1


def _build_project_response(project_data: dict[str, Any]) -> ProjectResponse:
    """
    Build a ProjectResponse from a project data dictionary.

    Args:
        project_data: Dictionary containing project and related counts

    Returns:
        ProjectResponse with all fields populated
    """
    project = project_data["project"]
    return ProjectResponse(
        id=project.id,
        name=project.name,
        slug=project.slug,
        description=project.description,
        autonomy_level=project.autonomy_level,
        status=project.status,
        settings=project.settings,
        owner_id=project.owner_id,
        created_at=project.created_at,
        updated_at=project.updated_at,
        agent_count=project_data.get("agent_count", 0),
        issue_count=project_data.get("issue_count", 0),
        active_sprint_name=project_data.get("active_sprint_name"),
    )


def _check_project_ownership(project: Any, current_user: User) -> None:
    """
    Check if the current user owns the project or is a superuser.

    Args:
        project: The project to check ownership of
        current_user: The authenticated user

    Raises:
        AuthorizationError: If the user doesn't own the project and isn't a superuser
    """
    if not current_user.is_superuser and project.owner_id != current_user.id:
        raise AuthorizationError(
            message="You do not have permission to access this project",
            error_code=ErrorCode.INSUFFICIENT_PERMISSIONS,
        )


# =============================================================================
# Project CRUD Endpoints
# =============================================================================


@router.post(
    "",
    response_model=ProjectResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create Project",
    description="""
    Create a new project for the current user.

    The project will be owned by the authenticated user.
    A unique slug is required for URL-friendly project identification.

    **Rate Limit**: 10 requests/minute
    """,
    operation_id="create_project",
)
@limiter.limit(f"{10 * RATE_MULTIPLIER}/minute")
async def create_project(
    request: Request,
    project_in: ProjectCreate,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Create a new project.

    The authenticated user becomes the owner of the project.
    """
    try:
        # Set the owner to the current user
        project_data = ProjectCreate(
            name=project_in.name,
            slug=project_in.slug,
            description=project_in.description,
            autonomy_level=project_in.autonomy_level,
            status=project_in.status,
            settings=project_in.settings,
            owner_id=current_user.id,
        )

        project = await project_crud.create(db, obj_in=project_data)
        logger.info(f"User {current_user.email} created project {project.slug}")

        return ProjectResponse(
            id=project.id,
            name=project.name,
            slug=project.slug,
            description=project.description,
            autonomy_level=project.autonomy_level,
            status=project.status,
            settings=project.settings,
            owner_id=project.owner_id,
            created_at=project.created_at,
            updated_at=project.updated_at,
            agent_count=0,
            issue_count=0,
            active_sprint_name=None,
        )

    except ValueError as e:
        error_msg = str(e)
        if "already exists" in error_msg.lower():
            logger.warning(f"Duplicate project slug attempted: {project_in.slug}")
            raise DuplicateError(
                message=error_msg,
                error_code=ErrorCode.DUPLICATE_ENTRY,
                field="slug",
            )
        logger.error(f"Error creating project: {error_msg}", exc_info=True)
        raise
    except Exception as e:
        logger.error(f"Unexpected error creating project: {e!s}", exc_info=True)
        raise


@router.get(
    "",
    response_model=PaginatedResponse[ProjectResponse],
    summary="List Projects",
    description="""
    List projects for the current user with filtering and pagination.

    Regular users see only their own projects.
    Superusers can see all projects by setting `all_projects=true`.

    **Rate Limit**: 30 requests/minute
    """,
    operation_id="list_projects",
)
@limiter.limit(f"{30 * RATE_MULTIPLIER}/minute")
async def list_projects(
    request: Request,
    pagination: PaginationParams = Depends(),
    status_filter: ProjectStatus | None = Query(
        None, alias="status", description="Filter by project status"
    ),
    search: str | None = Query(
        None, description="Search by name, slug, or description"
    ),
    all_projects: bool = Query(False, description="Show all projects (superuser only)"),
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    List projects with filtering, search, and pagination.

    Regular users only see their own projects.
    Superusers can view all projects if all_projects is true.
    """
    try:
        # Determine owner filter based on user role and request
        owner_id = (
            None if (current_user.is_superuser and all_projects) else current_user.id
        )

        projects_data, total = await project_crud.get_multi_with_counts(
            db,
            skip=pagination.offset,
            limit=pagination.limit,
            status=status_filter,
            owner_id=owner_id,
            search=search,
        )

        # Build response objects
        project_responses = [_build_project_response(data) for data in projects_data]

        pagination_meta = create_pagination_meta(
            total=total,
            page=pagination.page,
            limit=pagination.limit,
            items_count=len(project_responses),
        )

        return PaginatedResponse(data=project_responses, pagination=pagination_meta)

    except Exception as e:
        logger.error(f"Error listing projects: {e!s}", exc_info=True)
        raise


@router.get(
    "/{project_id}",
    response_model=ProjectResponse,
    summary="Get Project",
    description="""
    Get detailed information about a specific project.

    Users can only access their own projects unless they are superusers.

    **Rate Limit**: 60 requests/minute
    """,
    operation_id="get_project",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def get_project(
    request: Request,
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get detailed information about a project by ID.

    Includes agent count, issue count, and active sprint name.
    """
    try:
        project_data = await project_crud.get_with_counts(db, project_id=project_id)

        if not project_data:
            raise NotFoundError(
                message=f"Project {project_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        project = project_data["project"]
        _check_project_ownership(project, current_user)

        return _build_project_response(project_data)

    except (NotFoundError, AuthorizationError):
        raise
    except Exception as e:
        logger.error(f"Error getting project {project_id}: {e!s}", exc_info=True)
        raise


@router.get(
    "/slug/{slug}",
    response_model=ProjectResponse,
    summary="Get Project by Slug",
    description="""
    Get detailed information about a project by its slug.

    Users can only access their own projects unless they are superusers.

    **Rate Limit**: 60 requests/minute
    """,
    operation_id="get_project_by_slug",
)
@limiter.limit(f"{60 * RATE_MULTIPLIER}/minute")
async def get_project_by_slug(
    request: Request,
    slug: str,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Get detailed information about a project by slug.

    Includes agent count, issue count, and active sprint name.
    """
    try:
        project = await project_crud.get_by_slug(db, slug=slug)

        if not project:
            raise NotFoundError(
                message=f"Project with slug '{slug}' not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        _check_project_ownership(project, current_user)

        # Get project with counts
        project_data = await project_crud.get_with_counts(db, project_id=project.id)

        if not project_data:
            raise NotFoundError(
                message=f"Project with slug '{slug}' not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        return _build_project_response(project_data)

    except (NotFoundError, AuthorizationError):
        raise
    except Exception as e:
        logger.error(f"Error getting project by slug {slug}: {e!s}", exc_info=True)
        raise


@router.patch(
    "/{project_id}",
    response_model=ProjectResponse,
    summary="Update Project",
    description="""
    Update an existing project.

    Only the project owner or a superuser can update a project.
    Only provided fields will be updated.

    **Rate Limit**: 20 requests/minute
    """,
    operation_id="update_project",
)
@limiter.limit(f"{20 * RATE_MULTIPLIER}/minute")
async def update_project(
    request: Request,
    project_id: UUID,
    project_in: ProjectUpdate,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Update a project's information.

    Only the project owner or superusers can perform updates.
    """
    try:
        project = await project_crud.get(db, id=project_id)

        if not project:
            raise NotFoundError(
                message=f"Project {project_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        _check_project_ownership(project, current_user)

        # Update the project
        updated_project = await project_crud.update(
            db, db_obj=project, obj_in=project_in
        )
        logger.info(f"User {current_user.email} updated project {updated_project.slug}")

        # Get updated project with counts
        project_data = await project_crud.get_with_counts(
            db, project_id=updated_project.id
        )

        if not project_data:
            # This shouldn't happen, but handle gracefully
            raise NotFoundError(
                message=f"Project {project_id} not found after update",
                error_code=ErrorCode.NOT_FOUND,
            )

        return _build_project_response(project_data)

    except (NotFoundError, AuthorizationError):
        raise
    except ValueError as e:
        error_msg = str(e)
        if "already exists" in error_msg.lower():
            logger.warning(f"Duplicate project slug attempted: {project_in.slug}")
            raise DuplicateError(
                message=error_msg,
                error_code=ErrorCode.DUPLICATE_ENTRY,
                field="slug",
            )
        logger.error(f"Error updating project: {error_msg}", exc_info=True)
        raise
    except Exception as e:
        logger.error(f"Error updating project {project_id}: {e!s}", exc_info=True)
        raise


@router.delete(
    "/{project_id}",
    response_model=MessageResponse,
    summary="Archive Project",
    description="""
    Archive a project (soft delete).

    Only the project owner or a superuser can archive a project.
    Archived projects are not deleted but are no longer accessible for active work.

    **Rate Limit**: 10 requests/minute
    """,
    operation_id="archive_project",
)
@limiter.limit(f"{10 * RATE_MULTIPLIER}/minute")
async def archive_project(
    request: Request,
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Archive a project by setting its status to ARCHIVED.

    This is a soft delete operation. The project data is preserved.
    """
    try:
        project = await project_crud.get(db, id=project_id)

        if not project:
            raise NotFoundError(
                message=f"Project {project_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        _check_project_ownership(project, current_user)

        # Check if project is already archived
        if project.status == ProjectStatus.ARCHIVED:
            return MessageResponse(
                success=True,
                message=f"Project '{project.name}' is already archived",
            )

        archived_project = await project_crud.archive_project(db, project_id=project_id)

        if not archived_project:
            raise NotFoundError(
                message=f"Failed to archive project {project_id}",
                error_code=ErrorCode.NOT_FOUND,
            )

        logger.info(f"User {current_user.email} archived project {project.slug}")

        return MessageResponse(
            success=True,
            message=f"Project '{archived_project.name}' has been archived",
        )

    except (NotFoundError, AuthorizationError):
        raise
    except Exception as e:
        logger.error(f"Error archiving project {project_id}: {e!s}", exc_info=True)
        raise


# =============================================================================
# Project Lifecycle Endpoints
# =============================================================================


@router.post(
    "/{project_id}/pause",
    response_model=ProjectResponse,
    summary="Pause Project",
    description="""
    Pause an active project.

    Only ACTIVE projects can be paused.
    Only the project owner or a superuser can pause a project.

    **Rate Limit**: 10 requests/minute
    """,
    operation_id="pause_project",
)
@limiter.limit(f"{10 * RATE_MULTIPLIER}/minute")
async def pause_project(
    request: Request,
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Pause an active project.

    Sets the project status to PAUSED. Only ACTIVE projects can be paused.
    """
    try:
        project = await project_crud.get(db, id=project_id)

        if not project:
            raise NotFoundError(
                message=f"Project {project_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        _check_project_ownership(project, current_user)

        # Validate current status (business logic validation, not authorization)
        if project.status == ProjectStatus.PAUSED:
            raise ValidationException(
                message="Project is already paused",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="status",
            )

        if project.status == ProjectStatus.ARCHIVED:
            raise ValidationException(
                message="Cannot pause an archived project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="status",
            )

        if project.status == ProjectStatus.COMPLETED:
            raise ValidationException(
                message="Cannot pause a completed project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="status",
            )

        # Update status to PAUSED
        updated_project = await project_crud.update(
            db, db_obj=project, obj_in=ProjectUpdate(status=ProjectStatus.PAUSED)
        )
        logger.info(f"User {current_user.email} paused project {project.slug}")

        # Get project with counts
        project_data = await project_crud.get_with_counts(
            db, project_id=updated_project.id
        )

        if not project_data:
            raise NotFoundError(
                message=f"Project {project_id} not found after update",
                error_code=ErrorCode.NOT_FOUND,
            )

        return _build_project_response(project_data)

    except (NotFoundError, AuthorizationError, ValidationException):
        raise
    except Exception as e:
        logger.error(f"Error pausing project {project_id}: {e!s}", exc_info=True)
        raise


@router.post(
    "/{project_id}/resume",
    response_model=ProjectResponse,
    summary="Resume Project",
    description="""
    Resume a paused project.

    Only PAUSED projects can be resumed.
    Only the project owner or a superuser can resume a project.

    **Rate Limit**: 10 requests/minute
    """,
    operation_id="resume_project",
)
@limiter.limit(f"{10 * RATE_MULTIPLIER}/minute")
async def resume_project(
    request: Request,
    project_id: UUID,
    current_user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
) -> Any:
    """
    Resume a paused project.

    Sets the project status back to ACTIVE. Only PAUSED projects can be resumed.
    """
    try:
        project = await project_crud.get(db, id=project_id)

        if not project:
            raise NotFoundError(
                message=f"Project {project_id} not found",
                error_code=ErrorCode.NOT_FOUND,
            )

        _check_project_ownership(project, current_user)

        # Validate current status (business logic validation, not authorization)
        if project.status == ProjectStatus.ACTIVE:
            raise ValidationException(
                message="Project is already active",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="status",
            )

        if project.status == ProjectStatus.ARCHIVED:
            raise ValidationException(
                message="Cannot resume an archived project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="status",
            )

        if project.status == ProjectStatus.COMPLETED:
            raise ValidationException(
                message="Cannot resume a completed project",
                error_code=ErrorCode.VALIDATION_ERROR,
                field="status",
            )

        # Update status to ACTIVE
        updated_project = await project_crud.update(
            db, db_obj=project, obj_in=ProjectUpdate(status=ProjectStatus.ACTIVE)
        )
        logger.info(f"User {current_user.email} resumed project {project.slug}")

        # Get project with counts
        project_data = await project_crud.get_with_counts(
            db, project_id=updated_project.id
        )

        if not project_data:
            raise NotFoundError(
                message=f"Project {project_id} not found after update",
                error_code=ErrorCode.NOT_FOUND,
            )

        return _build_project_response(project_data)

    except (NotFoundError, AuthorizationError, ValidationException):
        raise
    except Exception as e:
        logger.error(f"Error resuming project {project_id}: {e!s}", exc_info=True)
        raise
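A quick client-side walkthrough of the lifecycle endpoints above (create, pause, resume, archive). This is a minimal sketch: the base URL and bearer-token auth scheme are assumptions; only the routes, payload fields, and status semantics come from the file itself.

```python
# Hypothetical walkthrough of the project endpoints defined above.
import httpx

BASE = "http://localhost:8000/api/v1/projects"  # assumed mount point
headers = {"Authorization": "Bearer <access-token>"}  # assumed auth scheme

with httpx.Client(headers=headers) as client:
    # POST "" returns 201 with a full ProjectResponse; counts start at zero
    created = client.post(
        BASE,
        json={"name": "Demo", "slug": "demo", "description": "Scratch project"},
    ).json()
    project_id = created["id"]

    # Lifecycle transitions are POSTs; pausing an already-paused project
    # raises a ValidationException with field="status"
    client.post(f"{BASE}/{project_id}/pause")
    client.post(f"{BASE}/{project_id}/resume")

    # DELETE archives (soft delete) rather than removing the row
    client.delete(f"{BASE}/{project_id}")
```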
@@ -17,10 +17,10 @@ from app.api.dependencies.auth import get_current_user
 from app.core.auth import decode_token
 from app.core.database import get_db
 from app.core.exceptions import AuthorizationError, ErrorCode, NotFoundError
+from app.crud.session import session as session_crud
 from app.models.user import User
 from app.schemas.common import MessageResponse
 from app.schemas.sessions import SessionListResponse, SessionResponse
-from app.services.session_service import session_service

 router = APIRouter()
 logger = logging.getLogger(__name__)
@@ -60,7 +60,7 @@ async def list_my_sessions(
     """
     try:
         # Get all active sessions for user
-        sessions = await session_service.get_user_sessions(
+        sessions = await session_crud.get_user_sessions(
             db, user_id=str(current_user.id), active_only=True
         )

@@ -74,7 +74,9 @@ async def list_my_sessions(
             # For now, we'll mark current based on most recent activity
             except Exception as e:
                 # Optional token parsing - silently ignore failures
-                logger.debug("Failed to decode access token for session marking: %s", e)
+                logger.debug(
+                    f"Failed to decode access token for session marking: {e!s}"
+                )

         # Convert to response format
         session_responses = []
@@ -96,7 +98,7 @@ async def list_my_sessions(
             session_responses.append(session_response)

         logger.info(
-            "User %s listed %s active sessions", current_user.id, len(session_responses)
+            f"User {current_user.id} listed {len(session_responses)} active sessions"
         )

         return SessionListResponse(
@@ -104,7 +106,9 @@ async def list_my_sessions(
         )

     except Exception as e:
-        logger.exception("Error listing sessions for user %s: %s", current_user.id, e)
+        logger.error(
+            f"Error listing sessions for user {current_user.id}: {e!s}", exc_info=True
+        )
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="Failed to retrieve sessions",
@@ -146,7 +150,7 @@ async def revoke_session(
     """
     try:
         # Get the session
-        session = await session_service.get_session(db, str(session_id))
+        session = await session_crud.get(db, id=str(session_id))

         if not session:
             raise NotFoundError(
@@ -157,10 +161,8 @@ async def revoke_session(
         # Verify session belongs to current user
         if str(session.user_id) != str(current_user.id):
             logger.warning(
-                "User %s attempted to revoke session %s belonging to user %s",
-                current_user.id,
-                session_id,
-                session.user_id,
+                f"User {current_user.id} attempted to revoke session {session_id} "
+                f"belonging to user {session.user_id}"
             )
             raise AuthorizationError(
                 message="You can only revoke your own sessions",
@@ -168,13 +170,11 @@ async def revoke_session(
             )

         # Deactivate the session
-        await session_service.deactivate(db, session_id=str(session_id))
+        await session_crud.deactivate(db, session_id=str(session_id))

         logger.info(
-            "User %s revoked session %s (%s)",
-            current_user.id,
-            session_id,
-            session.device_name,
+            f"User {current_user.id} revoked session {session_id} "
+            f"({session.device_name})"
         )

         return MessageResponse(
@@ -185,7 +185,7 @@ async def revoke_session(
     except (NotFoundError, AuthorizationError):
         raise
     except Exception as e:
-        logger.exception("Error revoking session %s: %s", session_id, e)
+        logger.error(f"Error revoking session {session_id}: {e!s}", exc_info=True)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail="Failed to revoke session",
@@ -224,12 +224,12 @@ async def cleanup_expired_sessions(
     """
     try:
         # Use optimized bulk DELETE instead of N individual deletes
-        deleted_count = await session_service.cleanup_expired_for_user(
+        deleted_count = await session_crud.cleanup_expired_for_user(
            db, user_id=str(current_user.id)
         )

         logger.info(
-            "User %s cleaned up %s expired sessions", current_user.id, deleted_count
+            f"User {current_user.id} cleaned up {deleted_count} expired sessions"
         )

         return MessageResponse(
@@ -237,8 +237,9 @@ async def cleanup_expired_sessions(
         )

     except Exception as e:
-        logger.exception(
-            "Error cleaning up sessions for user %s: %s", current_user.id, e
+        logger.error(
+            f"Error cleaning up sessions for user {current_user.id}: {e!s}",
+            exc_info=True,
         )
         await db.rollback()
         raise HTTPException(
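The cleanup route's comment promises "one bulk DELETE instead of N individual deletes". A minimal sketch of what such a CRUD method could look like follows; the `Session` model location and its `user_id` / `expires_at` columns are assumptions, the single-statement shape is the point.

```python
# Sketch of a bulk-delete CRUD method like the one the route calls.
from datetime import UTC, datetime

from sqlalchemy import delete
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.session import Session  # assumed model location


async def cleanup_expired_for_user(db: AsyncSession, *, user_id: str) -> int:
    """Delete all expired sessions for one user in a single DELETE statement."""
    stmt = delete(Session).where(
        Session.user_id == user_id,
        Session.expires_at < datetime.now(UTC),  # assumed expiry column
    )
    result = await db.execute(stmt)
    await db.commit()
    return result.rowcount or 0
```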
1186  backend/app/api/routes/sprints.py  (new file — diff suppressed because it is too large)
@@ -1,5 +1,5 @@
 """
-User management endpoints for database operations.
+User management endpoints for CRUD operations.
 """

 import logging
@@ -13,7 +13,8 @@ from sqlalchemy.ext.asyncio import AsyncSession

 from app.api.dependencies.auth import get_current_superuser, get_current_user
 from app.core.database import get_db
-from app.core.exceptions import AuthorizationError, ErrorCode
+from app.core.exceptions import AuthorizationError, ErrorCode, NotFoundError
+from app.crud.user import user as user_crud
 from app.models.user import User
 from app.schemas.common import (
     MessageResponse,
@@ -24,7 +25,6 @@ from app.schemas.common import (
 )
 from app.schemas.users import PasswordChange, UserResponse, UserUpdate
 from app.services.auth_service import AuthenticationError, AuthService
-from app.services.user_service import user_service

 logger = logging.getLogger(__name__)

@@ -71,7 +71,7 @@ async def list_users(
             filters["is_superuser"] = is_superuser

         # Get paginated users with total count
-        users, total = await user_service.list_users(
+        users, total = await user_crud.get_multi_with_total(
             db,
             skip=pagination.offset,
             limit=pagination.limit,
@@ -90,7 +90,7 @@ async def list_users(

         return PaginatedResponse(data=users, pagination=pagination_meta)
     except Exception as e:
-        logger.exception("Error listing users: %s", e)
+        logger.error(f"Error listing users: {e!s}", exc_info=True)
         raise


@@ -107,9 +107,7 @@ async def list_users(
     """,
     operation_id="get_current_user_profile",
 )
-async def get_current_user_profile(
-    current_user: User = Depends(get_current_user),
-) -> Any:
+def get_current_user_profile(current_user: User = Depends(get_current_user)) -> Any:
     """Get current user's profile."""
     return current_user

@@ -140,16 +138,18 @@ async def update_current_user(
     Users cannot elevate their own permissions (protected by UserUpdate schema validator).
     """
     try:
-        updated_user = await user_service.update_user(
-            db, user=current_user, obj_in=user_update
+        updated_user = await user_crud.update(
+            db, db_obj=current_user, obj_in=user_update
         )
-        logger.info("User %s updated their profile", current_user.id)
+        logger.info(f"User {current_user.id} updated their profile")
         return updated_user
     except ValueError as e:
-        logger.error("Error updating user %s: %s", current_user.id, e)
+        logger.error(f"Error updating user {current_user.id}: {e!s}")
         raise
     except Exception as e:
-        logger.exception("Unexpected error updating user %s: %s", current_user.id, e)
+        logger.error(
+            f"Unexpected error updating user {current_user.id}: {e!s}", exc_info=True
+        )
         raise


@@ -182,9 +182,7 @@ async def get_user_by_id(
     # Check permissions
     if str(user_id) != str(current_user.id) and not current_user.is_superuser:
         logger.warning(
-            "User %s attempted to access user %s without permission",
-            current_user.id,
-            user_id,
+            f"User {current_user.id} attempted to access user {user_id} without permission"
         )
         raise AuthorizationError(
             message="Not enough permissions to view this user",
@@ -192,7 +190,13 @@ async def get_user_by_id(
         )

     # Get user
-    user = await user_service.get_user(db, str(user_id))
+    user = await user_crud.get(db, id=str(user_id))
+    if not user:
+        raise NotFoundError(
+            message=f"User with id {user_id} not found",
+            error_code=ErrorCode.USER_NOT_FOUND,
+        )

     return user


@@ -229,9 +233,7 @@ async def update_user(

     if not is_own_profile and not current_user.is_superuser:
         logger.warning(
-            "User %s attempted to update user %s without permission",
-            current_user.id,
-            user_id,
+            f"User {current_user.id} attempted to update user {user_id} without permission"
         )
         raise AuthorizationError(
             message="Not enough permissions to update this user",
@@ -239,17 +241,22 @@ async def update_user(
         )

     # Get user
-    user = await user_service.get_user(db, str(user_id))
+    user = await user_crud.get(db, id=str(user_id))
+    if not user:
+        raise NotFoundError(
+            message=f"User with id {user_id} not found",
+            error_code=ErrorCode.USER_NOT_FOUND,
+        )

     try:
-        updated_user = await user_service.update_user(db, user=user, obj_in=user_update)
-        logger.info("User %s updated by %s", user_id, current_user.id)
+        updated_user = await user_crud.update(db, db_obj=user, obj_in=user_update)
+        logger.info(f"User {user_id} updated by {current_user.id}")
         return updated_user
     except ValueError as e:
-        logger.error("Error updating user %s: %s", user_id, e)
+        logger.error(f"Error updating user {user_id}: {e!s}")
         raise
     except Exception as e:
-        logger.exception("Unexpected error updating user %s: %s", user_id, e)
+        logger.error(f"Unexpected error updating user {user_id}: {e!s}", exc_info=True)
         raise


@@ -289,19 +296,19 @@ async def change_current_user_password(
         )

         if success:
-            logger.info("User %s changed their password", current_user.id)
+            logger.info(f"User {current_user.id} changed their password")
             return MessageResponse(
                 success=True, message="Password changed successfully"
             )
     except AuthenticationError as e:
         logger.warning(
-            "Failed password change attempt for user %s: %s", current_user.id, e
+            f"Failed password change attempt for user {current_user.id}: {e!s}"
         )
         raise AuthorizationError(
             message=str(e), error_code=ErrorCode.INVALID_CREDENTIALS
         )
     except Exception as e:
-        logger.error("Error changing password for user %s: %s", current_user.id, e)
+        logger.error(f"Error changing password for user {current_user.id}: {e!s}")
         raise


@@ -339,19 +346,24 @@ async def delete_user(
             error_code=ErrorCode.INSUFFICIENT_PERMISSIONS,
         )

-    # Get user (raises NotFoundError if not found)
-    await user_service.get_user(db, str(user_id))
+    # Get user
+    user = await user_crud.get(db, id=str(user_id))
+    if not user:
+        raise NotFoundError(
+            message=f"User with id {user_id} not found",
+            error_code=ErrorCode.USER_NOT_FOUND,
+        )

     try:
         # Use soft delete instead of hard delete
-        await user_service.soft_delete_user(db, str(user_id))
-        logger.info("User %s soft-deleted by %s", user_id, current_user.id)
+        await user_crud.soft_delete(db, id=str(user_id))
+        logger.info(f"User {user_id} soft-deleted by {current_user.id}")
         return MessageResponse(
             success=True, message=f"User {user_id} deleted successfully"
         )
     except ValueError as e:
-        logger.error("Error deleting user %s: %s", user_id, e)
+        logger.error(f"Error deleting user {user_id}: {e!s}")
         raise
     except Exception as e:
-        logger.exception("Unexpected error deleting user %s: %s", user_id, e)
+        logger.error(f"Unexpected error deleting user {user_id}: {e!s}", exc_info=True)
         raise
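The delete route only guarantees that deletion is soft, not how. A sketch of what `user_crud.soft_delete` might look like, assuming the `User` model carries `is_active` and a `deleted_at` audit column (both field names are assumptions):

```python
# Hypothetical soft-delete CRUD method matching the route's call above.
from datetime import UTC, datetime

from sqlalchemy.ext.asyncio import AsyncSession

from app.models.user import User


async def soft_delete(db: AsyncSession, *, id: str) -> User | None:
    """Mark a user as deleted without removing the row."""
    user = await db.get(User, id)
    if user is None:
        return None
    user.is_active = False
    user.deleted_at = datetime.now(UTC)  # assumed audit column
    await db.commit()
    await db.refresh(user)
    return user
```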
116  backend/app/celery_app.py  (new file)
@@ -0,0 +1,116 @@
# app/celery_app.py
"""
Celery application configuration for Syndarix.

This module configures the Celery app for background task processing:
- Agent execution tasks (LLM calls, tool execution)
- Git operations (clone, commit, push, PR creation)
- Issue synchronization with external trackers
- Workflow state management
- Cost tracking and budget monitoring

Architecture:
- Redis as message broker and result backend
- Queue routing for task isolation
- JSON serialization for cross-language compatibility
- Beat scheduler for periodic tasks
"""

from celery import Celery

from app.core.config import settings

# Create Celery application instance
celery_app = Celery(
    "syndarix",
    broker=settings.celery_broker_url,
    backend=settings.celery_result_backend,
)

# Define task queues with their own exchanges and routing keys
TASK_QUEUES = {
    "agent": {"exchange": "agent", "routing_key": "agent"},
    "git": {"exchange": "git", "routing_key": "git"},
    "sync": {"exchange": "sync", "routing_key": "sync"},
    "default": {"exchange": "default", "routing_key": "default"},
}

# Configure Celery
celery_app.conf.update(
    # Serialization
    task_serializer="json",
    accept_content=["json"],
    result_serializer="json",
    # Timezone
    timezone="UTC",
    enable_utc=True,
    # Task imports for auto-discovery
    imports=("app.tasks",),
    # Default queue
    task_default_queue="default",
    # Task queues configuration
    task_queues=TASK_QUEUES,
    # Task routing - route tasks to appropriate queues
    task_routes={
        "app.tasks.agent.*": {"queue": "agent"},
        "app.tasks.git.*": {"queue": "git"},
        "app.tasks.sync.*": {"queue": "sync"},
        "app.tasks.*": {"queue": "default"},
    },
    # Time limits per ADR-003
    task_soft_time_limit=300,  # 5 minutes soft limit
    task_time_limit=600,  # 10 minutes hard limit
    # Result expiration - 24 hours
    result_expires=86400,
    # Broker connection retry
    broker_connection_retry_on_startup=True,
    # Retry configuration per ADR-003 (built-in retry with backoff)
    task_autoretry_for=(Exception,),  # Retry on all exceptions
    task_retry_kwargs={"max_retries": 3, "countdown": 5},  # Initial 5s delay
    task_retry_backoff=True,  # Enable exponential backoff
    task_retry_backoff_max=600,  # Max 10 minutes between retries
    task_retry_jitter=True,  # Add jitter to prevent thundering herd
    # Beat schedule for periodic tasks
    beat_schedule={
        # Cost aggregation every hour per ADR-012
        "aggregate-daily-costs": {
            "task": "app.tasks.cost.aggregate_daily_costs",
            "schedule": 3600.0,  # 1 hour in seconds
        },
        # Reset daily budget counters at midnight UTC
        "reset-daily-budget-counters": {
            "task": "app.tasks.cost.reset_daily_budget_counters",
            "schedule": 86400.0,  # 24 hours in seconds
        },
        # Check for stale workflows every 5 minutes
        "recover-stale-workflows": {
            "task": "app.tasks.workflow.recover_stale_workflows",
            "schedule": 300.0,  # 5 minutes in seconds
        },
        # Incremental issue sync every minute per ADR-011
        "sync-issues-incremental": {
            "task": "app.tasks.sync.sync_issues_incremental",
            "schedule": 60.0,  # 1 minute in seconds
        },
        # Full issue reconciliation every 15 minutes per ADR-011
        "sync-issues-full": {
            "task": "app.tasks.sync.sync_issues_full",
            "schedule": 900.0,  # 15 minutes in seconds
        },
    },
    # Task execution settings
    task_acks_late=True,  # Acknowledge tasks after execution
    task_reject_on_worker_lost=True,  # Reject tasks if worker dies
    worker_prefetch_multiplier=1,  # Fair task distribution
)

# Auto-discover tasks from task modules
celery_app.autodiscover_tasks(
    [
        "app.tasks.agent",
        "app.tasks.git",
        "app.tasks.sync",
        "app.tasks.workflow",
        "app.tasks.cost",
    ]
)
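To see how the routing and retry settings apply in practice, here is a hypothetical task module (the module path and task body are assumptions; the routing behavior follows from the `task_routes` pattern above): any task whose dotted name starts with `app.tasks.agent.` lands on the "agent" queue, and `task_autoretry_for` gives it exponential-backoff retries without per-task boilerplate.

```python
# Hypothetical app/tasks/agent.py
from app.celery_app import celery_app


@celery_app.task(name="app.tasks.agent.run_agent_step")
def run_agent_step(agent_id: str, step: int) -> dict:
    """Execute one agent step; arguments stay JSON-serializable by design."""
    # ... LLM call / tool execution would go here ...
    return {"agent_id": agent_id, "step": step, "status": "done"}


# Enqueueing from an API handler or another task:
# run_agent_step.delay("agent-123", 1)   # routed to the "agent" queue
```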
@@ -1,21 +1,23 @@
 import asyncio
 import logging
 import uuid
 from datetime import UTC, datetime, timedelta
 from functools import partial
 from typing import Any

-import bcrypt
-import jwt
-from jwt.exceptions import (
-    ExpiredSignatureError,
-    InvalidTokenError,
-    MissingRequiredClaimError,
-)
+from jose import JWTError, jwt
+from passlib.context import CryptContext
 from pydantic import ValidationError

 from app.core.config import settings
 from app.schemas.users import TokenData, TokenPayload

+# Suppress passlib bcrypt warnings about ident
+logging.getLogger("passlib").setLevel(logging.ERROR)
+
+# Password hashing context
+pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
+

 # Custom exceptions for auth
 class AuthError(Exception):
@@ -35,16 +37,13 @@ class TokenMissingClaimError(AuthError):


 def verify_password(plain_password: str, hashed_password: str) -> bool:
-    """Verify a password against a bcrypt hash."""
-    return bcrypt.checkpw(
-        plain_password.encode("utf-8"), hashed_password.encode("utf-8")
-    )
+    """Verify a password against a hash."""
+    return pwd_context.verify(plain_password, hashed_password)


 def get_password_hash(password: str) -> str:
-    """Generate a bcrypt password hash."""
-    salt = bcrypt.gensalt()
-    return bcrypt.hashpw(password.encode("utf-8"), salt).decode("utf-8")
+    """Generate a password hash."""
+    return pwd_context.hash(password)


 async def verify_password_async(plain_password: str, hashed_password: str) -> bool:
@@ -61,9 +60,9 @@ async def verify_password_async(plain_password: str, hashed_password: str) -> bool:
     Returns:
         True if password matches, False otherwise
     """
-    loop = asyncio.get_running_loop()
+    loop = asyncio.get_event_loop()
     return await loop.run_in_executor(
-        None, partial(verify_password, plain_password, hashed_password)
+        None, partial(pwd_context.verify, plain_password, hashed_password)
     )

@@ -81,8 +80,8 @@ async def get_password_hash_async(password: str) -> str:
     Returns:
         Hashed password string
     """
-    loop = asyncio.get_running_loop()
-    return await loop.run_in_executor(None, get_password_hash, password)
+    loop = asyncio.get_event_loop()
+    return await loop.run_in_executor(None, pwd_context.hash, password)


 def create_access_token(
@@ -122,7 +121,11 @@ def create_access_token(
     to_encode.update(claims)

     # Create the JWT
-    return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM)
+    encoded_jwt = jwt.encode(
+        to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM
+    )
+
+    return encoded_jwt


 def create_refresh_token(
@@ -151,7 +154,11 @@ def create_refresh_token(
         "type": "refresh",
     }

-    return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM)
+    encoded_jwt = jwt.encode(
+        to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM
+    )
+
+    return encoded_jwt


 def decode_token(token: str, verify_type: str | None = None) -> TokenPayload:
@@ -191,7 +198,7 @@ def decode_token(token: str, verify_type: str | None = None) -> TokenPayload:

     # Reject weak or unexpected algorithms
     # NOTE: These are defensive checks that provide defense-in-depth.
-    # PyJWT rejects these tokens BEFORE we reach here,
+    # The python-jose library rejects these tokens BEFORE we reach here,
     # but we keep these checks in case the library changes or is misconfigured.
     # Coverage: Marked as pragma since library catches first (see tests/core/test_auth_security.py)
     if token_algorithm == "NONE":  # pragma: no cover
@@ -212,11 +219,10 @@ def decode_token(token: str, verify_type: str | None = None) -> TokenPayload:
         token_data = TokenPayload(**payload)
         return token_data

-    except ExpiredSignatureError:
-        raise TokenExpiredError("Token has expired")
-    except MissingRequiredClaimError as e:
-        raise TokenMissingClaimError(f"Token missing required claim: {e}")
-    except InvalidTokenError:
+    except JWTError as e:
+        # Check if the error is due to an expired token
+        if "expired" in str(e).lower():
+            raise TokenExpiredError("Token has expired")
         raise TokenInvalidError("Invalid authentication token")
     except ValidationError:
         raise TokenInvalidError("Invalid token payload")
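A round-trip sketch of the token helpers after this change. `create_access_token`'s full signature isn't visible in these hunks, so the positional subject argument and the `sub` claim field are assumptions; the exception types and `decode_token(verify_type=...)` parameter come straight from the diff.

```python
# Hypothetical usage of the auth helpers above.
from app.core.auth import (
    TokenExpiredError,
    TokenInvalidError,
    create_access_token,
    decode_token,
)

token = create_access_token("user-123")  # assumed positional subject

try:
    payload = decode_token(token, verify_type="access")  # "access" is assumed
    print(payload.sub)  # assumed claim on TokenPayload
except TokenExpiredError:
    print("token expired")  # python-jose reported an expired signature
except TokenInvalidError:
    print("token rejected")
```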
@@ -5,7 +5,7 @@ from pydantic_settings import BaseSettings


 class Settings(BaseSettings):
-    PROJECT_NAME: str = "PragmaStack"
+    PROJECT_NAME: str = "Syndarix"
     VERSION: str = "1.0.0"
     API_V1_STR: str = "/api/v1"

@@ -39,6 +39,32 @@ class Settings(BaseSettings):
     db_pool_timeout: int = 30  # Seconds to wait for a connection
     db_pool_recycle: int = 3600  # Recycle connections after 1 hour

+    # Redis configuration (Syndarix: cache, pub/sub, Celery broker)
+    REDIS_URL: str = Field(
+        default="redis://localhost:6379/0",
+        description="Redis URL for cache, pub/sub, and Celery broker",
+    )
+
+    # Celery configuration (Syndarix: background task processing)
+    CELERY_BROKER_URL: str | None = Field(
+        default=None,
+        description="Celery broker URL (defaults to REDIS_URL if not set)",
+    )
+    CELERY_RESULT_BACKEND: str | None = Field(
+        default=None,
+        description="Celery result backend URL (defaults to REDIS_URL if not set)",
+    )
+
+    @property
+    def celery_broker_url(self) -> str:
+        """Get Celery broker URL, defaulting to Redis."""
+        return self.CELERY_BROKER_URL or self.REDIS_URL
+
+    @property
+    def celery_result_backend(self) -> str:
+        """Get Celery result backend URL, defaulting to Redis."""
+        return self.CELERY_RESULT_BACKEND or self.REDIS_URL
+
     # SQL debugging (disable in production)
     sql_echo: bool = False  # Log SQL statements
     sql_echo_pool: bool = False  # Log connection pool events
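The two properties give a simple fallback chain. A quick illustration of the behavior, assuming any other required settings are supplied via the environment (the URLs here are illustrative):

```python
# Fallback behavior of the Celery URL properties above.
from app.core.config import Settings

s = Settings(REDIS_URL="redis://cache:6379/0")
assert s.celery_broker_url == "redis://cache:6379/0"  # falls back to REDIS_URL

s = Settings(
    REDIS_URL="redis://cache:6379/0",
    CELERY_BROKER_URL="amqp://rabbit:5672//",
)
assert s.celery_broker_url == "amqp://rabbit:5672//"  # explicit value wins
```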
@@ -128,8 +128,8 @@ async def async_transaction_scope() -> AsyncGenerator[AsyncSession, None]:

     Usage:
         async with async_transaction_scope() as db:
-            user = await user_repo.create(db, obj_in=user_create)
-            profile = await profile_repo.create(db, obj_in=profile_create)
+            user = await user_crud.create(db, obj_in=user_create)
+            profile = await profile_crud.create(db, obj_in=profile_create)
             # Both operations committed together
     """
     async with SessionLocal() as session:
@@ -139,7 +139,7 @@ async def async_transaction_scope() -> AsyncGenerator[AsyncSession, None]:
             logger.debug("Async transaction committed successfully")
         except Exception as e:
             await session.rollback()
-            logger.error("Async transaction failed, rolling back: %s", e)
+            logger.error(f"Async transaction failed, rolling back: {e!s}")
             raise
         finally:
             await session.close()
@@ -155,7 +155,7 @@ async def check_async_database_health() -> bool:
         await db.execute(text("SELECT 1"))
         return True
     except Exception as e:
-        logger.error("Async database health check failed: %s", e)
+        logger.error(f"Async database health check failed: {e!s}")
         return False
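The docstring shows the happy path; the rollback path is the scope's real contract. A minimal sketch, reusing the `user_crud` / `profile_crud` names from the docstring above (the `register` wrapper itself is hypothetical):

```python
# If anything inside the block raises, session.rollback() runs and neither
# write becomes visible, so a user row is never committed without its profile.
from app.core.database import async_transaction_scope


async def register(user_create, profile_create) -> None:
    async with async_transaction_scope() as db:
        user = await user_crud.create(db, obj_in=user_create)
        profile = await profile_crud.create(db, obj_in=profile_create)
        # An exception on either line above aborts both inserts together.
```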
@@ -1,366 +0,0 @@ (seed data file deleted; its former contents, compacted:)
{
  "organizations": [
    {"name": "Acme Corp", "slug": "acme-corp", "description": "A leading provider of coyote-catching equipment."},
    {"name": "Globex Corporation", "slug": "globex", "description": "We own the East Coast."},
    {"name": "Soylent Corp", "slug": "soylent", "description": "Making food for the future."},
    {"name": "Initech", "slug": "initech", "description": "Software for the soul."},
    {"name": "Umbrella Corporation", "slug": "umbrella", "description": "Our business is life itself."},
    {"name": "Massive Dynamic", "slug": "massive-dynamic", "description": "What don't we do?"}
  ],
  "users": [
    {"email": "demo@example.com", "password": "DemoPass1234!", "first_name": "Demo", "last_name": "User", "is_superuser": false, "organization_slug": "acme-corp", "role": "member", "is_active": true},
    {"email": "alice@acme.com", "password": "Demo123!", "first_name": "Alice", "last_name": "Smith", "is_superuser": false, "organization_slug": "acme-corp", "role": "admin", "is_active": true},
    {"email": "bob@acme.com", "password": "Demo123!", "first_name": "Bob", "last_name": "Jones", "is_superuser": false, "organization_slug": "acme-corp", "role": "member", "is_active": true},
    {"email": "charlie@acme.com", "password": "Demo123!", "first_name": "Charlie", "last_name": "Brown", "is_superuser": false, "organization_slug": "acme-corp", "role": "member", "is_active": false},
    {"email": "diana@acme.com", "password": "Demo123!", "first_name": "Diana", "last_name": "Prince", "is_superuser": false, "organization_slug": "acme-corp", "role": "member", "is_active": true},
    {"email": "carol@globex.com", "password": "Demo123!", "first_name": "Carol", "last_name": "Williams", "is_superuser": false, "organization_slug": "globex", "role": "owner", "is_active": true},
    {"email": "dan@globex.com", "password": "Demo123!", "first_name": "Dan", "last_name": "Miller", "is_superuser": false, "organization_slug": "globex", "role": "member", "is_active": true},
    {"email": "ellen@globex.com", "password": "Demo123!", "first_name": "Ellen", "last_name": "Ripley", "is_superuser": false, "organization_slug": "globex", "role": "member", "is_active": true},
    {"email": "fred@globex.com", "password": "Demo123!", "first_name": "Fred", "last_name": "Flintstone", "is_superuser": false, "organization_slug": "globex", "role": "member", "is_active": true},
    {"email": "dave@soylent.com", "password": "Demo123!", "first_name": "Dave", "last_name": "Brown", "is_superuser": false, "organization_slug": "soylent", "role": "member", "is_active": true},
    {"email": "gina@soylent.com", "password": "Demo123!", "first_name": "Gina", "last_name": "Torres", "is_superuser": false, "organization_slug": "soylent", "role": "member", "is_active": true},
    {"email": "harry@soylent.com", "password": "Demo123!", "first_name": "Harry", "last_name": "Potter", "is_superuser": false, "organization_slug": "soylent", "role": "admin", "is_active": true},
    {"email": "eve@initech.com", "password": "Demo123!", "first_name": "Eve", "last_name": "Davis", "is_superuser": false, "organization_slug": "initech", "role": "admin", "is_active": true},
    {"email": "iris@initech.com", "password": "Demo123!", "first_name": "Iris", "last_name": "West", "is_superuser": false, "organization_slug": "initech", "role": "member", "is_active": true},
    {"email": "jack@initech.com", "password": "Demo123!", "first_name": "Jack", "last_name": "Sparrow", "is_superuser": false, "organization_slug": "initech", "role": "member", "is_active": false},
    {"email": "frank@umbrella.com", "password": "Demo123!", "first_name": "Frank", "last_name": "Miller", "is_superuser": false, "organization_slug": "umbrella", "role": "member", "is_active": true},
    {"email": "george@umbrella.com", "password": "Demo123!", "first_name": "George", "last_name": "Costanza", "is_superuser": false, "organization_slug": "umbrella", "role": "member", "is_active": false},
    {"email": "kate@umbrella.com", "password": "Demo123!", "first_name": "Kate", "last_name": "Bishop", "is_superuser": false, "organization_slug": "umbrella", "role": "member", "is_active": true},
    {"email": "leo@massive.com", "password": "Demo123!", "first_name": "Leo", "last_name": "Messi", "is_superuser": false, "organization_slug": "massive-dynamic", "role": "owner", "is_active": true},
    {"email": "mary@massive.com", "password": "Demo123!", "first_name": "Mary", "last_name": "Jane", "is_superuser": false, "organization_slug": "massive-dynamic", "role": "member", "is_active": true},
    {"email": "nathan@massive.com", "password": "Demo123!", "first_name": "Nathan", "last_name": "Drake", "is_superuser": false, "organization_slug": "massive-dynamic", "role": "member", "is_active": true},
    {"email": "olivia@massive.com", "password": "Demo123!", "first_name": "Olivia", "last_name": "Dunham", "is_superuser": false, "organization_slug": "massive-dynamic", "role": "admin", "is_active": true},
    {"email": "peter@massive.com", "password": "Demo123!", "first_name": "Peter", "last_name": "Parker", "is_superuser": false, "organization_slug": "massive-dynamic", "role": "member", "is_active": true},
    {"email": "quinn@massive.com", "password": "Demo123!", "first_name": "Quinn", "last_name": "Mallory", "is_superuser": false, "organization_slug": "massive-dynamic", "role": "member", "is_active": true},
    {"email": "grace@example.com", "password": "Demo123!", "first_name": "Grace", "last_name": "Hopper", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true},
    {"email": "heidi@example.com", "password": "Demo123!", "first_name": "Heidi", "last_name": "Klum", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true},
    {"email": "ivan@example.com", "password": "Demo123!", "first_name": "Ivan", "last_name": "Drago", "is_superuser": false, "organization_slug": null, "role": null, "is_active": false},
    {"email": "rachel@example.com", "password": "Demo123!", "first_name": "Rachel", "last_name": "Green", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true},
    {"email": "sam@example.com", "password": "Demo123!", "first_name": "Sam", "last_name": "Wilson", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true},
    {"email": "tony@example.com", "password": "Demo123!", "first_name": "Tony", "last_name": "Stark", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true},
    {"email": "una@example.com", "password": "Demo123!", "first_name": "Una", "last_name": "Chin-Riley", "is_superuser": false, "organization_slug": null, "role": null, "is_active": false},
    {"email": "victor@example.com", "password": "Demo123!", "first_name": "Victor", "last_name": "Von Doom", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true},
    {"email": "wanda@example.com", "password": "Demo123!", "first_name": "Wanda", "last_name": "Maximoff", "is_superuser": false, "organization_slug": null, "role": null, "is_active": true}
  ]
}
@@ -143,11 +143,8 @@ async def api_exception_handler(request: Request, exc: APIException) -> JSONResp
|
||||
Returns a standardized error response with error code and message.
|
||||
"""
|
||||
logger.warning(
|
||||
"API exception: %s - %s (status: %s, path: %s)",
|
||||
exc.error_code,
|
||||
exc.message,
|
||||
exc.status_code,
|
||||
request.url.path,
|
||||
f"API exception: {exc.error_code} - {exc.message} "
|
||||
f"(status: {exc.status_code}, path: {request.url.path})"
|
||||
)
|
||||
|
||||
error_response = ErrorResponse(
|
||||
@@ -189,9 +186,7 @@ async def validation_exception_handler(
|
||||
)
|
||||
)
|
||||
|
||||
logger.warning(
|
||||
"Validation error: %s errors (path: %s)", len(errors), request.url.path
|
||||
)
|
||||
logger.warning(f"Validation error: {len(errors)} errors (path: {request.url.path})")
|
||||
|
||||
error_response = ErrorResponse(errors=errors)
|
||||
|
||||
@@ -223,14 +218,11 @@ async def http_exception_handler(request: Request, exc: HTTPException) -> JSONRe
|
||||
)
|
||||
|
||||
logger.warning(
|
||||
"HTTP exception: %s - %s (path: %s)",
|
||||
exc.status_code,
|
||||
exc.detail,
|
||||
request.url.path,
|
||||
f"HTTP exception: {exc.status_code} - {exc.detail} (path: {request.url.path})"
|
||||
)
|
||||
|
||||
error_response = ErrorResponse(
|
||||
errors=[ErrorDetail(code=error_code, message=str(exc.detail), field=None)]
|
||||
errors=[ErrorDetail(code=error_code, message=str(exc.detail))]
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
@@ -247,11 +239,10 @@ async def unhandled_exception_handler(request: Request, exc: Exception) -> JSONR
|
||||
Logs the full exception and returns a generic error response to avoid
|
||||
leaking sensitive information in production.
|
||||
"""
|
||||
logger.exception(
|
||||
"Unhandled exception: %s - %s (path: %s)",
|
||||
type(exc).__name__,
|
||||
exc,
|
||||
request.url.path,
|
||||
logger.error(
|
||||
f"Unhandled exception: {type(exc).__name__} - {exc!s} "
|
||||
f"(path: {request.url.path})",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# In production, don't expose internal error details
|
||||
@@ -263,7 +254,7 @@ async def unhandled_exception_handler(request: Request, exc: Exception) -> JSONR
|
||||
message = f"{type(exc).__name__}: {exc!s}"
|
||||
|
||||
error_response = ErrorResponse(
|
||||
errors=[ErrorDetail(code=ErrorCode.INTERNAL_ERROR, message=message, field=None)]
|
||||
errors=[ErrorDetail(code=ErrorCode.INTERNAL_ERROR, message=message)]
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
|
||||
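The hunks above consistently trade lazy %s-style logging for eager f-strings, and logger.exception(...) for logger.error(..., exc_info=True). A minimal sketch of the two equivalent call styles, assuming a plain stdlib logger:

import logging

logger = logging.getLogger(__name__)

status, detail, path = 404, "Not Found", "/users"

# Lazy %s style (before): interpolation is deferred until the record is emitted.
logger.warning("HTTP exception: %s - %s (path: %s)", status, detail, path)

# f-string style (after): interpolated eagerly, at the cost of formatting the
# message even when the log level would discard the record.
logger.warning(f"HTTP exception: {status} - {detail} (path: {path})")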
474
backend/app/core/redis.py
Normal file
@@ -0,0 +1,474 @@
|
||||
# app/core/redis.py
|
||||
"""
|
||||
Redis client configuration for caching and pub/sub.
|
||||
|
||||
This module provides async Redis connectivity with connection pooling
|
||||
for FastAPI endpoints and background tasks.
|
||||
|
||||
Features:
|
||||
- Connection pooling for efficient resource usage
|
||||
- Cache operations (get, set, delete, expire)
|
||||
- Pub/sub operations (publish, subscribe)
|
||||
- Health check for monitoring
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from collections.abc import AsyncGenerator
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import Any
|
||||
|
||||
from redis.asyncio import ConnectionPool, Redis
|
||||
from redis.asyncio.client import PubSub
|
||||
from redis.exceptions import ConnectionError, RedisError, TimeoutError
|
||||
|
||||
from app.core.config import settings
|
||||
|
||||
# Configure logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Default TTL for cache entries (1 hour)
|
||||
DEFAULT_CACHE_TTL = 3600
|
||||
|
||||
# Connection pool settings
|
||||
POOL_MAX_CONNECTIONS = 50
|
||||
POOL_TIMEOUT = 10 # seconds
|
||||
|
||||
|
||||
class RedisClient:
|
||||
"""
|
||||
Async Redis client with connection pooling.
|
||||
|
||||
Provides high-level operations for caching and pub/sub
|
||||
with proper error handling and connection management.
|
||||
"""
|
||||
|
||||
def __init__(self, url: str | None = None) -> None:
|
||||
"""
|
||||
Initialize Redis client.
|
||||
|
||||
Args:
|
||||
url: Redis connection URL. Defaults to settings.REDIS_URL.
|
||||
"""
|
||||
self._url = url or settings.REDIS_URL
|
||||
self._pool: ConnectionPool | None = None
|
||||
self._client: Redis | None = None
|
||||
self._lock = asyncio.Lock()
|
||||
|
||||
async def _ensure_pool(self) -> ConnectionPool:
|
||||
"""Ensure connection pool is initialized (thread-safe)."""
|
||||
if self._pool is None:
|
||||
async with self._lock:
|
||||
# Double-check after acquiring lock
|
||||
if self._pool is None:
|
||||
self._pool = ConnectionPool.from_url(
|
||||
self._url,
|
||||
max_connections=POOL_MAX_CONNECTIONS,
|
||||
socket_timeout=POOL_TIMEOUT,
|
||||
socket_connect_timeout=POOL_TIMEOUT,
|
||||
decode_responses=True,
|
||||
health_check_interval=30,
|
||||
)
|
||||
logger.info("Redis connection pool initialized")
|
||||
return self._pool
|
||||
|
||||
async def _get_client(self) -> Redis:
|
||||
"""Get Redis client instance from pool."""
|
||||
pool = await self._ensure_pool()
|
||||
if self._client is None:
|
||||
self._client = Redis(connection_pool=pool)
|
||||
return self._client
|
||||
|
||||
# =========================================================================
|
||||
# Cache Operations
|
||||
# =========================================================================
|
||||
|
||||
async def cache_get(self, key: str) -> str | None:
|
||||
"""
|
||||
Get a value from cache.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
|
||||
Returns:
|
||||
Cached value or None if not found.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
value = await client.get(key)
|
||||
if value is not None:
|
||||
logger.debug(f"Cache hit for key: {key}")
|
||||
else:
|
||||
logger.debug(f"Cache miss for key: {key}")
|
||||
return value
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_get failed for key '{key}': {e}")
|
||||
return None
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_get for key '{key}': {e}")
|
||||
return None
|
||||
|
||||
async def cache_get_json(self, key: str) -> Any | None:
|
||||
"""
|
||||
Get a JSON-serialized value from cache.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
|
||||
Returns:
|
||||
Deserialized value or None if not found.
|
||||
"""
|
||||
value = await self.cache_get(key)
|
||||
if value is not None:
|
||||
try:
|
||||
return json.loads(value)
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(f"Failed to decode JSON for key '{key}': {e}")
|
||||
return None
|
||||
return None
|
||||
|
||||
async def cache_set(
|
||||
self,
|
||||
key: str,
|
||||
value: str,
|
||||
ttl: int | None = None,
|
||||
) -> bool:
|
||||
"""
|
||||
Set a value in cache.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
value: Value to cache.
|
||||
ttl: Time-to-live in seconds. Defaults to DEFAULT_CACHE_TTL.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
ttl = ttl if ttl is not None else DEFAULT_CACHE_TTL
|
||||
await client.set(key, value, ex=ttl)
|
||||
logger.debug(f"Cache set for key: {key} (TTL: {ttl}s)")
|
||||
return True
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_set failed for key '{key}': {e}")
|
||||
return False
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_set for key '{key}': {e}")
|
||||
return False
|
||||
|
||||
async def cache_set_json(
|
||||
self,
|
||||
key: str,
|
||||
value: Any,
|
||||
ttl: int | None = None,
|
||||
) -> bool:
|
||||
"""
|
||||
Set a JSON-serialized value in cache.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
value: Value to serialize and cache.
|
||||
ttl: Time-to-live in seconds.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
try:
|
||||
serialized = json.dumps(value)
|
||||
return await self.cache_set(key, serialized, ttl)
|
||||
except (TypeError, ValueError) as e:
|
||||
logger.error(f"Failed to serialize value for key '{key}': {e}")
|
||||
return False
|
||||
|
||||
async def cache_delete(self, key: str) -> bool:
|
||||
"""
|
||||
Delete a key from cache.
|
||||
|
||||
Args:
|
||||
key: Cache key to delete.
|
||||
|
||||
Returns:
|
||||
True if key was deleted, False otherwise.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
result = await client.delete(key)
|
||||
logger.debug(f"Cache delete for key: {key} (deleted: {result > 0})")
|
||||
return result > 0
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_delete failed for key '{key}': {e}")
|
||||
return False
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_delete for key '{key}': {e}")
|
||||
return False
|
||||
|
||||
async def cache_delete_pattern(self, pattern: str) -> int:
|
||||
"""
|
||||
Delete all keys matching a pattern.
|
||||
|
||||
Args:
|
||||
pattern: Glob-style pattern (e.g., "user:*").
|
||||
|
||||
Returns:
|
||||
Number of keys deleted.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
deleted = 0
|
||||
async for key in client.scan_iter(pattern):
|
||||
await client.delete(key)
|
||||
deleted += 1
|
||||
logger.debug(f"Cache delete pattern '{pattern}': {deleted} keys deleted")
|
||||
return deleted
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_delete_pattern failed for '{pattern}': {e}")
|
||||
return 0
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_delete_pattern for '{pattern}': {e}")
|
||||
return 0
|
||||
|
||||
async def cache_expire(self, key: str, ttl: int) -> bool:
|
||||
"""
|
||||
Set or update TTL for a key.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
ttl: New TTL in seconds.
|
||||
|
||||
Returns:
|
||||
True if TTL was set, False if key doesn't exist.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
result = await client.expire(key, ttl)
|
||||
logger.debug(
|
||||
f"Cache expire for key: {key} (TTL: {ttl}s, success: {result})"
|
||||
)
|
||||
return result
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_expire failed for key '{key}': {e}")
|
||||
return False
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_expire for key '{key}': {e}")
|
||||
return False
|
||||
|
||||
async def cache_exists(self, key: str) -> bool:
|
||||
"""
|
||||
Check if a key exists in cache.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
|
||||
Returns:
|
||||
True if key exists, False otherwise.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
result = await client.exists(key)
|
||||
return result > 0
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_exists failed for key '{key}': {e}")
|
||||
return False
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_exists for key '{key}': {e}")
|
||||
return False
|
||||
|
||||
async def cache_ttl(self, key: str) -> int:
|
||||
"""
|
||||
Get remaining TTL for a key.
|
||||
|
||||
Args:
|
||||
key: Cache key.
|
||||
|
||||
Returns:
|
||||
TTL in seconds, -1 if no TTL, -2 if key doesn't exist.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
return await client.ttl(key)
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis cache_ttl failed for key '{key}': {e}")
|
||||
return -2
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in cache_ttl for key '{key}': {e}")
|
||||
return -2
|
||||
|
||||
# =========================================================================
|
||||
# Pub/Sub Operations
|
||||
# =========================================================================
|
||||
|
||||
async def publish(self, channel: str, message: str | dict) -> int:
|
||||
"""
|
||||
Publish a message to a channel.
|
||||
|
||||
Args:
|
||||
channel: Channel name.
|
||||
message: Message to publish (string or dict for JSON serialization).
|
||||
|
||||
Returns:
|
||||
Number of subscribers that received the message.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
if isinstance(message, dict):
|
||||
message = json.dumps(message)
|
||||
result = await client.publish(channel, message)
|
||||
logger.debug(f"Published to channel '{channel}': {result} subscribers")
|
||||
return result
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis publish failed for channel '{channel}': {e}")
|
||||
return 0
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis error in publish for channel '{channel}': {e}")
|
||||
return 0
|
||||
|
||||
@asynccontextmanager
|
||||
async def subscribe(self, *channels: str) -> AsyncGenerator[PubSub, None]:
|
||||
"""
|
||||
Subscribe to one or more channels.
|
||||
|
||||
Usage:
|
||||
async with redis_client.subscribe("channel1", "channel2") as pubsub:
|
||||
async for message in pubsub.listen():
|
||||
if message["type"] == "message":
|
||||
print(message["data"])
|
||||
|
||||
Args:
|
||||
channels: Channel names to subscribe to.
|
||||
|
||||
Yields:
|
||||
PubSub instance for receiving messages.
|
||||
"""
|
||||
client = await self._get_client()
|
||||
pubsub = client.pubsub()
|
||||
try:
|
||||
await pubsub.subscribe(*channels)
|
||||
logger.debug(f"Subscribed to channels: {channels}")
|
||||
yield pubsub
|
||||
finally:
|
||||
await pubsub.unsubscribe(*channels)
|
||||
await pubsub.close()
|
||||
logger.debug(f"Unsubscribed from channels: {channels}")
|
||||
|
||||
@asynccontextmanager
|
||||
async def psubscribe(self, *patterns: str) -> AsyncGenerator[PubSub, None]:
|
||||
"""
|
||||
Subscribe to channels matching patterns.
|
||||
|
||||
Usage:
|
||||
async with redis_client.psubscribe("user:*") as pubsub:
|
||||
async for message in pubsub.listen():
|
||||
if message["type"] == "pmessage":
|
||||
print(message["pattern"], message["channel"], message["data"])
|
||||
|
||||
Args:
|
||||
patterns: Glob-style patterns to subscribe to.
|
||||
|
||||
Yields:
|
||||
PubSub instance for receiving messages.
|
||||
"""
|
||||
client = await self._get_client()
|
||||
pubsub = client.pubsub()
|
||||
try:
|
||||
await pubsub.psubscribe(*patterns)
|
||||
logger.debug(f"Pattern subscribed: {patterns}")
|
||||
yield pubsub
|
||||
finally:
|
||||
await pubsub.punsubscribe(*patterns)
|
||||
await pubsub.close()
|
||||
logger.debug(f"Pattern unsubscribed: {patterns}")
|
||||
|
||||
# =========================================================================
|
||||
# Health & Connection Management
|
||||
# =========================================================================
|
||||
|
||||
async def health_check(self) -> bool:
|
||||
"""
|
||||
Check if Redis connection is healthy.
|
||||
|
||||
Returns:
|
||||
True if connection is successful, False otherwise.
|
||||
"""
|
||||
try:
|
||||
client = await self._get_client()
|
||||
result = await client.ping()
|
||||
return result is True
|
||||
except (ConnectionError, TimeoutError) as e:
|
||||
logger.error(f"Redis health check failed: {e}")
|
||||
return False
|
||||
except RedisError as e:
|
||||
logger.error(f"Redis health check error: {e}")
|
||||
return False
|
||||
|
||||
async def close(self) -> None:
|
||||
"""
|
||||
Close Redis connections and cleanup resources.
|
||||
|
||||
Should be called during application shutdown.
|
||||
"""
|
||||
if self._client:
|
||||
await self._client.close()
|
||||
self._client = None
|
||||
logger.debug("Redis client closed")
|
||||
|
||||
if self._pool:
|
||||
await self._pool.disconnect()
|
||||
self._pool = None
|
||||
logger.info("Redis connection pool closed")
|
||||
|
||||
async def get_pool_info(self) -> dict[str, Any]:
|
||||
"""
|
||||
Get connection pool statistics.
|
||||
|
||||
Returns:
|
||||
Dictionary with pool information.
|
||||
"""
|
||||
if self._pool is None:
|
||||
return {"status": "not_initialized"}
|
||||
|
||||
return {
|
||||
"status": "active",
|
||||
"max_connections": POOL_MAX_CONNECTIONS,
|
||||
"url": self._url.split("@")[-1] if "@" in self._url else self._url,
|
||||
}
|
||||
|
||||
|
||||
# Global Redis client instance
|
||||
redis_client = RedisClient()
|
||||
|
||||
|
||||
# FastAPI dependency for Redis client
|
||||
async def get_redis() -> AsyncGenerator[RedisClient, None]:
|
||||
"""
|
||||
FastAPI dependency that provides the Redis client.
|
||||
|
||||
Usage:
|
||||
@router.get("/cached-data")
|
||||
async def get_data(redis: RedisClient = Depends(get_redis)):
|
||||
cached = await redis.cache_get("my-key")
|
||||
...
|
||||
"""
|
||||
yield redis_client
|
||||
|
||||
|
||||
# Health check function for use in /health endpoint
|
||||
async def check_redis_health() -> bool:
|
||||
"""
|
||||
Check if Redis connection is healthy.
|
||||
|
||||
Returns:
|
||||
True if connection is successful, False otherwise.
|
||||
"""
|
||||
return await redis_client.health_check()
|
||||
|
||||
|
||||
# Cleanup function for application shutdown
|
||||
async def close_redis() -> None:
|
||||
"""
|
||||
Close Redis connections.
|
||||
|
||||
Should be called during application shutdown.
|
||||
"""
|
||||
await redis_client.close()
|
||||
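A minimal usage sketch of the client above, assuming a reachable Redis at settings.REDIS_URL; the key and channel names are illustrative only:

import asyncio

from app.core.redis import redis_client


async def main() -> None:
    # Cache a JSON-serializable payload for 60 seconds, then read it back.
    await redis_client.cache_set_json("user:42", {"name": "Wanda"}, ttl=60)
    profile = await redis_client.cache_get_json("user:42")
    print(profile)  # {'name': 'Wanda'} on a hit, None on a miss or Redis outage

    # Fan a message out to any current subscribers of the channel.
    received = await redis_client.publish("events", {"type": "profile_updated"})
    print(f"Delivered to {received} subscribers")

    await redis_client.close()


asyncio.run(main())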
26
backend/app/core/repository_exceptions.py
@@ -1,26 +0,0 @@
|
||||
"""
|
||||
Custom exceptions for the repository layer.
|
||||
|
||||
These exceptions allow services and routes to handle database-level errors
|
||||
with proper semantics, without leaking SQLAlchemy internals.
|
||||
"""
|
||||
|
||||
|
||||
class RepositoryError(Exception):
|
||||
"""Base for all repository-layer errors."""
|
||||
|
||||
|
||||
class DuplicateEntryError(RepositoryError):
|
||||
"""Raised on unique constraint violations. Maps to HTTP 409 Conflict."""
|
||||
|
||||
|
||||
class IntegrityConstraintError(RepositoryError):
|
||||
"""Raised on FK or check constraint violations."""
|
||||
|
||||
|
||||
class RecordNotFoundError(RepositoryError):
|
||||
"""Raised when an expected record doesn't exist."""
|
||||
|
||||
|
||||
class InvalidInputError(RepositoryError):
|
||||
"""Raised on bad pagination params, invalid UUIDs, or other invalid inputs."""
|
||||
14
backend/app/crud/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
# app/crud/__init__.py
|
||||
from .oauth import oauth_account, oauth_client, oauth_state
|
||||
from .organization import organization
|
||||
from .session import session as session_crud
|
||||
from .user import user
|
||||
|
||||
__all__ = [
|
||||
"oauth_account",
|
||||
"oauth_client",
|
||||
"oauth_state",
|
||||
"organization",
|
||||
"session_crud",
|
||||
"user",
|
||||
]
|
||||
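A sketch of how these re-exported singletons are consumed; the session handling is assumed and not part of this diff:

from sqlalchemy.ext.asyncio import AsyncSession

from app.crud import session_crud, user


async def load_account(db: AsyncSession, user_id: str):
    # Each exported name is a module-level singleton, so callers never
    # instantiate the CRUD classes themselves.
    account = await user.get(db, id=user_id)
    sessions = await session_crud.get_multi(db, skip=0, limit=10)
    return account, sessions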
177
backend/app/repositories/base.py → backend/app/crud/base.py
Normal file → Executable file
@@ -1,6 +1,6 @@
|
||||
# app/repositories/base.py
|
||||
# app/crud/base.py
|
||||
"""
|
||||
Base repository class for async database operations using SQLAlchemy 2.0 async patterns.
|
||||
Async CRUD operations base class using SQLAlchemy 2.0 async patterns.
|
||||
|
||||
Provides reusable create, read, update, and delete operations for all models.
|
||||
"""
|
||||
@@ -18,11 +18,6 @@ from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy.orm import Load
|
||||
|
||||
from app.core.database import Base
|
||||
from app.core.repository_exceptions import (
|
||||
DuplicateEntryError,
|
||||
IntegrityConstraintError,
|
||||
InvalidInputError,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -31,16 +26,16 @@ CreateSchemaType = TypeVar("CreateSchemaType", bound=BaseModel)
|
||||
UpdateSchemaType = TypeVar("UpdateSchemaType", bound=BaseModel)
|
||||
|
||||
|
||||
class BaseRepository[
|
||||
class CRUDBase[
|
||||
ModelType: Base,
|
||||
CreateSchemaType: BaseModel,
|
||||
UpdateSchemaType: BaseModel,
|
||||
]:
|
||||
"""Async repository operations for a model."""
|
||||
"""Async CRUD operations for a model."""
|
||||
|
||||
def __init__(self, model: type[ModelType]):
|
||||
"""
|
||||
Repository object with default async methods to Create, Read, Update, Delete.
|
||||
CRUD object with default async methods to Create, Read, Update, Delete.
|
||||
|
||||
Args:
|
||||
model: A SQLAlchemy model class
|
||||
@@ -61,19 +56,26 @@ class BaseRepository[
|
||||
|
||||
Returns:
|
||||
Model instance or None if not found
|
||||
|
||||
Example:
|
||||
# Eager load user relationship
|
||||
from sqlalchemy.orm import joinedload
|
||||
session = await session_crud.get(db, id=session_id, options=[joinedload(UserSession.user)])
|
||||
"""
|
||||
# Validate UUID format and convert to UUID object if string
|
||||
try:
|
||||
if isinstance(id, uuid.UUID):
|
||||
uuid_obj = id
|
||||
else:
|
||||
uuid_obj = uuid.UUID(str(id))
|
||||
except (ValueError, AttributeError, TypeError) as e:
|
||||
logger.warning("Invalid UUID format: %s - %s", id, e)
|
||||
logger.warning(f"Invalid UUID format: {id} - {e!s}")
|
||||
return None
|
||||
|
||||
try:
|
||||
query = select(self.model).where(self.model.id == uuid_obj)
|
||||
|
||||
# Apply eager loading options if provided
|
||||
if options:
|
||||
for option in options:
|
||||
query = query.options(option)
|
||||
@@ -81,9 +83,7 @@ class BaseRepository[
|
||||
result = await db.execute(query)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error retrieving %s with id %s: %s", self.model.__name__, id, e
|
||||
)
|
||||
logger.error(f"Error retrieving {self.model.__name__} with id {id}: {e!s}")
|
||||
raise
|
||||
|
||||
async def get_multi(
|
||||
@@ -96,17 +96,28 @@ class BaseRepository[
|
||||
) -> list[ModelType]:
|
||||
"""
|
||||
Get multiple records with pagination validation and optional eager loading.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
skip: Number of records to skip
|
||||
limit: Maximum number of records to return
|
||||
options: Optional list of SQLAlchemy load options for eager loading
|
||||
|
||||
Returns:
|
||||
List of model instances
|
||||
"""
|
||||
# Validate pagination parameters
|
||||
if skip < 0:
|
||||
raise InvalidInputError("skip must be non-negative")
|
||||
raise ValueError("skip must be non-negative")
|
||||
if limit < 0:
|
||||
raise InvalidInputError("limit must be non-negative")
|
||||
raise ValueError("limit must be non-negative")
|
||||
if limit > 1000:
|
||||
raise InvalidInputError("Maximum limit is 1000")
|
||||
raise ValueError("Maximum limit is 1000")
|
||||
|
||||
try:
|
||||
query = select(self.model).order_by(self.model.id).offset(skip).limit(limit)
|
||||
query = select(self.model).offset(skip).limit(limit)
|
||||
|
||||
# Apply eager loading options if provided
|
||||
if options:
|
||||
for option in options:
|
||||
query = query.options(option)
|
||||
@@ -115,7 +126,7 @@ class BaseRepository[
|
||||
return list(result.scalars().all())
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error retrieving multiple %s records: %s", self.model.__name__, e
|
||||
f"Error retrieving multiple {self.model.__name__} records: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
@@ -125,8 +136,9 @@ class BaseRepository[
|
||||
"""Create a new record with error handling.
|
||||
|
||||
NOTE: This method is defensive code that's never called in practice.
|
||||
All repository subclasses override this method with their own implementations.
|
||||
Marked as pragma: no cover to avoid false coverage gaps.
|
||||
All CRUD subclasses (CRUDUser, CRUDOrganization, CRUDSession) override this method
|
||||
with their own implementations, so the base implementation and its exception handlers
|
||||
are never executed. Marked as pragma: no cover to avoid false coverage gaps.
|
||||
"""
|
||||
try: # pragma: no cover
|
||||
obj_in_data = jsonable_encoder(obj_in)
|
||||
@@ -140,24 +152,22 @@ class BaseRepository[
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
if "unique" in error_msg.lower() or "duplicate" in error_msg.lower():
|
||||
logger.warning(
|
||||
"Duplicate entry attempted for %s: %s",
|
||||
self.model.__name__,
|
||||
error_msg,
|
||||
f"Duplicate entry attempted for {self.model.__name__}: {error_msg}"
|
||||
)
|
||||
raise DuplicateEntryError(
|
||||
raise ValueError(
|
||||
f"A {self.model.__name__} with this data already exists"
|
||||
)
|
||||
logger.error(
|
||||
"Integrity error creating %s: %s", self.model.__name__, error_msg
|
||||
)
|
||||
raise IntegrityConstraintError(f"Database integrity error: {error_msg}")
|
||||
logger.error(f"Integrity error creating {self.model.__name__}: {error_msg}")
|
||||
raise ValueError(f"Database integrity error: {error_msg}")
|
||||
except (OperationalError, DataError) as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error("Database error creating %s: %s", self.model.__name__, e)
|
||||
raise IntegrityConstraintError(f"Database operation failed: {e!s}")
|
||||
logger.error(f"Database error creating {self.model.__name__}: {e!s}")
|
||||
raise ValueError(f"Database operation failed: {e!s}")
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.exception("Unexpected error creating %s: %s", self.model.__name__, e)
|
||||
logger.error(
|
||||
f"Unexpected error creating {self.model.__name__}: {e!s}", exc_info=True
|
||||
)
|
||||
raise
|
||||
|
||||
async def update(
|
||||
@@ -188,35 +198,34 @@ class BaseRepository[
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
if "unique" in error_msg.lower() or "duplicate" in error_msg.lower():
|
||||
logger.warning(
|
||||
"Duplicate entry attempted for %s: %s",
|
||||
self.model.__name__,
|
||||
error_msg,
|
||||
f"Duplicate entry attempted for {self.model.__name__}: {error_msg}"
|
||||
)
|
||||
raise DuplicateEntryError(
|
||||
raise ValueError(
|
||||
f"A {self.model.__name__} with this data already exists"
|
||||
)
|
||||
logger.error(
|
||||
"Integrity error updating %s: %s", self.model.__name__, error_msg
|
||||
)
|
||||
raise IntegrityConstraintError(f"Database integrity error: {error_msg}")
|
||||
logger.error(f"Integrity error updating {self.model.__name__}: {error_msg}")
|
||||
raise ValueError(f"Database integrity error: {error_msg}")
|
||||
except (OperationalError, DataError) as e:
|
||||
await db.rollback()
|
||||
logger.error("Database error updating %s: %s", self.model.__name__, e)
|
||||
raise IntegrityConstraintError(f"Database operation failed: {e!s}")
|
||||
logger.error(f"Database error updating {self.model.__name__}: {e!s}")
|
||||
raise ValueError(f"Database operation failed: {e!s}")
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.exception("Unexpected error updating %s: %s", self.model.__name__, e)
|
||||
logger.error(
|
||||
f"Unexpected error updating {self.model.__name__}: {e!s}", exc_info=True
|
||||
)
|
||||
raise
|
||||
|
||||
async def remove(self, db: AsyncSession, *, id: str) -> ModelType | None:
|
||||
"""Delete a record with error handling and null check."""
|
||||
# Validate UUID format and convert to UUID object if string
|
||||
try:
|
||||
if isinstance(id, uuid.UUID):
|
||||
uuid_obj = id
|
||||
else:
|
||||
uuid_obj = uuid.UUID(str(id))
|
||||
except (ValueError, AttributeError, TypeError) as e:
|
||||
logger.warning("Invalid UUID format for deletion: %s - %s", id, e)
|
||||
logger.warning(f"Invalid UUID format for deletion: {id} - {e!s}")
|
||||
return None
|
||||
|
||||
try:
|
||||
@@ -227,7 +236,7 @@ class BaseRepository[
|
||||
|
||||
if obj is None:
|
||||
logger.warning(
|
||||
"%s with id %s not found for deletion", self.model.__name__, id
|
||||
f"{self.model.__name__} with id {id} not found for deletion"
|
||||
)
|
||||
return None
|
||||
|
||||
@@ -237,16 +246,15 @@ class BaseRepository[
|
||||
except IntegrityError as e:
|
||||
await db.rollback()
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
logger.error(
|
||||
"Integrity error deleting %s: %s", self.model.__name__, error_msg
|
||||
)
|
||||
raise IntegrityConstraintError(
|
||||
logger.error(f"Integrity error deleting {self.model.__name__}: {error_msg}")
|
||||
raise ValueError(
|
||||
f"Cannot delete {self.model.__name__}: referenced by other records"
|
||||
)
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.exception(
|
||||
"Error deleting %s with id %s: %s", self.model.__name__, id, e
|
||||
logger.error(
|
||||
f"Error deleting {self.model.__name__} with id {id}: {e!s}",
|
||||
exc_info=True,
|
||||
)
|
||||
raise
|
||||
|
||||
@@ -264,40 +272,57 @@ class BaseRepository[
|
||||
Get multiple records with total count, filtering, and sorting.
|
||||
|
||||
NOTE: This method is defensive code that's never called in practice.
|
||||
All repository subclasses override this method with their own implementations.
|
||||
All CRUD subclasses (CRUDUser, CRUDOrganization, CRUDSession) override this method
|
||||
with their own implementations that include additional parameters like search.
|
||||
Marked as pragma: no cover to avoid false coverage gaps.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
skip: Number of records to skip
|
||||
limit: Maximum number of records to return
|
||||
sort_by: Field name to sort by (must be a valid model attribute)
|
||||
sort_order: Sort order ("asc" or "desc")
|
||||
filters: Dictionary of filters (field_name: value)
|
||||
|
||||
Returns:
|
||||
Tuple of (items, total_count)
|
||||
"""
|
||||
# Validate pagination parameters
|
||||
if skip < 0:
|
||||
raise InvalidInputError("skip must be non-negative")
|
||||
raise ValueError("skip must be non-negative")
|
||||
if limit < 0:
|
||||
raise InvalidInputError("limit must be non-negative")
|
||||
raise ValueError("limit must be non-negative")
|
||||
if limit > 1000:
|
||||
raise InvalidInputError("Maximum limit is 1000")
|
||||
raise ValueError("Maximum limit is 1000")
|
||||
|
||||
try:
|
||||
# Build base query
|
||||
query = select(self.model)
|
||||
|
||||
# Exclude soft-deleted records by default
|
||||
if hasattr(self.model, "deleted_at"):
|
||||
query = query.where(self.model.deleted_at.is_(None))
|
||||
|
||||
# Apply filters
|
||||
if filters:
|
||||
for field, value in filters.items():
|
||||
if hasattr(self.model, field) and value is not None:
|
||||
query = query.where(getattr(self.model, field) == value)
|
||||
|
||||
# Get total count (before pagination)
|
||||
count_query = select(func.count()).select_from(query.alias())
|
||||
count_result = await db.execute(count_query)
|
||||
total = count_result.scalar_one()
|
||||
|
||||
# Apply sorting
|
||||
if sort_by and hasattr(self.model, sort_by):
|
||||
sort_column = getattr(self.model, sort_by)
|
||||
if sort_order.lower() == "desc":
|
||||
query = query.order_by(sort_column.desc())
|
||||
else:
|
||||
query = query.order_by(sort_column.asc())
|
||||
else:
|
||||
query = query.order_by(self.model.id)
|
||||
|
||||
# Apply pagination
|
||||
query = query.offset(skip).limit(limit)
|
||||
items_result = await db.execute(query)
|
||||
items = list(items_result.scalars().all())
|
||||
@@ -305,7 +330,7 @@ class BaseRepository[
|
||||
return items, total
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(
|
||||
"Error retrieving paginated %s records: %s", self.model.__name__, e
|
||||
f"Error retrieving paginated {self.model.__name__} records: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
@@ -315,7 +340,7 @@ class BaseRepository[
|
||||
result = await db.execute(select(func.count(self.model.id)))
|
||||
return result.scalar_one()
|
||||
except Exception as e:
|
||||
logger.error("Error counting %s records: %s", self.model.__name__, e)
|
||||
logger.error(f"Error counting {self.model.__name__} records: {e!s}")
|
||||
raise
|
||||
|
||||
async def exists(self, db: AsyncSession, id: str) -> bool:
|
||||
@@ -331,13 +356,14 @@ class BaseRepository[
|
||||
"""
|
||||
from datetime import datetime
|
||||
|
||||
# Validate UUID format and convert to UUID object if string
|
||||
try:
|
||||
if isinstance(id, uuid.UUID):
|
||||
uuid_obj = id
|
||||
else:
|
||||
uuid_obj = uuid.UUID(str(id))
|
||||
except (ValueError, AttributeError, TypeError) as e:
|
||||
logger.warning("Invalid UUID format for soft deletion: %s - %s", id, e)
|
||||
logger.warning(f"Invalid UUID format for soft deletion: {id} - {e!s}")
|
||||
return None
|
||||
|
||||
try:
|
||||
@@ -348,16 +374,18 @@ class BaseRepository[
|
||||
|
||||
if obj is None:
|
||||
logger.warning(
|
||||
"%s with id %s not found for soft deletion", self.model.__name__, id
|
||||
f"{self.model.__name__} with id {id} not found for soft deletion"
|
||||
)
|
||||
return None
|
||||
|
||||
# Check if model supports soft deletes
|
||||
if not hasattr(self.model, "deleted_at"):
|
||||
logger.error("%s does not support soft deletes", self.model.__name__)
|
||||
raise InvalidInputError(
|
||||
logger.error(f"{self.model.__name__} does not support soft deletes")
|
||||
raise ValueError(
|
||||
f"{self.model.__name__} does not have a deleted_at column"
|
||||
)
|
||||
|
||||
# Set deleted_at timestamp
|
||||
obj.deleted_at = datetime.now(UTC)
|
||||
db.add(obj)
|
||||
await db.commit()
|
||||
@@ -365,8 +393,9 @@ class BaseRepository[
|
||||
return obj
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.exception(
|
||||
"Error soft deleting %s with id %s: %s", self.model.__name__, id, e
|
||||
logger.error(
|
||||
f"Error soft deleting {self.model.__name__} with id {id}: {e!s}",
|
||||
exc_info=True,
|
||||
)
|
||||
raise
|
||||
|
||||
@@ -376,16 +405,18 @@ class BaseRepository[
|
||||
|
||||
Only works if the model has a 'deleted_at' column.
|
||||
"""
|
||||
# Validate UUID format
|
||||
try:
|
||||
if isinstance(id, uuid.UUID):
|
||||
uuid_obj = id
|
||||
else:
|
||||
uuid_obj = uuid.UUID(str(id))
|
||||
except (ValueError, AttributeError, TypeError) as e:
|
||||
logger.warning("Invalid UUID format for restoration: %s - %s", id, e)
|
||||
logger.warning(f"Invalid UUID format for restoration: {id} - {e!s}")
|
||||
return None
|
||||
|
||||
try:
|
||||
# Find the soft-deleted record
|
||||
if hasattr(self.model, "deleted_at"):
|
||||
result = await db.execute(
|
||||
select(self.model).where(
|
||||
@@ -394,19 +425,18 @@ class BaseRepository[
|
||||
)
|
||||
obj = result.scalar_one_or_none()
|
||||
else:
|
||||
logger.error("%s does not support soft deletes", self.model.__name__)
|
||||
raise InvalidInputError(
|
||||
logger.error(f"{self.model.__name__} does not support soft deletes")
|
||||
raise ValueError(
|
||||
f"{self.model.__name__} does not have a deleted_at column"
|
||||
)
|
||||
|
||||
if obj is None:
|
||||
logger.warning(
|
||||
"Soft-deleted %s with id %s not found for restoration",
|
||||
self.model.__name__,
|
||||
id,
|
||||
f"Soft-deleted {self.model.__name__} with id {id} not found for restoration"
|
||||
)
|
||||
return None
|
||||
|
||||
# Clear deleted_at timestamp
|
||||
obj.deleted_at = None
|
||||
db.add(obj)
|
||||
await db.commit()
|
||||
@@ -414,7 +444,8 @@ class BaseRepository[
|
||||
return obj
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.exception(
|
||||
"Error restoring %s with id %s: %s", self.model.__name__, id, e
|
||||
logger.error(
|
||||
f"Error restoring {self.model.__name__} with id {id}: {e!s}",
|
||||
exc_info=True,
|
||||
)
|
||||
raise
|
||||
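For context, a minimal sketch of wiring a new model into the generic base above; Widget and its schemas are hypothetical stand-ins, not part of this PR:

from app.crud.base import CRUDBase
from app.models.widget import Widget  # hypothetical model
from app.schemas.widget import WidgetCreate, WidgetUpdate  # hypothetical schemas


class CRUDWidget(CRUDBase[Widget, WidgetCreate, WidgetUpdate]):
    """Inherits get/get_multi/create/update/remove plus the soft-delete helpers."""


# Module-level singleton, mirroring the pattern in app/crud/__init__.py.
widget = CRUDWidget(Widget)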
718
backend/app/crud/oauth.py
Executable file
@@ -0,0 +1,718 @@
|
||||
"""
|
||||
Async CRUD operations for OAuth models using SQLAlchemy 2.0 patterns.
|
||||
|
||||
Provides operations for:
|
||||
- OAuthAccount: Managing linked OAuth provider accounts
|
||||
- OAuthState: CSRF protection state during OAuth flows
|
||||
- OAuthClient: Registered OAuth clients (provider mode skeleton)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import secrets
|
||||
from datetime import UTC, datetime
|
||||
from uuid import UUID
|
||||
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy import and_, delete, select
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy.orm import joinedload
|
||||
|
||||
from app.crud.base import CRUDBase
|
||||
from app.models.oauth_account import OAuthAccount
|
||||
from app.models.oauth_client import OAuthClient
|
||||
from app.models.oauth_state import OAuthState
|
||||
from app.schemas.oauth import OAuthAccountCreate, OAuthClientCreate, OAuthStateCreate
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# OAuth Account CRUD
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class EmptySchema(BaseModel):
|
||||
"""Placeholder schema for CRUD operations that don't need update schemas."""
|
||||
|
||||
|
||||
class CRUDOAuthAccount(CRUDBase[OAuthAccount, OAuthAccountCreate, EmptySchema]):
|
||||
"""CRUD operations for OAuth account links."""
|
||||
|
||||
async def get_by_provider_id(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
provider: str,
|
||||
provider_user_id: str,
|
||||
) -> OAuthAccount | None:
|
||||
"""
|
||||
Get OAuth account by provider and provider user ID.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
provider: OAuth provider name (google, github)
|
||||
provider_user_id: User ID from the OAuth provider
|
||||
|
||||
Returns:
|
||||
OAuthAccount if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
result = await db.execute(
|
||||
select(OAuthAccount)
|
||||
.where(
|
||||
and_(
|
||||
OAuthAccount.provider == provider,
|
||||
OAuthAccount.provider_user_id == provider_user_id,
|
||||
)
|
||||
)
|
||||
.options(joinedload(OAuthAccount.user))
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(
|
||||
f"Error getting OAuth account for {provider}:{provider_user_id}: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
async def get_by_provider_email(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
provider: str,
|
||||
email: str,
|
||||
) -> OAuthAccount | None:
|
||||
"""
|
||||
Get OAuth account by provider and email.
|
||||
|
||||
Used for auto-linking existing accounts by email.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
provider: OAuth provider name
|
||||
email: Email address from the OAuth provider
|
||||
|
||||
Returns:
|
||||
OAuthAccount if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
result = await db.execute(
|
||||
select(OAuthAccount)
|
||||
.where(
|
||||
and_(
|
||||
OAuthAccount.provider == provider,
|
||||
OAuthAccount.provider_email == email,
|
||||
)
|
||||
)
|
||||
.options(joinedload(OAuthAccount.user))
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(
|
||||
f"Error getting OAuth account for {provider} email {email}: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
async def get_user_accounts(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
user_id: str | UUID,
|
||||
) -> list[OAuthAccount]:
|
||||
"""
|
||||
Get all OAuth accounts linked to a user.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
user_id: User ID
|
||||
|
||||
Returns:
|
||||
List of OAuthAccount objects
|
||||
"""
|
||||
try:
|
||||
user_uuid = UUID(str(user_id)) if isinstance(user_id, str) else user_id
|
||||
|
||||
result = await db.execute(
|
||||
select(OAuthAccount)
|
||||
.where(OAuthAccount.user_id == user_uuid)
|
||||
.order_by(OAuthAccount.created_at.desc())
|
||||
)
|
||||
return list(result.scalars().all())
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(f"Error getting OAuth accounts for user {user_id}: {e!s}")
|
||||
raise
|
||||
|
||||
async def get_user_account_by_provider(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
user_id: str | UUID,
|
||||
provider: str,
|
||||
) -> OAuthAccount | None:
|
||||
"""
|
||||
Get a specific OAuth account for a user and provider.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
user_id: User ID
|
||||
provider: OAuth provider name
|
||||
|
||||
Returns:
|
||||
OAuthAccount if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
user_uuid = UUID(str(user_id)) if isinstance(user_id, str) else user_id
|
||||
|
||||
result = await db.execute(
|
||||
select(OAuthAccount).where(
|
||||
and_(
|
||||
OAuthAccount.user_id == user_uuid,
|
||||
OAuthAccount.provider == provider,
|
||||
)
|
||||
)
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(
|
||||
f"Error getting OAuth account for user {user_id}, provider {provider}: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
async def create_account(
|
||||
self, db: AsyncSession, *, obj_in: OAuthAccountCreate
|
||||
) -> OAuthAccount:
|
||||
"""
|
||||
Create a new OAuth account link.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
obj_in: OAuth account creation data
|
||||
|
||||
Returns:
|
||||
Created OAuthAccount
|
||||
|
||||
Raises:
|
||||
ValueError: If account already exists or creation fails
|
||||
"""
|
||||
try:
|
||||
db_obj = OAuthAccount(
|
||||
user_id=obj_in.user_id,
|
||||
provider=obj_in.provider,
|
||||
provider_user_id=obj_in.provider_user_id,
|
||||
provider_email=obj_in.provider_email,
|
||||
access_token_encrypted=obj_in.access_token_encrypted,
|
||||
refresh_token_encrypted=obj_in.refresh_token_encrypted,
|
||||
token_expires_at=obj_in.token_expires_at,
|
||||
)
|
||||
db.add(db_obj)
|
||||
await db.commit()
|
||||
await db.refresh(db_obj)
|
||||
|
||||
logger.info(
|
||||
f"OAuth account created: {obj_in.provider} linked to user {obj_in.user_id}"
|
||||
)
|
||||
return db_obj
|
||||
except IntegrityError as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
if "uq_oauth_provider_user" in error_msg.lower():
|
||||
logger.warning(
|
||||
f"OAuth account already exists: {obj_in.provider}:{obj_in.provider_user_id}"
|
||||
)
|
||||
raise ValueError(
|
||||
f"This {obj_in.provider} account is already linked to another user"
|
||||
)
|
||||
logger.error(f"Integrity error creating OAuth account: {error_msg}")
|
||||
raise ValueError(f"Failed to create OAuth account: {error_msg}")
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error creating OAuth account: {e!s}", exc_info=True)
|
||||
raise
|
||||
|
||||
async def delete_account(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
user_id: str | UUID,
|
||||
provider: str,
|
||||
) -> bool:
|
||||
"""
|
||||
Delete an OAuth account link.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
user_id: User ID
|
||||
provider: OAuth provider name
|
||||
|
||||
Returns:
|
||||
True if deleted, False if not found
|
||||
"""
|
||||
try:
|
||||
user_uuid = UUID(str(user_id)) if isinstance(user_id, str) else user_id
|
||||
|
||||
result = await db.execute(
|
||||
delete(OAuthAccount).where(
|
||||
and_(
|
||||
OAuthAccount.user_id == user_uuid,
|
||||
OAuthAccount.provider == provider,
|
||||
)
|
||||
)
|
||||
)
|
||||
await db.commit()
|
||||
|
||||
deleted = result.rowcount > 0
|
||||
if deleted:
|
||||
logger.info(
|
||||
f"OAuth account deleted: {provider} unlinked from user {user_id}"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"OAuth account not found for deletion: {provider} for user {user_id}"
|
||||
)
|
||||
|
||||
return deleted
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(
|
||||
f"Error deleting OAuth account {provider} for user {user_id}: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
async def update_tokens(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
account: OAuthAccount,
|
||||
access_token_encrypted: str | None = None,
|
||||
refresh_token_encrypted: str | None = None,
|
||||
token_expires_at: datetime | None = None,
|
||||
) -> OAuthAccount:
|
||||
"""
|
||||
Update OAuth tokens for an account.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
account: OAuthAccount to update
|
||||
access_token_encrypted: New encrypted access token
|
||||
refresh_token_encrypted: New encrypted refresh token
|
||||
token_expires_at: New token expiration time
|
||||
|
||||
Returns:
|
||||
Updated OAuthAccount
|
||||
"""
|
||||
try:
|
||||
if access_token_encrypted is not None:
|
||||
account.access_token_encrypted = access_token_encrypted
|
||||
if refresh_token_encrypted is not None:
|
||||
account.refresh_token_encrypted = refresh_token_encrypted
|
||||
if token_expires_at is not None:
|
||||
account.token_expires_at = token_expires_at
|
||||
|
||||
db.add(account)
|
||||
await db.commit()
|
||||
await db.refresh(account)
|
||||
|
||||
return account
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error updating OAuth tokens: {e!s}")
|
||||
raise
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# OAuth State CRUD
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class CRUDOAuthState(CRUDBase[OAuthState, OAuthStateCreate, EmptySchema]):
|
||||
"""CRUD operations for OAuth state (CSRF protection)."""
|
||||
|
||||
async def create_state(
|
||||
self, db: AsyncSession, *, obj_in: OAuthStateCreate
|
||||
) -> OAuthState:
|
||||
"""
|
||||
Create a new OAuth state for CSRF protection.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
obj_in: OAuth state creation data
|
||||
|
||||
Returns:
|
||||
Created OAuthState
|
||||
"""
|
||||
try:
|
||||
db_obj = OAuthState(
|
||||
state=obj_in.state,
|
||||
code_verifier=obj_in.code_verifier,
|
||||
nonce=obj_in.nonce,
|
||||
provider=obj_in.provider,
|
||||
redirect_uri=obj_in.redirect_uri,
|
||||
user_id=obj_in.user_id,
|
||||
expires_at=obj_in.expires_at,
|
||||
)
|
||||
db.add(db_obj)
|
||||
await db.commit()
|
||||
await db.refresh(db_obj)
|
||||
|
||||
logger.debug(f"OAuth state created for {obj_in.provider}")
|
||||
return db_obj
|
||||
except IntegrityError as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
# State collision (extremely rare with cryptographically random state values)
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
logger.error(f"OAuth state collision: {error_msg}")
|
||||
raise ValueError("Failed to create OAuth state, please retry")
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error creating OAuth state: {e!s}", exc_info=True)
|
||||
raise
|
||||
|
||||
async def get_and_consume_state(
|
||||
self, db: AsyncSession, *, state: str
|
||||
) -> OAuthState | None:
|
||||
"""
|
||||
Get and delete OAuth state (consume it).
|
||||
|
||||
This ensures each state can only be used once (replay protection).
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
state: State string to look up
|
||||
|
||||
Returns:
|
||||
OAuthState if found and valid, None otherwise
|
||||
"""
|
||||
try:
|
||||
# Get the state
|
||||
result = await db.execute(
|
||||
select(OAuthState).where(OAuthState.state == state)
|
||||
)
|
||||
db_obj = result.scalar_one_or_none()
|
||||
|
||||
if db_obj is None:
|
||||
logger.warning(f"OAuth state not found: {state[:8]}...")
|
||||
return None
|
||||
|
||||
# Check expiration
|
||||
# Handle both timezone-aware and timezone-naive datetimes
|
||||
now = datetime.now(UTC)
|
||||
expires_at = db_obj.expires_at
|
||||
if expires_at.tzinfo is None:
|
||||
# SQLite returns naive datetimes, assume UTC
|
||||
expires_at = expires_at.replace(tzinfo=UTC)
|
||||
|
||||
if expires_at < now:
|
||||
logger.warning(f"OAuth state expired: {state[:8]}...")
|
||||
await db.delete(db_obj)
|
||||
await db.commit()
|
||||
return None
|
||||
|
||||
# Delete it (consume)
|
||||
await db.delete(db_obj)
|
||||
await db.commit()
|
||||
|
||||
logger.debug(f"OAuth state consumed: {state[:8]}...")
|
||||
return db_obj
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error consuming OAuth state: {e!s}")
|
||||
raise
|
||||
|
||||
async def cleanup_expired(self, db: AsyncSession) -> int:
|
||||
"""
|
||||
Clean up expired OAuth states.
|
||||
|
||||
Should be called periodically to remove stale states.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
|
||||
Returns:
|
||||
Number of states deleted
|
||||
"""
|
||||
try:
|
||||
now = datetime.now(UTC)
|
||||
|
||||
stmt = delete(OAuthState).where(OAuthState.expires_at < now)
|
||||
result = await db.execute(stmt)
|
||||
await db.commit()
|
||||
|
||||
count = result.rowcount
|
||||
if count > 0:
|
||||
logger.info(f"Cleaned up {count} expired OAuth states")
|
||||
|
||||
return count
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error cleaning up expired OAuth states: {e!s}")
|
||||
raise
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# OAuth Client CRUD (Provider Mode - Skeleton)
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class CRUDOAuthClient(CRUDBase[OAuthClient, OAuthClientCreate, EmptySchema]):
|
||||
"""
|
||||
CRUD operations for OAuth clients (provider mode).
|
||||
|
||||
This is a skeleton implementation for MCP client registration.
|
||||
Full implementation can be expanded when needed.
|
||||
"""
|
||||
|
||||
async def get_by_client_id(
|
||||
self, db: AsyncSession, *, client_id: str
|
||||
) -> OAuthClient | None:
|
||||
"""
|
||||
Get OAuth client by client_id.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
client_id: OAuth client ID
|
||||
|
||||
Returns:
|
||||
OAuthClient if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
result = await db.execute(
|
||||
select(OAuthClient).where(
|
||||
and_(
|
||||
OAuthClient.client_id == client_id,
|
||||
OAuthClient.is_active == True, # noqa: E712
|
||||
)
|
||||
)
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(f"Error getting OAuth client {client_id}: {e!s}")
|
||||
raise
|
||||
|
||||
async def create_client(
|
||||
self,
|
||||
db: AsyncSession,
|
||||
*,
|
||||
obj_in: OAuthClientCreate,
|
||||
owner_user_id: UUID | None = None,
|
||||
) -> tuple[OAuthClient, str | None]:
|
||||
"""
|
||||
Create a new OAuth client.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
obj_in: OAuth client creation data
|
||||
owner_user_id: Optional owner user ID
|
||||
|
||||
Returns:
|
||||
Tuple of (created OAuthClient, client_secret or None for public clients)
|
||||
"""
|
||||
try:
|
||||
# Generate client_id
|
||||
client_id = secrets.token_urlsafe(32)
|
||||
|
||||
# Generate client_secret for confidential clients
|
||||
client_secret = None
|
||||
client_secret_hash = None
|
||||
if obj_in.client_type == "confidential":
|
||||
client_secret = secrets.token_urlsafe(48)
|
||||
# SECURITY: Use bcrypt for secret storage (not SHA-256)
|
||||
# bcrypt is computationally expensive, making brute-force attacks infeasible
|
||||
from app.core.auth import get_password_hash
|
||||
|
||||
client_secret_hash = get_password_hash(client_secret)
|
||||
|
||||
db_obj = OAuthClient(
|
||||
client_id=client_id,
|
||||
client_secret_hash=client_secret_hash,
|
||||
client_name=obj_in.client_name,
|
||||
client_description=obj_in.client_description,
|
||||
client_type=obj_in.client_type,
|
||||
redirect_uris=obj_in.redirect_uris,
|
||||
allowed_scopes=obj_in.allowed_scopes,
|
||||
owner_user_id=owner_user_id,
|
||||
is_active=True,
|
||||
)
|
||||
db.add(db_obj)
|
||||
await db.commit()
|
||||
await db.refresh(db_obj)
|
||||
|
||||
logger.info(
|
||||
f"OAuth client created: {obj_in.client_name} ({client_id[:8]}...)"
|
||||
)
|
||||
return db_obj, client_secret
|
||||
except IntegrityError as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
logger.error(f"Error creating OAuth client: {error_msg}")
|
||||
raise ValueError(f"Failed to create OAuth client: {error_msg}")
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error creating OAuth client: {e!s}", exc_info=True)
|
||||
raise
|
||||
|
||||
async def deactivate_client(
|
||||
self, db: AsyncSession, *, client_id: str
|
||||
) -> OAuthClient | None:
|
||||
"""
|
||||
Deactivate an OAuth client.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
client_id: OAuth client ID
|
||||
|
||||
Returns:
|
||||
Deactivated OAuthClient if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
client = await self.get_by_client_id(db, client_id=client_id)
|
||||
if client is None:
|
||||
return None
|
||||
|
||||
client.is_active = False
|
||||
db.add(client)
|
||||
await db.commit()
|
||||
await db.refresh(client)
|
||||
|
||||
logger.info(f"OAuth client deactivated: {client.client_name}")
|
||||
return client
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error deactivating OAuth client {client_id}: {e!s}")
|
||||
raise
|
||||
|
||||
async def validate_redirect_uri(
|
||||
self, db: AsyncSession, *, client_id: str, redirect_uri: str
|
||||
) -> bool:
|
||||
"""
|
||||
Validate that a redirect URI is allowed for a client.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
client_id: OAuth client ID
|
||||
redirect_uri: Redirect URI to validate
|
||||
|
||||
Returns:
|
||||
True if valid, False otherwise
|
||||
"""
|
||||
try:
|
||||
client = await self.get_by_client_id(db, client_id=client_id)
|
||||
if client is None:
|
||||
return False
|
||||
|
||||
return redirect_uri in (client.redirect_uris or [])
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(f"Error validating redirect URI: {e!s}")
|
||||
return False
|
||||
|
||||
async def verify_client_secret(
|
||||
self, db: AsyncSession, *, client_id: str, client_secret: str
|
||||
) -> bool:
|
||||
"""
|
||||
Verify client credentials.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
client_id: OAuth client ID
|
||||
client_secret: Client secret to verify
|
||||
|
||||
Returns:
|
||||
True if valid, False otherwise
|
||||
"""
|
||||
try:
|
||||
result = await db.execute(
|
||||
select(OAuthClient).where(
|
||||
and_(
|
||||
OAuthClient.client_id == client_id,
|
||||
OAuthClient.is_active == True, # noqa: E712
|
||||
)
|
||||
)
|
||||
)
|
||||
client = result.scalar_one_or_none()
|
||||
|
||||
if client is None or client.client_secret_hash is None:
|
||||
return False
|
||||
|
||||
# SECURITY: Verify secret using bcrypt (not SHA-256)
|
||||
# This supports both old SHA-256 hashes (for migration) and new bcrypt hashes
|
||||
from app.core.auth import verify_password
|
||||
|
||||
stored_hash: str = str(client.client_secret_hash)
|
||||
|
||||
# Check if it's a bcrypt hash (prefix "$2") or legacy SHA-256
|
||||
if stored_hash.startswith("$2"):
|
||||
# New bcrypt format
|
||||
return verify_password(client_secret, stored_hash)
|
||||
else:
|
||||
# Legacy SHA-256 format - still support for migration
|
||||
import hashlib
|
||||
|
||||
secret_hash = hashlib.sha256(client_secret.encode()).hexdigest()
|
||||
return secrets.compare_digest(stored_hash, secret_hash)
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(f"Error verifying client secret: {e!s}")
|
||||
return False
|
||||
|
||||
async def get_all_clients(
|
||||
self, db: AsyncSession, *, include_inactive: bool = False
|
||||
) -> list[OAuthClient]:
|
||||
"""
|
||||
Get all OAuth clients.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
include_inactive: Whether to include inactive clients
|
||||
|
||||
Returns:
|
||||
List of OAuthClient objects
|
||||
"""
|
||||
try:
|
||||
query = select(OAuthClient).order_by(OAuthClient.created_at.desc())
|
||||
if not include_inactive:
|
||||
query = query.where(OAuthClient.is_active == True) # noqa: E712
|
||||
|
||||
result = await db.execute(query)
|
||||
return list(result.scalars().all())
|
||||
except Exception as e: # pragma: no cover
|
||||
logger.error(f"Error getting all OAuth clients: {e!s}")
|
||||
raise
|
||||
|
||||
async def delete_client(self, db: AsyncSession, *, client_id: str) -> bool:
|
||||
"""
|
||||
Delete an OAuth client permanently.
|
||||
|
||||
Note: This will cascade delete related records (tokens, consents, etc.)
|
||||
due to foreign key constraints.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
client_id: OAuth client ID
|
||||
|
||||
Returns:
|
||||
True if deleted, False if not found
|
||||
"""
|
||||
try:
|
||||
result = await db.execute(
|
||||
delete(OAuthClient).where(OAuthClient.client_id == client_id)
|
||||
)
|
||||
await db.commit()
|
||||
|
||||
deleted = result.rowcount > 0
|
||||
if deleted:
|
||||
logger.info(f"OAuth client deleted: {client_id}")
|
||||
else:
|
||||
logger.warning(f"OAuth client not found for deletion: {client_id}")
|
||||
|
||||
return deleted
|
||||
except Exception as e: # pragma: no cover
|
||||
await db.rollback()
|
||||
logger.error(f"Error deleting OAuth client {client_id}: {e!s}")
|
||||
raise
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Singleton instances
|
||||
# ============================================================================
|
||||
|
||||
oauth_account = CRUDOAuthAccount(OAuthAccount)
|
||||
oauth_state = CRUDOAuthState(OAuthState)
|
||||
oauth_client = CRUDOAuthClient(OAuthClient)
|
||||
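A sketch of the single-use state flow implemented by CRUDOAuthState above; the OAuthStateCreate field names follow the attributes create_state reads, and the redirect URI is illustrative:

import secrets
from datetime import UTC, datetime, timedelta

from sqlalchemy.ext.asyncio import AsyncSession

from app.crud import oauth_state
from app.schemas.oauth import OAuthStateCreate


async def begin_oauth_login(db: AsyncSession) -> str:
    # Issue a single-use state token before redirecting to the provider.
    state = secrets.token_urlsafe(32)
    await oauth_state.create_state(
        db,
        obj_in=OAuthStateCreate(
            state=state,
            code_verifier=None,
            nonce=None,
            provider="google",
            redirect_uri="https://app.example.com/oauth/callback",
            user_id=None,
            expires_at=datetime.now(UTC) + timedelta(minutes=10),
        ),
    )
    return state


async def finish_oauth_login(db: AsyncSession, returned_state: str) -> bool:
    # Consuming the state deletes it, so a replayed callback fails this check.
    return await oauth_state.get_and_consume_state(db, state=returned_state) is not None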
128
backend/app/repositories/organization.py → backend/app/crud/organization.py
Normal file → Executable file
@@ -1,5 +1,5 @@
|
||||
# app/repositories/organization.py
|
||||
"""Repository for Organization model async database operations using SQLAlchemy 2.0 patterns."""
|
||||
# app/crud/organization.py
|
||||
"""Async CRUD operations for Organization model using SQLAlchemy 2.0 patterns."""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
@@ -9,11 +9,10 @@ from sqlalchemy import and_, case, func, or_, select
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.core.repository_exceptions import DuplicateEntryError, IntegrityConstraintError
|
||||
from app.crud.base import CRUDBase
|
||||
from app.models.organization import Organization
|
||||
from app.models.user import User
|
||||
from app.models.user_organization import OrganizationRole, UserOrganization
|
||||
from app.repositories.base import BaseRepository
|
||||
from app.schemas.organizations import (
|
||||
OrganizationCreate,
|
||||
OrganizationUpdate,
|
||||
@@ -22,10 +21,8 @@ from app.schemas.organizations import (
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OrganizationRepository(
|
||||
BaseRepository[Organization, OrganizationCreate, OrganizationUpdate]
|
||||
):
|
||||
"""Repository for Organization model."""
|
||||
class CRUDOrganization(CRUDBase[Organization, OrganizationCreate, OrganizationUpdate]):
|
||||
"""Async CRUD operations for Organization model."""
|
||||
|
||||
async def get_by_slug(self, db: AsyncSession, *, slug: str) -> Organization | None:
|
||||
"""Get organization by slug."""
|
||||
@@ -35,7 +32,7 @@ class OrganizationRepository(
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
except Exception as e:
|
||||
logger.error("Error getting organization by slug %s: %s", slug, e)
|
||||
logger.error(f"Error getting organization by slug {slug}: {e!s}")
|
||||
raise
|
||||
|
||||
async def create(
|
||||
@@ -57,20 +54,18 @@ class OrganizationRepository(
|
||||
except IntegrityError as e:
|
||||
await db.rollback()
|
||||
error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
|
||||
if (
|
||||
"slug" in error_msg.lower()
|
||||
or "unique" in error_msg.lower()
|
||||
or "duplicate" in error_msg.lower()
|
||||
):
|
||||
logger.warning("Duplicate slug attempted: %s", obj_in.slug)
|
||||
raise DuplicateEntryError(
|
||||
if "slug" in error_msg.lower():
|
||||
logger.warning(f"Duplicate slug attempted: {obj_in.slug}")
|
||||
raise ValueError(
|
||||
f"Organization with slug '{obj_in.slug}' already exists"
|
||||
)
|
||||
logger.error("Integrity error creating organization: %s", error_msg)
|
||||
raise IntegrityConstraintError(f"Database integrity error: {error_msg}")
|
||||
logger.error(f"Integrity error creating organization: {error_msg}")
|
||||
raise ValueError(f"Database integrity error: {error_msg}")
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.exception("Unexpected error creating organization: %s", e)
|
||||
logger.error(
|
||||
f"Unexpected error creating organization: {e!s}", exc_info=True
|
||||
)
|
||||
raise
|
||||
|
||||
async def get_multi_with_filters(
|
||||
@@ -84,10 +79,16 @@ class OrganizationRepository(
|
||||
sort_by: str = "created_at",
|
||||
sort_order: str = "desc",
|
||||
) -> tuple[list[Organization], int]:
|
||||
"""Get multiple organizations with filtering, searching, and sorting."""
|
||||
"""
|
||||
Get multiple organizations with filtering, searching, and sorting.
|
||||
|
||||
Returns:
|
||||
Tuple of (organizations list, total count)
|
||||
"""
|
||||
try:
|
||||
query = select(Organization)
|
||||
|
||||
# Apply filters
|
||||
if is_active is not None:
|
||||
query = query.where(Organization.is_active == is_active)
|
||||
|
||||
@@ -99,23 +100,26 @@ class OrganizationRepository(
|
||||
)
|
||||
query = query.where(search_filter)
|
||||
|
||||
# Get total count before pagination
|
||||
count_query = select(func.count()).select_from(query.alias())
|
||||
count_result = await db.execute(count_query)
|
||||
total = count_result.scalar_one()
|
||||
|
||||
# Apply sorting
|
||||
sort_column = getattr(Organization, sort_by, Organization.created_at)
|
||||
if sort_order == "desc":
|
||||
query = query.order_by(sort_column.desc())
|
||||
else:
|
||||
query = query.order_by(sort_column.asc())
|
||||
|
||||
# Apply pagination
|
||||
query = query.offset(skip).limit(limit)
|
||||
result = await db.execute(query)
|
||||
organizations = list(result.scalars().all())
|
||||
|
||||
return organizations, total
|
||||
except Exception as e:
|
||||
logger.error("Error getting organizations with filters: %s", e)
|
||||
logger.error(f"Error getting organizations with filters: {e!s}")
|
||||
raise
|
||||
|
||||
async def get_member_count(self, db: AsyncSession, *, organization_id: UUID) -> int:
|
||||
@@ -132,7 +136,7 @@ class OrganizationRepository(
|
||||
return result.scalar_one() or 0
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Error getting member count for organization %s: %s", organization_id, e
|
||||
f"Error getting member count for organization {organization_id}: {e!s}"
|
||||
)
|
||||
raise
|
||||
|
||||
@@ -145,8 +149,16 @@ class OrganizationRepository(
|
||||
is_active: bool | None = None,
|
||||
search: str | None = None,
|
||||
) -> tuple[list[dict[str, Any]], int]:
|
||||
"""Get organizations with member counts in a SINGLE QUERY using JOIN and GROUP BY."""
|
||||
"""
|
||||
Get organizations with member counts in a SINGLE QUERY using JOIN and GROUP BY.
|
||||
This eliminates the N+1 query problem.
|
||||
|
||||
Returns:
|
||||
Tuple of (list of dicts with org and member_count, total count)
|
||||
"""
|
||||
try:
|
||||
# Build base query with LEFT JOIN and GROUP BY
|
||||
# Use CASE statement to count only active members
|
||||
query = (
|
||||
select(
|
||||
Organization,
|
||||
@@ -169,10 +181,10 @@ class OrganizationRepository(
|
||||
.group_by(Organization.id)
|
||||
)
|
||||
|
||||
# Apply filters
|
||||
if is_active is not None:
|
||||
query = query.where(Organization.is_active == is_active)
|
||||
|
||||
search_filter = None
|
||||
if search:
|
||||
search_filter = or_(
|
||||
Organization.name.ilike(f"%{search}%"),
|
||||
@@ -181,15 +193,17 @@ class OrganizationRepository(
|
||||
)
|
||||
query = query.where(search_filter)
|
||||
|
||||
# Get total count
|
||||
count_query = select(func.count(Organization.id))
|
||||
if is_active is not None:
|
||||
count_query = count_query.where(Organization.is_active == is_active)
|
||||
if search_filter is not None:
|
||||
if search:
|
||||
count_query = count_query.where(search_filter)
|
||||
|
||||
count_result = await db.execute(count_query)
|
||||
total = count_result.scalar_one()
|
||||
|
||||
# Apply pagination and ordering
|
||||
query = (
|
||||
query.order_by(Organization.created_at.desc()).offset(skip).limit(limit)
|
||||
)
|
||||
@@ -197,6 +211,7 @@ class OrganizationRepository(
|
||||
result = await db.execute(query)
|
||||
rows = result.all()
|
||||
|
||||
# Convert to list of dicts
|
||||
orgs_with_counts = [
|
||||
{"organization": org, "member_count": member_count}
|
||||
for org, member_count in rows
|
||||
@@ -205,7 +220,9 @@ class OrganizationRepository(
|
||||
return orgs_with_counts, total
|
||||
|
||||
except Exception as e:
|
||||
logger.exception("Error getting organizations with member counts: %s", e)
|
||||
logger.error(
|
||||
f"Error getting organizations with member counts: {e!s}", exc_info=True
|
||||
)
|
||||
raise
|
||||
|
||||
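Aside: the SINGLE QUERY pattern named in the docstring above condenses to roughly the sketch below, using the models this file already imports. The helper name is hypothetical; the LEFT JOIN keeps zero-member organizations, and the CASE makes COUNT skip inactive memberships.

```python
from sqlalchemy import case, func, select
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.organization import Organization
from app.models.user_organization import UserOrganization


async def orgs_with_member_counts(db: AsyncSession) -> list[tuple[Organization, int]]:
    # COUNT(CASE WHEN is_active THEN user_id END): COUNT ignores NULLs,
    # so only active memberships are counted; LEFT JOIN keeps empty orgs.
    query = (
        select(
            Organization,
            func.count(
                case((UserOrganization.is_active == True, UserOrganization.user_id))  # noqa: E712
            ).label("member_count"),
        )
        .outerjoin(
            UserOrganization,
            UserOrganization.organization_id == Organization.id,
        )
        .group_by(Organization.id)
    )
    result = await db.execute(query)
    return [(org, count) for org, count in result.all()]
```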
    async def add_user(
@@ -219,6 +236,7 @@ class OrganizationRepository(
    ) -> UserOrganization:
        """Add a user to an organization with a specific role."""
        try:
            # Check if relationship already exists
            result = await db.execute(
                select(UserOrganization).where(
                    and_(
@@ -230,6 +248,7 @@ class OrganizationRepository(
            existing = result.scalar_one_or_none()

            if existing:
                # Reactivate if inactive, or raise error if already active
                if not existing.is_active:
                    existing.is_active = True
                    existing.role = role
@@ -238,10 +257,9 @@ class OrganizationRepository(
                    await db.refresh(existing)
                    return existing
                else:
                    raise DuplicateEntryError(
                        "User is already a member of this organization"
                    )
                    raise ValueError("User is already a member of this organization")

            # Create new relationship
            user_org = UserOrganization(
                user_id=user_id,
                organization_id=organization_id,
@@ -255,11 +273,11 @@ class OrganizationRepository(
            return user_org
        except IntegrityError as e:
            await db.rollback()
            logger.error("Integrity error adding user to organization: %s", e)
            raise IntegrityConstraintError("Failed to add user to organization")
            logger.error(f"Integrity error adding user to organization: {e!s}")
            raise ValueError("Failed to add user to organization")
        except Exception as e:
            await db.rollback()
            logger.exception("Error adding user to organization: %s", e)
            logger.error(f"Error adding user to organization: {e!s}", exc_info=True)
            raise

    async def remove_user(
@@ -285,7 +303,7 @@ class OrganizationRepository(
            return True
        except Exception as e:
            await db.rollback()
            logger.exception("Error removing user from organization: %s", e)
            logger.error(f"Error removing user from organization: {e!s}", exc_info=True)
            raise

    async def update_user_role(
@@ -320,7 +338,7 @@ class OrganizationRepository(
            return user_org
        except Exception as e:
            await db.rollback()
            logger.exception("Error updating user role: %s", e)
            logger.error(f"Error updating user role: {e!s}", exc_info=True)
            raise

    async def get_organization_members(
@@ -330,10 +348,16 @@ class OrganizationRepository(
        organization_id: UUID,
        skip: int = 0,
        limit: int = 100,
        is_active: bool | None = True,
        is_active: bool = True,
    ) -> tuple[list[dict[str, Any]], int]:
        """Get members of an organization with user details."""
        """
        Get members of an organization with user details.

        Returns:
            Tuple of (members list with user details, total count)
        """
        try:
            # Build query with join
            query = (
                select(UserOrganization, User)
                .join(User, UserOrganization.user_id == User.id)
@@ -343,6 +367,7 @@ class OrganizationRepository(
            if is_active is not None:
                query = query.where(UserOrganization.is_active == is_active)

            # Get total count
            count_query = select(func.count()).select_from(
                select(UserOrganization)
                .where(UserOrganization.organization_id == organization_id)
@@ -356,6 +381,7 @@ class OrganizationRepository(
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply ordering and pagination
            query = (
                query.order_by(UserOrganization.created_at.desc())
                .offset(skip)
@@ -380,11 +406,11 @@ class OrganizationRepository(

            return members, total
        except Exception as e:
            logger.error("Error getting organization members: %s", e)
            logger.error(f"Error getting organization members: {e!s}")
            raise

    async def get_user_organizations(
        self, db: AsyncSession, *, user_id: UUID, is_active: bool | None = True
        self, db: AsyncSession, *, user_id: UUID, is_active: bool = True
    ) -> list[Organization]:
        """Get all organizations a user belongs to."""
        try:
@@ -403,14 +429,21 @@ class OrganizationRepository(
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error("Error getting user organizations: %s", e)
            logger.error(f"Error getting user organizations: {e!s}")
            raise

    async def get_user_organizations_with_details(
        self, db: AsyncSession, *, user_id: UUID, is_active: bool | None = True
        self, db: AsyncSession, *, user_id: UUID, is_active: bool = True
    ) -> list[dict[str, Any]]:
        """Get user's organizations with role and member count in SINGLE QUERY."""
        """
        Get user's organizations with role and member count in SINGLE QUERY.
        Eliminates N+1 problem by using subquery for member counts.

        Returns:
            List of dicts with organization, role, and member_count
        """
        try:
            # Subquery to get member counts for each organization
            member_count_subq = (
                select(
                    UserOrganization.organization_id,
@@ -421,6 +454,7 @@ class OrganizationRepository(
                .subquery()
            )

            # Main query with JOIN to get org, role, and member count
            query = (
                select(
                    Organization,
@@ -452,7 +486,9 @@ class OrganizationRepository(
            ]

        except Exception as e:
            logger.exception("Error getting user organizations with details: %s", e)
            logger.error(
                f"Error getting user organizations with details: {e!s}", exc_info=True
            )
            raise

    async def get_user_role_in_org(
@@ -471,9 +507,9 @@ class OrganizationRepository(
            )
            user_org = result.scalar_one_or_none()

            return user_org.role if user_org else None  # pyright: ignore[reportReturnType]
            return user_org.role if user_org else None
        except Exception as e:
            logger.error("Error getting user role in org: %s", e)
            logger.error(f"Error getting user role in org: {e!s}")
            raise

    async def is_user_org_owner(
@@ -495,5 +531,5 @@ class OrganizationRepository(
        return role in [OrganizationRole.OWNER, OrganizationRole.ADMIN]


# Singleton instance
organization_repo = OrganizationRepository(Organization)
# Create a singleton instance for use across the application
organization = CRUDOrganization(Organization)
231
backend/app/repositories/session.py → backend/app/crud/session.py
Normal file → Executable file
@@ -1,5 +1,6 @@
# app/repositories/session.py
"""Repository for UserSession model async database operations using SQLAlchemy 2.0 patterns."""
"""
Async CRUD operations for user sessions using SQLAlchemy 2.0 patterns.
"""

import logging
import uuid
@@ -10,32 +11,49 @@ from sqlalchemy import and_, delete, func, select, update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.core.repository_exceptions import IntegrityConstraintError, InvalidInputError
from app.crud.base import CRUDBase
from app.models.user_session import UserSession
from app.repositories.base import BaseRepository
from app.schemas.sessions import SessionCreate, SessionUpdate

logger = logging.getLogger(__name__)


class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate]):
    """Repository for UserSession model."""
class CRUDSession(CRUDBase[UserSession, SessionCreate, SessionUpdate]):
    """Async CRUD operations for user sessions."""

    async def get_by_jti(self, db: AsyncSession, *, jti: str) -> UserSession | None:
        """Get session by refresh token JTI."""
        """
        Get session by refresh token JTI.

        Args:
            db: Database session
            jti: Refresh token JWT ID

        Returns:
            UserSession if found, None otherwise
        """
        try:
            result = await db.execute(
                select(UserSession).where(UserSession.refresh_token_jti == jti)
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error("Error getting session by JTI %s: %s", jti, e)
            logger.error(f"Error getting session by JTI {jti}: {e!s}")
            raise

    async def get_active_by_jti(
        self, db: AsyncSession, *, jti: str
    ) -> UserSession | None:
        """Get active session by refresh token JTI."""
        """
        Get active session by refresh token JTI.

        Args:
            db: Database session
            jti: Refresh token JWT ID

        Returns:
            Active UserSession if found, None otherwise
        """
        try:
            result = await db.execute(
                select(UserSession).where(
@@ -47,7 +65,7 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error("Error getting active session by JTI %s: %s", jti, e)
            logger.error(f"Error getting active session by JTI {jti}: {e!s}")
            raise

    async def get_user_sessions(
@@ -58,12 +76,25 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
        active_only: bool = True,
        with_user: bool = False,
    ) -> list[UserSession]:
        """Get all sessions for a user with optional eager loading."""
        """
        Get all sessions for a user with optional eager loading.

        Args:
            db: Database session
            user_id: User ID
            active_only: If True, return only active sessions
            with_user: If True, eager load user relationship to prevent N+1

        Returns:
            List of UserSession objects
        """
        try:
            # Convert user_id string to UUID if needed
            user_uuid = UUID(user_id) if isinstance(user_id, str) else user_id

            query = select(UserSession).where(UserSession.user_id == user_uuid)

            # Add eager loading if requested to prevent N+1 queries
            if with_user:
                query = query.options(joinedload(UserSession.user))

@@ -74,13 +105,25 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error("Error getting sessions for user %s: %s", user_id, e)
            logger.error(f"Error getting sessions for user {user_id}: {e!s}")
            raise

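Aside: the `with_user` eager-loading option above boils down to one `joinedload` call. A minimal sketch, assuming the `UserSession.user` relationship from this diff; the function name is hypothetical.

```python
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.models.user_session import UserSession


async def sessions_with_users(db: AsyncSession) -> list[UserSession]:
    # Without joinedload, touching session.user later issues one extra
    # SELECT per row -- the N+1 pattern the docstring warns about.
    query = select(UserSession).options(joinedload(UserSession.user))
    result = await db.execute(query)
    return list(result.scalars().all())
```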
    async def create_session(
        self, db: AsyncSession, *, obj_in: SessionCreate
    ) -> UserSession:
        """Create a new user session."""
        """
        Create a new user session.

        Args:
            db: Database session
            obj_in: SessionCreate schema with session data

        Returns:
            Created UserSession

        Raises:
            ValueError: If session creation fails
        """
        try:
            db_obj = UserSession(
                user_id=obj_in.user_id,
@@ -100,26 +143,33 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            await db.refresh(db_obj)

            logger.info(
                "Session created for user %s from %s (IP: %s)",
                obj_in.user_id,
                obj_in.device_name,
                obj_in.ip_address,
                f"Session created for user {obj_in.user_id} from {obj_in.device_name} "
                f"(IP: {obj_in.ip_address})"
            )

            return db_obj
        except Exception as e:
            await db.rollback()
            logger.exception("Error creating session: %s", e)
            raise IntegrityConstraintError(f"Failed to create session: {e!s}")
            logger.error(f"Error creating session: {e!s}", exc_info=True)
            raise ValueError(f"Failed to create session: {e!s}")

    async def deactivate(
        self, db: AsyncSession, *, session_id: str
    ) -> UserSession | None:
        """Deactivate a session (logout from device)."""
        """
        Deactivate a session (logout from device).

        Args:
            db: Database session
            session_id: Session UUID

        Returns:
            Deactivated UserSession if found, None otherwise
        """
        try:
            session = await self.get(db, id=session_id)
            if not session:
                logger.warning("Session %s not found for deactivation", session_id)
                logger.warning(f"Session {session_id} not found for deactivation")
                return None

            session.is_active = False
@@ -128,23 +178,31 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            await db.refresh(session)

            logger.info(
                "Session %s deactivated for user %s (%s)",
                session_id,
                session.user_id,
                session.device_name,
                f"Session {session_id} deactivated for user {session.user_id} "
                f"({session.device_name})"
            )

            return session
        except Exception as e:
            await db.rollback()
            logger.error("Error deactivating session %s: %s", session_id, e)
            logger.error(f"Error deactivating session {session_id}: {e!s}")
            raise

    async def deactivate_all_user_sessions(
        self, db: AsyncSession, *, user_id: str
    ) -> int:
        """Deactivate all active sessions for a user (logout from all devices)."""
        """
        Deactivate all active sessions for a user (logout from all devices).

        Args:
            db: Database session
            user_id: User ID

        Returns:
            Number of sessions deactivated
        """
        try:
            # Convert user_id string to UUID if needed
            user_uuid = UUID(user_id) if isinstance(user_id, str) else user_id

            stmt = (
@@ -158,18 +216,27 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate

            count = result.rowcount

            logger.info("Deactivated %s sessions for user %s", count, user_id)
            logger.info(f"Deactivated {count} sessions for user {user_id}")

            return count
        except Exception as e:
            await db.rollback()
            logger.error("Error deactivating all sessions for user %s: %s", user_id, e)
            logger.error(f"Error deactivating all sessions for user {user_id}: {e!s}")
            raise

    async def update_last_used(
        self, db: AsyncSession, *, session: UserSession
    ) -> UserSession:
        """Update the last_used_at timestamp for a session."""
        """
        Update the last_used_at timestamp for a session.

        Args:
            db: Database session
            session: UserSession object

        Returns:
            Updated UserSession
        """
        try:
            session.last_used_at = datetime.now(UTC)
            db.add(session)
@@ -178,7 +245,7 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            return session
        except Exception as e:
            await db.rollback()
            logger.error("Error updating last_used for session %s: %s", session.id, e)
            logger.error(f"Error updating last_used for session {session.id}: {e!s}")
            raise

    async def update_refresh_token(
@@ -189,7 +256,20 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
        new_jti: str,
        new_expires_at: datetime,
    ) -> UserSession:
        """Update session with new refresh token JTI and expiration."""
        """
        Update session with new refresh token JTI and expiration.

        Called during token refresh.

        Args:
            db: Database session
            session: UserSession object
            new_jti: New refresh token JTI
            new_expires_at: New expiration datetime

        Returns:
            Updated UserSession
        """
        try:
            session.refresh_token_jti = new_jti
            session.expires_at = new_expires_at
@@ -201,16 +281,32 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
        except Exception as e:
            await db.rollback()
            logger.error(
                "Error updating refresh token for session %s: %s", session.id, e
                f"Error updating refresh token for session {session.id}: {e!s}"
            )
            raise

    async def cleanup_expired(self, db: AsyncSession, *, keep_days: int = 30) -> int:
        """Clean up expired sessions using optimized bulk DELETE."""
        """
        Clean up expired sessions using optimized bulk DELETE.

        Deletes sessions that are:
        - Expired AND inactive
        - Older than keep_days

        Uses single DELETE query instead of N individual deletes for efficiency.

        Args:
            db: Database session
            keep_days: Keep inactive sessions for this many days (for audit)

        Returns:
            Number of sessions deleted
        """
        try:
            cutoff_date = datetime.now(UTC) - timedelta(days=keep_days)
            now = datetime.now(UTC)

            # Use bulk DELETE with WHERE clause - single query
            stmt = delete(UserSession).where(
                and_(
                    UserSession.is_active == False,  # noqa: E712
@@ -225,25 +321,38 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            count = result.rowcount

            if count > 0:
                logger.info("Cleaned up %s expired sessions using bulk DELETE", count)
                logger.info(f"Cleaned up {count} expired sessions using bulk DELETE")

            return count
        except Exception as e:
            await db.rollback()
            logger.error("Error cleaning up expired sessions: %s", e)
            logger.error(f"Error cleaning up expired sessions: {e!s}")
            raise

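Aside: a condensed sketch of the bulk-DELETE cleanup above. The hunk cuts off mid-WHERE-clause, so the `expires_at` and `created_at` conditions below are assumptions about the remaining filters, and the function name is hypothetical.

```python
from datetime import UTC, datetime, timedelta

from sqlalchemy import and_, delete
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.user_session import UserSession


async def purge_stale_sessions(db: AsyncSession, keep_days: int = 30) -> int:
    # One DELETE ... WHERE removes every matching row server-side instead
    # of loading N sessions and deleting them one by one.
    cutoff = datetime.now(UTC) - timedelta(days=keep_days)
    stmt = delete(UserSession).where(
        and_(
            UserSession.is_active == False,  # noqa: E712
            UserSession.expires_at < datetime.now(UTC),  # assumed filter
            UserSession.created_at < cutoff,  # assumed filter
        )
    )
    result = await db.execute(stmt)
    await db.commit()
    return result.rowcount
```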
    async def cleanup_expired_for_user(self, db: AsyncSession, *, user_id: str) -> int:
        """Clean up expired and inactive sessions for a specific user."""
        """
        Clean up expired and inactive sessions for a specific user.

        Uses single bulk DELETE query for efficiency instead of N individual deletes.

        Args:
            db: Database session
            user_id: User ID to cleanup sessions for

        Returns:
            Number of sessions deleted
        """
        try:
            # Validate UUID
            try:
                uuid_obj = uuid.UUID(user_id)
            except (ValueError, AttributeError):
                logger.error("Invalid UUID format: %s", user_id)
                raise InvalidInputError(f"Invalid user ID format: {user_id}")
                logger.error(f"Invalid UUID format: {user_id}")
                raise ValueError(f"Invalid user ID format: {user_id}")

            now = datetime.now(UTC)

            # Use bulk DELETE with WHERE clause - single query
            stmt = delete(UserSession).where(
                and_(
                    UserSession.user_id == uuid_obj,
@@ -259,22 +368,30 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate

            if count > 0:
                logger.info(
                    "Cleaned up %s expired sessions for user %s using bulk DELETE",
                    count,
                    user_id,
                    f"Cleaned up {count} expired sessions for user {user_id} using bulk DELETE"
                )

            return count
        except Exception as e:
            await db.rollback()
            logger.error(
                "Error cleaning up expired sessions for user %s: %s", user_id, e
                f"Error cleaning up expired sessions for user {user_id}: {e!s}"
            )
            raise

    async def get_user_session_count(self, db: AsyncSession, *, user_id: str) -> int:
        """Get count of active sessions for a user."""
        """
        Get count of active sessions for a user.

        Args:
            db: Database session
            user_id: User ID

        Returns:
            Number of active sessions
        """
        try:
            # Convert user_id string to UUID if needed
            user_uuid = UUID(user_id) if isinstance(user_id, str) else user_id

            result = await db.execute(
@@ -284,7 +401,7 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            )
            return result.scalar_one()
        except Exception as e:
            logger.error("Error counting sessions for user %s: %s", user_id, e)
            logger.error(f"Error counting sessions for user {user_id}: {e!s}")
            raise

    async def get_all_sessions(
@@ -296,16 +413,31 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
        active_only: bool = True,
        with_user: bool = True,
    ) -> tuple[list[UserSession], int]:
        """Get all sessions across all users with pagination (admin only)."""
        """
        Get all sessions across all users with pagination (admin only).

        Args:
            db: Database session
            skip: Number of records to skip
            limit: Maximum number of records to return
            active_only: If True, return only active sessions
            with_user: If True, eager load user relationship to prevent N+1

        Returns:
            Tuple of (list of UserSession objects, total count)
        """
        try:
            # Build query
            query = select(UserSession)

            # Add eager loading if requested to prevent N+1 queries
            if with_user:
                query = query.options(joinedload(UserSession.user))

            if active_only:
                query = query.where(UserSession.is_active)

            # Get total count
            count_query = select(func.count(UserSession.id))
            if active_only:
                count_query = count_query.where(UserSession.is_active)
@@ -313,6 +445,7 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply pagination and ordering
            query = (
                query.order_by(UserSession.last_used_at.desc())
                .offset(skip)
@@ -325,9 +458,9 @@ class SessionRepository(BaseRepository[UserSession, SessionCreate, SessionUpdate
            return sessions, total

        except Exception as e:
            logger.exception("Error getting all sessions: %s", e)
            logger.error(f"Error getting all sessions: {e!s}", exc_info=True)
            raise


# Singleton instance
session_repo = SessionRepository(UserSession)
# Create singleton instance
session = CRUDSession(UserSession)
20
backend/app/crud/syndarix/__init__.py
Normal file
@@ -0,0 +1,20 @@
# app/crud/syndarix/__init__.py
"""
Syndarix CRUD operations.

This package contains CRUD operations for all Syndarix domain entities.
"""

from .agent_instance import agent_instance
from .agent_type import agent_type
from .issue import issue
from .project import project
from .sprint import sprint

__all__ = [
    "agent_instance",
    "agent_type",
    "issue",
    "project",
    "sprint",
]
394
backend/app/crud/syndarix/agent_instance.py
Normal file
@@ -0,0 +1,394 @@
# app/crud/syndarix/agent_instance.py
"""Async CRUD operations for AgentInstance model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import UTC, datetime
from decimal import Decimal
from typing import Any
from uuid import UUID

from sqlalchemy import func, select, update
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, Issue
from app.models.syndarix.enums import AgentStatus
from app.schemas.syndarix import AgentInstanceCreate, AgentInstanceUpdate

logger = logging.getLogger(__name__)


class CRUDAgentInstance(
    CRUDBase[AgentInstance, AgentInstanceCreate, AgentInstanceUpdate]
):
    """Async CRUD operations for AgentInstance model."""

    async def create(
        self, db: AsyncSession, *, obj_in: AgentInstanceCreate
    ) -> AgentInstance:
        """Create a new agent instance with error handling."""
        try:
            db_obj = AgentInstance(
                agent_type_id=obj_in.agent_type_id,
                project_id=obj_in.project_id,
                name=obj_in.name,
                status=obj_in.status,
                current_task=obj_in.current_task,
                short_term_memory=obj_in.short_term_memory,
                long_term_memory_ref=obj_in.long_term_memory_ref,
                session_id=obj_in.session_id,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error(f"Integrity error creating agent instance: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Unexpected error creating agent instance: {e!s}", exc_info=True
            )
            raise

    async def get_with_details(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get an agent instance with full details including related entities.

        Returns:
            Dictionary with instance and related entity details
        """
        try:
            # Get instance with joined relationships
            result = await db.execute(
                select(AgentInstance)
                .options(
                    joinedload(AgentInstance.agent_type),
                    joinedload(AgentInstance.project),
                )
                .where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            # Get assigned issues count
            issues_count_result = await db.execute(
                select(func.count(Issue.id)).where(
                    Issue.assigned_agent_id == instance_id
                )
            )
            assigned_issues_count = issues_count_result.scalar_one()

            return {
                "instance": instance,
                "agent_type_name": instance.agent_type.name
                if instance.agent_type
                else None,
                "agent_type_slug": instance.agent_type.slug
                if instance.agent_type
                else None,
                "project_name": instance.project.name if instance.project else None,
                "project_slug": instance.project.slug if instance.project else None,
                "assigned_issues_count": assigned_issues_count,
            }
        except Exception as e:
            logger.error(
                f"Error getting agent instance with details {instance_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        status: AgentStatus | None = None,
        skip: int = 0,
        limit: int = 100,
    ) -> tuple[list[AgentInstance], int]:
        """Get agent instances for a specific project."""
        try:
            query = select(AgentInstance).where(AgentInstance.project_id == project_id)

            if status is not None:
                query = query.where(AgentInstance.status == status)

            # Get total count
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply pagination
            query = query.order_by(AgentInstance.created_at.desc())
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            instances = list(result.scalars().all())

            return instances, total
        except Exception as e:
            logger.error(
                f"Error getting instances by project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_by_agent_type(
        self,
        db: AsyncSession,
        *,
        agent_type_id: UUID,
        status: AgentStatus | None = None,
    ) -> list[AgentInstance]:
        """Get all instances of a specific agent type."""
        try:
            query = select(AgentInstance).where(
                AgentInstance.agent_type_id == agent_type_id
            )

            if status is not None:
                query = query.where(AgentInstance.status == status)

            query = query.order_by(AgentInstance.created_at.desc())
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting instances by agent type {agent_type_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def update_status(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
        status: AgentStatus,
        current_task: str | None = None,
    ) -> AgentInstance | None:
        """Update the status of an agent instance."""
        try:
            result = await db.execute(
                select(AgentInstance).where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            instance.status = status
            instance.last_activity_at = datetime.now(UTC)
            if current_task is not None:
                instance.current_task = current_task

            await db.commit()
            await db.refresh(instance)
            return instance
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error updating instance status {instance_id}: {e!s}", exc_info=True
            )
            raise

    async def terminate(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
    ) -> AgentInstance | None:
        """Terminate an agent instance.

        Also unassigns all issues from this agent to prevent orphaned assignments.
        """
        try:
            result = await db.execute(
                select(AgentInstance).where(AgentInstance.id == instance_id)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            # Unassign all issues from this agent before terminating
            await db.execute(
                update(Issue)
                .where(Issue.assigned_agent_id == instance_id)
                .values(assigned_agent_id=None)
            )

            instance.status = AgentStatus.TERMINATED
            instance.terminated_at = datetime.now(UTC)
            instance.current_task = None
            instance.session_id = None

            await db.commit()
            await db.refresh(instance)
            return instance
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error terminating instance {instance_id}: {e!s}", exc_info=True
            )
            raise

    async def record_task_completion(
        self,
        db: AsyncSession,
        *,
        instance_id: UUID,
        tokens_used: int,
        cost_incurred: Decimal,
    ) -> AgentInstance | None:
        """Record a completed task and update metrics.

        Uses atomic SQL UPDATE to prevent lost updates under concurrent load.
        This avoids the read-modify-write race condition that occurs when
        multiple task completions happen simultaneously.
        """
        try:
            now = datetime.now(UTC)

            # Use atomic SQL UPDATE to increment counters without race conditions
            # This is safe for concurrent updates - no read-modify-write pattern
            result = await db.execute(
                update(AgentInstance)
                .where(AgentInstance.id == instance_id)
                .values(
                    tasks_completed=AgentInstance.tasks_completed + 1,
                    tokens_used=AgentInstance.tokens_used + tokens_used,
                    cost_incurred=AgentInstance.cost_incurred + cost_incurred,
                    last_activity_at=now,
                    updated_at=now,
                )
                .returning(AgentInstance)
            )
            instance = result.scalar_one_or_none()

            if not instance:
                return None

            await db.commit()
            return instance
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error recording task completion {instance_id}: {e!s}", exc_info=True
            )
            raise

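Aside: the atomic-increment idea in `record_task_completion` reduces to an UPDATE whose arithmetic runs inside the database. A minimal sketch with a hypothetical helper name; note that `.returning(...)`, used in the full method above, needs a backend with RETURNING support such as PostgreSQL.

```python
from uuid import UUID

from sqlalchemy import update
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.syndarix import AgentInstance


async def bump_tokens_atomically(db: AsyncSession, instance_id: UUID, tokens: int) -> None:
    # Compiles to "SET tokens_used = tokens_used + :n": the read and the
    # write happen in one statement, so concurrent completions cannot
    # overwrite each other's values.
    await db.execute(
        update(AgentInstance)
        .where(AgentInstance.id == instance_id)
        .values(tokens_used=AgentInstance.tokens_used + tokens)
    )
    await db.commit()
```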
    async def get_project_metrics(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> dict[str, Any]:
        """Get aggregated metrics for all agents in a project."""
        try:
            result = await db.execute(
                select(
                    func.count(AgentInstance.id).label("total_instances"),
                    func.count(AgentInstance.id)
                    .filter(AgentInstance.status == AgentStatus.WORKING)
                    .label("active_instances"),
                    func.count(AgentInstance.id)
                    .filter(AgentInstance.status == AgentStatus.IDLE)
                    .label("idle_instances"),
                    func.sum(AgentInstance.tasks_completed).label("total_tasks"),
                    func.sum(AgentInstance.tokens_used).label("total_tokens"),
                    func.sum(AgentInstance.cost_incurred).label("total_cost"),
                ).where(AgentInstance.project_id == project_id)
            )
            row = result.one()

            return {
                "total_instances": row.total_instances or 0,
                "active_instances": row.active_instances or 0,
                "idle_instances": row.idle_instances or 0,
                "total_tasks_completed": row.total_tasks or 0,
                "total_tokens_used": row.total_tokens or 0,
                "total_cost_incurred": row.total_cost or Decimal("0.0000"),
            }
        except Exception as e:
            logger.error(
                f"Error getting project metrics {project_id}: {e!s}", exc_info=True
            )
            raise

    async def bulk_terminate_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> int:
        """Terminate all active instances in a project.

        Also unassigns all issues from these agents to prevent orphaned assignments.
        """
        try:
            # First, unassign all issues from agents in this project
            # Get all agent IDs that will be terminated
            agents_to_terminate = await db.execute(
                select(AgentInstance.id).where(
                    AgentInstance.project_id == project_id,
                    AgentInstance.status != AgentStatus.TERMINATED,
                )
            )
            agent_ids = [row[0] for row in agents_to_terminate.fetchall()]

            # Unassign issues from these agents
            if agent_ids:
                await db.execute(
                    update(Issue)
                    .where(Issue.assigned_agent_id.in_(agent_ids))
                    .values(assigned_agent_id=None)
                )

            now = datetime.now(UTC)
            stmt = (
                update(AgentInstance)
                .where(
                    AgentInstance.project_id == project_id,
                    AgentInstance.status != AgentStatus.TERMINATED,
                )
                .values(
                    status=AgentStatus.TERMINATED,
                    terminated_at=now,
                    current_task=None,
                    session_id=None,
                    updated_at=now,
                )
            )

            result = await db.execute(stmt)
            await db.commit()

            terminated_count = result.rowcount
            logger.info(
                f"Bulk terminated {terminated_count} instances in project {project_id}"
            )
            return terminated_count
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error bulk terminating instances for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise


# Create a singleton instance for use across the application
agent_instance = CRUDAgentInstance(AgentInstance)
316
backend/app/crud/syndarix/agent_type.py
Normal file
@@ -0,0 +1,316 @@
# app/crud/syndarix/agent_type.py
"""Async CRUD operations for AgentType model using SQLAlchemy 2.0 patterns."""

import logging
from typing import Any
from uuid import UUID

from sqlalchemy import func, or_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, AgentType
from app.schemas.syndarix import AgentTypeCreate, AgentTypeUpdate

logger = logging.getLogger(__name__)


class CRUDAgentType(CRUDBase[AgentType, AgentTypeCreate, AgentTypeUpdate]):
    """Async CRUD operations for AgentType model."""

    async def get_by_slug(self, db: AsyncSession, *, slug: str) -> AgentType | None:
        """Get agent type by slug."""
        try:
            result = await db.execute(select(AgentType).where(AgentType.slug == slug))
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(f"Error getting agent type by slug {slug}: {e!s}")
            raise

    async def create(self, db: AsyncSession, *, obj_in: AgentTypeCreate) -> AgentType:
        """Create a new agent type with error handling."""
        try:
            db_obj = AgentType(
                name=obj_in.name,
                slug=obj_in.slug,
                description=obj_in.description,
                expertise=obj_in.expertise,
                personality_prompt=obj_in.personality_prompt,
                primary_model=obj_in.primary_model,
                fallback_models=obj_in.fallback_models,
                model_params=obj_in.model_params,
                mcp_servers=obj_in.mcp_servers,
                tool_permissions=obj_in.tool_permissions,
                is_active=obj_in.is_active,
                # Category and display fields
                category=obj_in.category.value if obj_in.category else None,
                icon=obj_in.icon,
                color=obj_in.color,
                sort_order=obj_in.sort_order,
                typical_tasks=obj_in.typical_tasks,
                collaboration_hints=obj_in.collaboration_hints,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            if "slug" in error_msg.lower():
                logger.warning(f"Duplicate slug attempted: {obj_in.slug}")
                raise ValueError(f"Agent type with slug '{obj_in.slug}' already exists")
            logger.error(f"Integrity error creating agent type: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating agent type: {e!s}", exc_info=True)
            raise

    async def get_multi_with_filters(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        is_active: bool | None = None,
        category: str | None = None,
        search: str | None = None,
        sort_by: str = "created_at",
        sort_order: str = "desc",
    ) -> tuple[list[AgentType], int]:
        """
        Get multiple agent types with filtering, searching, and sorting.

        Returns:
            Tuple of (agent types list, total count)
        """
        try:
            query = select(AgentType)

            # Apply filters
            if is_active is not None:
                query = query.where(AgentType.is_active == is_active)

            if category:
                query = query.where(AgentType.category == category)

            if search:
                search_filter = or_(
                    AgentType.name.ilike(f"%{search}%"),
                    AgentType.slug.ilike(f"%{search}%"),
                    AgentType.description.ilike(f"%{search}%"),
                )
                query = query.where(search_filter)

            # Get total count before pagination
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting
            sort_column = getattr(AgentType, sort_by, AgentType.created_at)
            if sort_order == "desc":
                query = query.order_by(sort_column.desc())
            else:
                query = query.order_by(sort_column.asc())

            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            agent_types = list(result.scalars().all())

            return agent_types, total
        except Exception as e:
            logger.error(f"Error getting agent types with filters: {e!s}")
            raise

    async def get_with_instance_count(
        self,
        db: AsyncSession,
        *,
        agent_type_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get a single agent type with its instance count.

        Returns:
            Dictionary with agent_type and instance_count
        """
        try:
            result = await db.execute(
                select(AgentType).where(AgentType.id == agent_type_id)
            )
            agent_type = result.scalar_one_or_none()

            if not agent_type:
                return None

            # Get instance count
            count_result = await db.execute(
                select(func.count(AgentInstance.id)).where(
                    AgentInstance.agent_type_id == agent_type_id
                )
            )
            instance_count = count_result.scalar_one()

            return {
                "agent_type": agent_type,
                "instance_count": instance_count,
            }
        except Exception as e:
            logger.error(
                f"Error getting agent type with count {agent_type_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_multi_with_instance_counts(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        is_active: bool | None = None,
        category: str | None = None,
        search: str | None = None,
    ) -> tuple[list[dict[str, Any]], int]:
        """
        Get agent types with instance counts in optimized queries.

        Returns:
            Tuple of (list of dicts with agent_type and instance_count, total count)
        """
        try:
            # Get filtered agent types
            agent_types, total = await self.get_multi_with_filters(
                db,
                skip=skip,
                limit=limit,
                is_active=is_active,
                category=category,
                search=search,
            )

            if not agent_types:
                return [], 0

            agent_type_ids = [at.id for at in agent_types]

            # Get instance counts in bulk
            counts_result = await db.execute(
                select(
                    AgentInstance.agent_type_id,
                    func.count(AgentInstance.id).label("count"),
                )
                .where(AgentInstance.agent_type_id.in_(agent_type_ids))
                .group_by(AgentInstance.agent_type_id)
            )
            counts = {row.agent_type_id: row.count for row in counts_result}

            # Combine results
            results = [
                {
                    "agent_type": agent_type,
                    "instance_count": counts.get(agent_type.id, 0),
                }
                for agent_type in agent_types
            ]

            return results, total
        except Exception as e:
            logger.error(f"Error getting agent types with counts: {e!s}", exc_info=True)
            raise

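Aside: the bulk instance-count step in `get_multi_with_instance_counts` is a single IN + GROUP BY query plus a dict lookup, which replaces one COUNT query per agent type. A minimal sketch with a hypothetical helper name.

```python
from uuid import UUID

from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.syndarix import AgentInstance


async def instance_counts(db: AsyncSession, agent_type_ids: list[UUID]) -> dict[UUID, int]:
    # One grouped query replaces len(agent_type_ids) separate COUNTs.
    result = await db.execute(
        select(AgentInstance.agent_type_id, func.count(AgentInstance.id))
        .where(AgentInstance.agent_type_id.in_(agent_type_ids))
        .group_by(AgentInstance.agent_type_id)
    )
    counts = dict(result.all())
    # Types with no instances are absent from the grouped result; default to 0.
    return {tid: counts.get(tid, 0) for tid in agent_type_ids}
```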
    async def get_by_expertise(
        self,
        db: AsyncSession,
        *,
        expertise: str,
        is_active: bool = True,
    ) -> list[AgentType]:
        """Get agent types that have a specific expertise."""
        try:
            # Use PostgreSQL JSONB contains operator
            query = select(AgentType).where(
                AgentType.expertise.contains([expertise.lower()]),
                AgentType.is_active == is_active,
            )
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting agent types by expertise {expertise}: {e!s}",
                exc_info=True,
            )
            raise

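Aside: `.contains([...])` on a PostgreSQL JSONB column compiles to the `@>` containment operator, so the expertise filter above runs server-side (and can use a GIN index if one is defined). In the snippet below, `"python"` is just a placeholder value.

```python
from sqlalchemy import select

from app.models.syndarix import AgentType

# Roughly: WHERE expertise @> '["python"]' in PostgreSQL terms.
query = select(AgentType).where(AgentType.expertise.contains(["python"]))
```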
    async def deactivate(
        self,
        db: AsyncSession,
        *,
        agent_type_id: UUID,
    ) -> AgentType | None:
        """Deactivate an agent type (soft delete)."""
        try:
            result = await db.execute(
                select(AgentType).where(AgentType.id == agent_type_id)
            )
            agent_type = result.scalar_one_or_none()

            if not agent_type:
                return None

            agent_type.is_active = False
            await db.commit()
            await db.refresh(agent_type)
            return agent_type
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error deactivating agent type {agent_type_id}: {e!s}", exc_info=True
            )
            raise

    async def get_grouped_by_category(
        self,
        db: AsyncSession,
        *,
        is_active: bool = True,
    ) -> dict[str, list[AgentType]]:
        """
        Get agent types grouped by category, sorted by sort_order within each group.

        Args:
            db: Database session
            is_active: Filter by active status (default: True)

        Returns:
            Dictionary mapping category to list of agent types
        """
        try:
            query = (
                select(AgentType)
                .where(AgentType.is_active == is_active)
                .order_by(AgentType.category, AgentType.sort_order, AgentType.name)
            )
            result = await db.execute(query)
            agent_types = list(result.scalars().all())

            # Group by category
            grouped: dict[str, list[AgentType]] = {}
            for at in agent_types:
                cat: str = str(at.category) if at.category else "uncategorized"
                if cat not in grouped:
                    grouped[cat] = []
                grouped[cat].append(at)

            return grouped
        except Exception as e:
            logger.error(f"Error getting grouped agent types: {e!s}", exc_info=True)
            raise


# Create a singleton instance for use across the application
agent_type = CRUDAgentType(AgentType)
525
backend/app/crud/syndarix/issue.py
Normal file
@@ -0,0 +1,525 @@
# app/crud/syndarix/issue.py
"""Async CRUD operations for Issue model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import UTC, datetime
from typing import Any
from uuid import UUID

from sqlalchemy import func, or_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, Issue
from app.models.syndarix.enums import IssuePriority, IssueStatus, SyncStatus
from app.schemas.syndarix import IssueCreate, IssueUpdate

logger = logging.getLogger(__name__)


class CRUDIssue(CRUDBase[Issue, IssueCreate, IssueUpdate]):
    """Async CRUD operations for Issue model."""

    async def create(self, db: AsyncSession, *, obj_in: IssueCreate) -> Issue:
        """Create a new issue with error handling."""
        try:
            db_obj = Issue(
                project_id=obj_in.project_id,
                title=obj_in.title,
                body=obj_in.body,
                status=obj_in.status,
                priority=obj_in.priority,
                labels=obj_in.labels,
                assigned_agent_id=obj_in.assigned_agent_id,
                human_assignee=obj_in.human_assignee,
                sprint_id=obj_in.sprint_id,
                story_points=obj_in.story_points,
                external_tracker_type=obj_in.external_tracker_type,
                external_issue_id=obj_in.external_issue_id,
                remote_url=obj_in.remote_url,
                external_issue_number=obj_in.external_issue_number,
                sync_status=SyncStatus.SYNCED,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error(f"Integrity error creating issue: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating issue: {e!s}", exc_info=True)
            raise

    async def get_with_details(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get an issue with full details including related entity names.

        Returns:
            Dictionary with issue and related entity details
        """
        try:
            # Get issue with joined relationships
            result = await db.execute(
                select(Issue)
                .options(
                    joinedload(Issue.project),
                    joinedload(Issue.sprint),
                    joinedload(Issue.assigned_agent).joinedload(
                        AgentInstance.agent_type
                    ),
                )
                .where(Issue.id == issue_id)
            )
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            return {
                "issue": issue,
                "project_name": issue.project.name if issue.project else None,
                "project_slug": issue.project.slug if issue.project else None,
                "sprint_name": issue.sprint.name if issue.sprint else None,
                "assigned_agent_type_name": (
                    issue.assigned_agent.agent_type.name
                    if issue.assigned_agent and issue.assigned_agent.agent_type
                    else None
                ),
            }
        except Exception as e:
            logger.error(
                f"Error getting issue with details {issue_id}: {e!s}", exc_info=True
            )
            raise

    async def get_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        status: IssueStatus | None = None,
        priority: IssuePriority | None = None,
        sprint_id: UUID | None = None,
        assigned_agent_id: UUID | None = None,
        labels: list[str] | None = None,
        search: str | None = None,
        skip: int = 0,
        limit: int = 100,
        sort_by: str = "created_at",
        sort_order: str = "desc",
    ) -> tuple[list[Issue], int]:
        """Get issues for a specific project with filters."""
        try:
            query = select(Issue).where(Issue.project_id == project_id)

            # Apply filters
            if status is not None:
                query = query.where(Issue.status == status)

            if priority is not None:
                query = query.where(Issue.priority == priority)

            if sprint_id is not None:
                query = query.where(Issue.sprint_id == sprint_id)

            if assigned_agent_id is not None:
                query = query.where(Issue.assigned_agent_id == assigned_agent_id)

            if labels:
                # Match any of the provided labels
                for label in labels:
                    query = query.where(Issue.labels.contains([label.lower()]))

            if search:
                search_filter = or_(
                    Issue.title.ilike(f"%{search}%"),
                    Issue.body.ilike(f"%{search}%"),
                )
                query = query.where(search_filter)

            # Get total count
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting
            sort_column = getattr(Issue, sort_by, Issue.created_at)
            if sort_order == "desc":
                query = query.order_by(sort_column.desc())
            else:
                query = query.order_by(sort_column.asc())

            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            issues = list(result.scalars().all())

            return issues, total
        except Exception as e:
            logger.error(
                f"Error getting issues by project {project_id}: {e!s}", exc_info=True
            )
            raise

    async def get_by_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
        status: IssueStatus | None = None,
    ) -> list[Issue]:
        """Get all issues in a sprint."""
        try:
            query = select(Issue).where(Issue.sprint_id == sprint_id)

            if status is not None:
                query = query.where(Issue.status == status)

            query = query.order_by(Issue.priority.desc(), Issue.created_at.asc())
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting issues by sprint {sprint_id}: {e!s}", exc_info=True
            )
            raise

    async def assign_to_agent(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
        agent_id: UUID | None,
    ) -> Issue | None:
        """Assign an issue to an agent (or unassign if agent_id is None)."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.assigned_agent_id = agent_id
            issue.human_assignee = None  # Clear human assignee when assigning to agent
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error assigning issue {issue_id} to agent {agent_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def assign_to_human(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
        human_assignee: str | None,
    ) -> Issue | None:
        """Assign an issue to a human (or unassign if human_assignee is None)."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.human_assignee = human_assignee
            issue.assigned_agent_id = None  # Clear agent when assigning to human
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error assigning issue {issue_id} to human {human_assignee}: {e!s}",
                exc_info=True,
            )
            raise

    async def close_issue(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> Issue | None:
        """Close an issue by setting status and closed_at timestamp."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.status = IssueStatus.CLOSED
            issue.closed_at = datetime.now(UTC)
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(f"Error closing issue {issue_id}: {e!s}", exc_info=True)
            raise

    async def reopen_issue(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> Issue | None:
        """Reopen a closed issue."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.status = IssueStatus.OPEN
            issue.closed_at = None
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(f"Error reopening issue {issue_id}: {e!s}", exc_info=True)
            raise

    async def update_sync_status(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
        sync_status: SyncStatus,
        last_synced_at: datetime | None = None,
        external_updated_at: datetime | None = None,
    ) -> Issue | None:
        """Update the sync status of an issue."""
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.sync_status = sync_status
            if last_synced_at:
                issue.last_synced_at = last_synced_at
            if external_updated_at:
                issue.external_updated_at = external_updated_at

            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error updating sync status for issue {issue_id}: {e!s}", exc_info=True
            )
            raise
    async def get_project_stats(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> dict[str, Any]:
        """Get issue statistics for a project."""
        try:
            # Get counts by status
            status_counts = await db.execute(
                select(Issue.status, func.count(Issue.id).label("count"))
                .where(Issue.project_id == project_id)
                .group_by(Issue.status)
            )
            by_status = {row.status.value: row.count for row in status_counts}

            # Get counts by priority
            priority_counts = await db.execute(
                select(Issue.priority, func.count(Issue.id).label("count"))
                .where(Issue.project_id == project_id)
                .group_by(Issue.priority)
            )
            by_priority = {row.priority.value: row.count for row in priority_counts}

            # Get story points
            points_result = await db.execute(
                select(
                    func.sum(Issue.story_points).label("total"),
                    func.sum(Issue.story_points)
                    .filter(Issue.status == IssueStatus.CLOSED)
                    .label("completed"),
                ).where(Issue.project_id == project_id)
            )
            points_row = points_result.one()

            total_issues = sum(by_status.values())

            return {
                "total": total_issues,
                "open": by_status.get("open", 0),
                "in_progress": by_status.get("in_progress", 0),
                "in_review": by_status.get("in_review", 0),
                "blocked": by_status.get("blocked", 0),
                "closed": by_status.get("closed", 0),
                "by_priority": by_priority,
                "total_story_points": points_row.total,
                "completed_story_points": points_row.completed,
            }
        except Exception as e:
            logger.error(
                f"Error getting issue stats for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise
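    # Editorial note on the aggregates above: SQLAlchemy's `.filter()` on an
    # aggregate compiles to SQL's `FILTER (WHERE ...)` clause, so both sums
    # arrive in one round trip, roughly (assuming PostgreSQL):
    #
    #   SELECT sum(story_points) AS total,
    #          sum(story_points) FILTER (WHERE status = 'closed') AS completed
    #   FROM issues WHERE project_id = :project_id
    #
    # Both sums are NULL (Python None) when no matching issues carry story
    # points, so callers of get_project_stats should expect None as well as 0.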
    async def get_by_external_id(
        self,
        db: AsyncSession,
        *,
        external_tracker_type: str,
        external_issue_id: str,
    ) -> Issue | None:
        """Get an issue by its external tracker ID."""
        try:
            result = await db.execute(
                select(Issue).where(
                    Issue.external_tracker_type == external_tracker_type,
                    Issue.external_issue_id == external_issue_id,
                )
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(
                f"Error getting issue by external ID {external_tracker_type}:{external_issue_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_pending_sync(
        self,
        db: AsyncSession,
        *,
        project_id: UUID | None = None,
        limit: int = 100,
    ) -> list[Issue]:
        """Get issues that need to be synced with external tracker."""
        try:
            query = select(Issue).where(
                Issue.external_tracker_type.isnot(None),
                Issue.sync_status.in_([SyncStatus.PENDING, SyncStatus.ERROR]),
            )

            if project_id:
                query = query.where(Issue.project_id == project_id)

            query = query.order_by(Issue.updated_at.asc()).limit(limit)
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(f"Error getting pending sync issues: {e!s}", exc_info=True)
            raise

    async def remove_sprint_from_issues(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> int:
        """Remove sprint assignment from all issues in a sprint.

        Used when deleting a sprint to clean up references.

        Returns:
            Number of issues updated
        """
        try:
            from sqlalchemy import update

            result = await db.execute(
                update(Issue).where(Issue.sprint_id == sprint_id).values(sprint_id=None)
            )
            await db.commit()
            return result.rowcount
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error removing sprint {sprint_id} from issues: {e!s}",
                exc_info=True,
            )
            raise

    async def unassign(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> Issue | None:
        """Remove agent assignment from an issue.

        Returns:
            Updated issue or None if not found
        """
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.assigned_agent_id = None
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(f"Error unassigning issue {issue_id}: {e!s}", exc_info=True)
            raise

    async def remove_from_sprint(
        self,
        db: AsyncSession,
        *,
        issue_id: UUID,
    ) -> Issue | None:
        """Remove an issue from its current sprint.

        Returns:
            Updated issue or None if not found
        """
        try:
            result = await db.execute(select(Issue).where(Issue.id == issue_id))
            issue = result.scalar_one_or_none()

            if not issue:
                return None

            issue.sprint_id = None
            await db.commit()
            await db.refresh(issue)
            return issue
        except Exception as e:
            await db.rollback()
            logger.error(
                f"Error removing issue {issue_id} from sprint: {e!s}",
                exc_info=True,
            )
            raise


# Create a singleton instance for use across the application
issue = CRUDIssue(Issue)
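A minimal usage sketch (not part of the diff) of the `issue` singleton: `SessionLocal` is the session factory this PR imports elsewhere, while the wrapper function and its call site are assumptions for illustration.

from uuid import UUID

from app.core.database import SessionLocal
from app.crud.syndarix.issue import issue
from app.models.syndarix.enums import IssueStatus


async def list_open_issues(project_id: UUID):
    async with SessionLocal() as db:
        # Returns (issues, total) so callers can paginate.
        return await issue.get_by_project(
            db, project_id=project_id, status=IssueStatus.OPEN, limit=50
        )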
362  backend/app/crud/syndarix/project.py  Normal file
@@ -0,0 +1,362 @@
# app/crud/syndarix/project.py
"""Async CRUD operations for Project model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import UTC, datetime
from typing import Any
from uuid import UUID

from sqlalchemy import func, or_, select, update
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession

from app.crud.base import CRUDBase
from app.models.syndarix import AgentInstance, Issue, Project, Sprint
from app.models.syndarix.enums import AgentStatus, ProjectStatus, SprintStatus
from app.schemas.syndarix import ProjectCreate, ProjectUpdate

logger = logging.getLogger(__name__)


class CRUDProject(CRUDBase[Project, ProjectCreate, ProjectUpdate]):
    """Async CRUD operations for Project model."""

    async def get_by_slug(self, db: AsyncSession, *, slug: str) -> Project | None:
        """Get project by slug."""
        try:
            result = await db.execute(select(Project).where(Project.slug == slug))
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(f"Error getting project by slug {slug}: {e!s}")
            raise

    async def create(self, db: AsyncSession, *, obj_in: ProjectCreate) -> Project:
        """Create a new project with error handling."""
        try:
            db_obj = Project(
                name=obj_in.name,
                slug=obj_in.slug,
                description=obj_in.description,
                autonomy_level=obj_in.autonomy_level,
                status=obj_in.status,
                settings=obj_in.settings or {},
                owner_id=obj_in.owner_id,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            if "slug" in error_msg.lower():
                logger.warning(f"Duplicate slug attempted: {obj_in.slug}")
                raise ValueError(f"Project with slug '{obj_in.slug}' already exists")
            logger.error(f"Integrity error creating project: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating project: {e!s}", exc_info=True)
            raise
    async def get_multi_with_filters(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        status: ProjectStatus | None = None,
        owner_id: UUID | None = None,
        search: str | None = None,
        sort_by: str = "created_at",
        sort_order: str = "desc",
    ) -> tuple[list[Project], int]:
        """
        Get multiple projects with filtering, searching, and sorting.

        Returns:
            Tuple of (projects list, total count)
        """
        try:
            query = select(Project)

            # Apply filters
            if status is not None:
                query = query.where(Project.status == status)

            if owner_id is not None:
                query = query.where(Project.owner_id == owner_id)

            if search:
                search_filter = or_(
                    Project.name.ilike(f"%{search}%"),
                    Project.slug.ilike(f"%{search}%"),
                    Project.description.ilike(f"%{search}%"),
                )
                query = query.where(search_filter)

            # Get total count before pagination
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting
            sort_column = getattr(Project, sort_by, Project.created_at)
            if sort_order == "desc":
                query = query.order_by(sort_column.desc())
            else:
                query = query.order_by(sort_column.asc())

            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            projects = list(result.scalars().all())

            return projects, total
        except Exception as e:
            logger.error(f"Error getting projects with filters: {e!s}")
            raise
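    # Editorial note: the count-before-pagination pattern above wraps the
    # filtered query in a subquery (`query.alias()`) and counts its rows,
    # roughly `SELECT count(*) FROM (<filtered query>) AS anon`. This keeps
    # the total consistent with the applied filters at the cost of a second
    # round trip; SQLAlchemy 2.0 also spells the same thing as
    # `query.subquery()`.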
    async def get_with_counts(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get a single project with agent and issue counts.

        Returns:
            Dictionary with project, agent_count, issue_count, active_sprint_name
        """
        try:
            # Get project
            result = await db.execute(select(Project).where(Project.id == project_id))
            project = result.scalar_one_or_none()

            if not project:
                return None

            # Get agent count
            agent_count_result = await db.execute(
                select(func.count(AgentInstance.id)).where(
                    AgentInstance.project_id == project_id
                )
            )
            agent_count = agent_count_result.scalar_one()

            # Get issue count
            issue_count_result = await db.execute(
                select(func.count(Issue.id)).where(Issue.project_id == project_id)
            )
            issue_count = issue_count_result.scalar_one()

            # Get active sprint name
            active_sprint_result = await db.execute(
                select(Sprint.name).where(
                    Sprint.project_id == project_id,
                    Sprint.status == SprintStatus.ACTIVE,
                )
            )
            active_sprint_name = active_sprint_result.scalar_one_or_none()

            return {
                "project": project,
                "agent_count": agent_count,
                "issue_count": issue_count,
                "active_sprint_name": active_sprint_name,
            }
        except Exception as e:
            logger.error(
                f"Error getting project with counts {project_id}: {e!s}", exc_info=True
            )
            raise

    async def get_multi_with_counts(
        self,
        db: AsyncSession,
        *,
        skip: int = 0,
        limit: int = 100,
        status: ProjectStatus | None = None,
        owner_id: UUID | None = None,
        search: str | None = None,
    ) -> tuple[list[dict[str, Any]], int]:
        """
        Get projects with agent/issue counts in optimized queries.

        Returns:
            Tuple of (list of dicts with project and counts, total count)
        """
        try:
            # Get filtered projects
            projects, total = await self.get_multi_with_filters(
                db,
                skip=skip,
                limit=limit,
                status=status,
                owner_id=owner_id,
                search=search,
            )

            if not projects:
                # No rows on this page, but preserve the true total
                return [], total

            project_ids = [p.id for p in projects]

            # Get agent counts in bulk
            agent_counts_result = await db.execute(
                select(
                    AgentInstance.project_id,
                    func.count(AgentInstance.id).label("count"),
                )
                .where(AgentInstance.project_id.in_(project_ids))
                .group_by(AgentInstance.project_id)
            )
            agent_counts = {row.project_id: row.count for row in agent_counts_result}

            # Get issue counts in bulk
            issue_counts_result = await db.execute(
                select(
                    Issue.project_id,
                    func.count(Issue.id).label("count"),
                )
                .where(Issue.project_id.in_(project_ids))
                .group_by(Issue.project_id)
            )
            issue_counts = {row.project_id: row.count for row in issue_counts_result}

            # Get active sprint names
            active_sprints_result = await db.execute(
                select(Sprint.project_id, Sprint.name).where(
                    Sprint.project_id.in_(project_ids),
                    Sprint.status == SprintStatus.ACTIVE,
                )
            )
            active_sprints = {row.project_id: row.name for row in active_sprints_result}

            # Combine results
            results = [
                {
                    "project": project,
                    "agent_count": agent_counts.get(project.id, 0),
                    "issue_count": issue_counts.get(project.id, 0),
                    "active_sprint_name": active_sprints.get(project.id),
                }
                for project in projects
            ]

            return results, total
        except Exception as e:
            logger.error(f"Error getting projects with counts: {e!s}", exc_info=True)
            raise

    async def get_projects_by_owner(
        self,
        db: AsyncSession,
        *,
        owner_id: UUID,
        status: ProjectStatus | None = None,
    ) -> list[Project]:
        """Get all projects owned by a specific user."""
        try:
            query = select(Project).where(Project.owner_id == owner_id)

            if status is not None:
                query = query.where(Project.status == status)

            query = query.order_by(Project.created_at.desc())
            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:
            logger.error(
                f"Error getting projects by owner {owner_id}: {e!s}", exc_info=True
            )
            raise
    async def archive_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> Project | None:
        """Archive a project by setting status to ARCHIVED.

        This also performs cascading cleanup:
        - Terminates all active agent instances
        - Cancels all planned/active sprints
        - Unassigns issues from terminated agents
        """
        try:
            result = await db.execute(select(Project).where(Project.id == project_id))
            project = result.scalar_one_or_none()

            if not project:
                return None

            now = datetime.now(UTC)

            # 1. Get all agent IDs that will be terminated
            agents_to_terminate = await db.execute(
                select(AgentInstance.id).where(
                    AgentInstance.project_id == project_id,
                    AgentInstance.status != AgentStatus.TERMINATED,
                )
            )
            agent_ids = [row[0] for row in agents_to_terminate.fetchall()]

            # 2. Unassign issues from these agents to prevent orphaned assignments
            if agent_ids:
                await db.execute(
                    update(Issue)
                    .where(Issue.assigned_agent_id.in_(agent_ids))
                    .values(assigned_agent_id=None)
                )

            # 3. Terminate all active agents
            await db.execute(
                update(AgentInstance)
                .where(
                    AgentInstance.project_id == project_id,
                    AgentInstance.status != AgentStatus.TERMINATED,
                )
                .values(
                    status=AgentStatus.TERMINATED,
                    terminated_at=now,
                    current_task=None,
                    session_id=None,
                    updated_at=now,
                )
            )

            # 4. Cancel all planned/active sprints
            await db.execute(
                update(Sprint)
                .where(
                    Sprint.project_id == project_id,
                    Sprint.status.in_([SprintStatus.PLANNED, SprintStatus.ACTIVE]),
                )
                .values(
                    status=SprintStatus.CANCELLED,
                    updated_at=now,
                )
            )

            # 5. Archive the project
            project.status = ProjectStatus.ARCHIVED
            await db.commit()
            await db.refresh(project)

            logger.info(
                f"Archived project {project_id}: terminated agents={len(agent_ids)}"
            )

            return project
        except Exception as e:
            await db.rollback()
            logger.error(f"Error archiving project {project_id}: {e!s}", exc_info=True)
            raise


# Create a singleton instance for use across the application
project = CRUDProject(Project)
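A sketch (illustrative, not from the diff) of driving the cascading archive from service code; the wrapper function is an assumption, while `SessionLocal` and `archive_project` are as introduced in this PR.

from uuid import UUID

from app.core.database import SessionLocal
from app.crud.syndarix.project import project


async def archive(project_id: UUID) -> bool:
    async with SessionLocal() as db:
        # archive_project commits (or rolls back) internally; a None return
        # means the project was not found.
        archived = await project.archive_project(db, project_id=project_id)
        return archived is not None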
439  backend/app/crud/syndarix/sprint.py  Normal file
@@ -0,0 +1,439 @@
# app/crud/syndarix/sprint.py
"""Async CRUD operations for Sprint model using SQLAlchemy 2.0 patterns."""

import logging
from datetime import date
from typing import Any
from uuid import UUID

from sqlalchemy import func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.crud.base import CRUDBase
from app.models.syndarix import Issue, Sprint
from app.models.syndarix.enums import IssueStatus, SprintStatus
from app.schemas.syndarix import SprintCreate, SprintUpdate

logger = logging.getLogger(__name__)


class CRUDSprint(CRUDBase[Sprint, SprintCreate, SprintUpdate]):
    """Async CRUD operations for Sprint model."""

    async def create(self, db: AsyncSession, *, obj_in: SprintCreate) -> Sprint:
        """Create a new sprint with error handling."""
        try:
            db_obj = Sprint(
                project_id=obj_in.project_id,
                name=obj_in.name,
                number=obj_in.number,
                goal=obj_in.goal,
                start_date=obj_in.start_date,
                end_date=obj_in.end_date,
                status=obj_in.status,
                planned_points=obj_in.planned_points,
                velocity=obj_in.velocity,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)
            return db_obj
        except IntegrityError as e:
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error(f"Integrity error creating sprint: {error_msg}")
            raise ValueError(f"Database integrity error: {error_msg}")
        except Exception as e:
            await db.rollback()
            logger.error(f"Unexpected error creating sprint: {e!s}", exc_info=True)
            raise
    async def get_with_details(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> dict[str, Any] | None:
        """
        Get a sprint with full details including issue counts.

        Returns:
            Dictionary with sprint and related details
        """
        try:
            # Get sprint with joined project
            result = await db.execute(
                select(Sprint)
                .options(joinedload(Sprint.project))
                .where(Sprint.id == sprint_id)
            )
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            # Get issue counts
            issue_counts = await db.execute(
                select(
                    func.count(Issue.id).label("total"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.OPEN)
                    .label("open"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.CLOSED)
                    .label("completed"),
                ).where(Issue.sprint_id == sprint_id)
            )
            counts = issue_counts.one()

            return {
                "sprint": sprint,
                "project_name": sprint.project.name if sprint.project else None,
                "project_slug": sprint.project.slug if sprint.project else None,
                "issue_count": counts.total,
                "open_issues": counts.open,
                "completed_issues": counts.completed,
            }
        except Exception as e:
            logger.error(
                f"Error getting sprint with details {sprint_id}: {e!s}", exc_info=True
            )
            raise

    async def get_by_project(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        status: SprintStatus | None = None,
        skip: int = 0,
        limit: int = 100,
    ) -> tuple[list[Sprint], int]:
        """Get sprints for a specific project."""
        try:
            query = select(Sprint).where(Sprint.project_id == project_id)

            if status is not None:
                query = query.where(Sprint.status == status)

            # Get total count
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()

            # Apply sorting (by number descending - newest first)
            query = query.order_by(Sprint.number.desc())
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            sprints = list(result.scalars().all())

            return sprints, total
        except Exception as e:
            logger.error(
                f"Error getting sprints by project {project_id}: {e!s}", exc_info=True
            )
            raise

    async def get_active_sprint(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> Sprint | None:
        """Get the currently active sprint for a project."""
        try:
            result = await db.execute(
                select(Sprint).where(
                    Sprint.project_id == project_id,
                    Sprint.status == SprintStatus.ACTIVE,
                )
            )
            return result.scalar_one_or_none()
        except Exception as e:
            logger.error(
                f"Error getting active sprint for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_next_sprint_number(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
    ) -> int:
        """Get the next sprint number for a project."""
        try:
            result = await db.execute(
                select(func.max(Sprint.number)).where(Sprint.project_id == project_id)
            )
            max_number = result.scalar_one_or_none()
            return (max_number or 0) + 1
        except Exception as e:
            logger.error(
                f"Error getting next sprint number for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise
    async def start_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
        start_date: date | None = None,
    ) -> Sprint | None:
        """Start a planned sprint.

        Uses row-level locking (SELECT FOR UPDATE) to prevent race conditions
        when multiple requests try to start sprints concurrently.
        """
        try:
            # Lock the sprint row to prevent concurrent modifications
            result = await db.execute(
                select(Sprint).where(Sprint.id == sprint_id).with_for_update()
            )
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            if sprint.status != SprintStatus.PLANNED:
                raise ValueError(
                    f"Cannot start sprint with status {sprint.status.value}"
                )

            # Check for existing active sprint with lock to prevent race condition
            # Lock all sprints for this project to ensure atomic check-and-update
            active_check = await db.execute(
                select(Sprint)
                .where(
                    Sprint.project_id == sprint.project_id,
                    Sprint.status == SprintStatus.ACTIVE,
                )
                .with_for_update()
            )
            active_sprint = active_check.scalar_one_or_none()
            if active_sprint:
                raise ValueError(
                    f"Project already has an active sprint: {active_sprint.name}"
                )

            sprint.status = SprintStatus.ACTIVE
            if start_date:
                sprint.start_date = start_date

            # Calculate planned points from issues
            points_result = await db.execute(
                select(func.sum(Issue.story_points)).where(Issue.sprint_id == sprint_id)
            )
            sprint.planned_points = points_result.scalar_one_or_none() or 0

            await db.commit()
            await db.refresh(sprint)
            return sprint
        except ValueError:
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"Error starting sprint {sprint_id}: {e!s}", exc_info=True)
            raise
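    # Editorial note: `SELECT ... FOR UPDATE` only locks rows that already
    # exist, so the active-sprint check above does not fully guard two
    # concurrent start_sprint() calls racing on a project that has *no*
    # active sprint yet (the classic phantom-row case). A partial unique
    # index along the lines of
    # `CREATE UNIQUE INDEX ... ON sprints (project_id) WHERE status = 'active'`
    # (an assumption, not present in this diff) or SERIALIZABLE isolation
    # would close that gap.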
    async def complete_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> Sprint | None:
        """Complete an active sprint and calculate completed points.

        Uses row-level locking (SELECT FOR UPDATE) to prevent race conditions
        when velocity is being calculated and other operations might modify issues.
        """
        try:
            # Lock the sprint row to prevent concurrent modifications
            result = await db.execute(
                select(Sprint).where(Sprint.id == sprint_id).with_for_update()
            )
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            if sprint.status != SprintStatus.ACTIVE:
                raise ValueError(
                    f"Cannot complete sprint with status {sprint.status.value}"
                )

            sprint.status = SprintStatus.COMPLETED

            # Calculate velocity (completed points) from closed issues
            # Note: Issues are not locked, but sprint lock ensures this sprint's
            # completion is atomic and prevents concurrent completion attempts
            points_result = await db.execute(
                select(func.sum(Issue.story_points)).where(
                    Issue.sprint_id == sprint_id,
                    Issue.status == IssueStatus.CLOSED,
                )
            )
            sprint.velocity = points_result.scalar_one_or_none() or 0

            await db.commit()
            await db.refresh(sprint)
            return sprint
        except ValueError:
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"Error completing sprint {sprint_id}: {e!s}", exc_info=True)
            raise

    async def cancel_sprint(
        self,
        db: AsyncSession,
        *,
        sprint_id: UUID,
    ) -> Sprint | None:
        """Cancel a sprint (only PLANNED or ACTIVE sprints can be cancelled).

        Uses row-level locking to prevent race conditions with concurrent
        sprint status modifications.
        """
        try:
            # Lock the sprint row to prevent concurrent modifications
            result = await db.execute(
                select(Sprint).where(Sprint.id == sprint_id).with_for_update()
            )
            sprint = result.scalar_one_or_none()

            if not sprint:
                return None

            if sprint.status not in [SprintStatus.PLANNED, SprintStatus.ACTIVE]:
                raise ValueError(
                    f"Cannot cancel sprint with status {sprint.status.value}"
                )

            sprint.status = SprintStatus.CANCELLED
            await db.commit()
            await db.refresh(sprint)
            return sprint
        except ValueError:
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"Error cancelling sprint {sprint_id}: {e!s}", exc_info=True)
            raise

    async def get_velocity(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        limit: int = 5,
    ) -> list[dict[str, Any]]:
        """Get velocity data for completed sprints."""
        try:
            result = await db.execute(
                select(Sprint)
                .where(
                    Sprint.project_id == project_id,
                    Sprint.status == SprintStatus.COMPLETED,
                )
                .order_by(Sprint.number.desc())
                .limit(limit)
            )
            sprints = list(result.scalars().all())

            velocity_data = []
            for sprint in reversed(sprints):  # Return in chronological order
                velocity_ratio = None
                if sprint.planned_points and sprint.planned_points > 0:
                    velocity_ratio = (sprint.velocity or 0) / sprint.planned_points
                velocity_data.append(
                    {
                        "sprint_number": sprint.number,
                        "sprint_name": sprint.name,
                        "planned_points": sprint.planned_points,
                        "velocity": sprint.velocity,
                        "velocity_ratio": velocity_ratio,
                    }
                )

            return velocity_data
        except Exception as e:
            logger.error(
                f"Error getting velocity for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise

    async def get_sprints_with_issue_counts(
        self,
        db: AsyncSession,
        *,
        project_id: UUID,
        skip: int = 0,
        limit: int = 100,
    ) -> tuple[list[dict[str, Any]], int]:
        """Get sprints with issue counts in optimized queries."""
        try:
            # Get sprints
            sprints, total = await self.get_by_project(
                db, project_id=project_id, skip=skip, limit=limit
            )

            if not sprints:
                # No rows on this page, but preserve the true total
                return [], total

            sprint_ids = [s.id for s in sprints]

            # Get issue counts in bulk
            issue_counts = await db.execute(
                select(
                    Issue.sprint_id,
                    func.count(Issue.id).label("total"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.OPEN)
                    .label("open"),
                    func.count(Issue.id)
                    .filter(Issue.status == IssueStatus.CLOSED)
                    .label("completed"),
                )
                .where(Issue.sprint_id.in_(sprint_ids))
                .group_by(Issue.sprint_id)
            )
            counts_map = {
                row.sprint_id: {
                    "issue_count": row.total,
                    "open_issues": row.open,
                    "completed_issues": row.completed,
                }
                for row in issue_counts
            }

            # Combine results
            results = [
                {
                    "sprint": sprint,
                    **counts_map.get(
                        sprint.id,
                        {"issue_count": 0, "open_issues": 0, "completed_issues": 0},
                    ),
                }
                for sprint in sprints
            ]

            return results, total
        except Exception as e:
            logger.error(
                f"Error getting sprints with counts for project {project_id}: {e!s}",
                exc_info=True,
            )
            raise


# Create a singleton instance for use across the application
sprint = CRUDSprint(Sprint)
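A sketch (illustrative, not from the diff) of the intended sprint lifecycle through the `sprint` singleton; the driver function is an assumption, and ValueError is what the CRUD layer raises on invalid transitions.

from uuid import UUID

from app.core.database import SessionLocal
from app.crud.syndarix.sprint import sprint


async def run_sprint_lifecycle(sprint_id: UUID) -> None:
    async with SessionLocal() as db:
        # PLANNED -> ACTIVE: locks the sprint row, rejects a second active
        # sprint in the project, and recomputes planned_points from issues.
        await sprint.start_sprint(db, sprint_id=sprint_id)

    # ... sprint runs; issues get closed ...

    async with SessionLocal() as db:
        # ACTIVE -> COMPLETED: velocity is summed from closed issues only;
        # an invalid transition surfaces as ValueError.
        await sprint.complete_sprint(db, sprint_id=sprint_id)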
155  backend/app/repositories/user.py → backend/app/crud/user.py  (Normal file → Executable file)
@@ -1,5 +1,5 @@
-# app/repositories/user.py
-"""Repository for User model async database operations using SQLAlchemy 2.0 patterns."""
+# app/crud/user_async.py
+"""Async CRUD operations for User model using SQLAlchemy 2.0 patterns."""
 
 import logging
 from datetime import UTC, datetime
@@ -11,16 +11,15 @@ from sqlalchemy.exc import IntegrityError
 from sqlalchemy.ext.asyncio import AsyncSession
 
 from app.core.auth import get_password_hash_async
-from app.core.repository_exceptions import DuplicateEntryError, InvalidInputError
+from app.crud.base import CRUDBase
 from app.models.user import User
-from app.repositories.base import BaseRepository
 from app.schemas.users import UserCreate, UserUpdate
 
 logger = logging.getLogger(__name__)
 
 
-class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
-    """Repository for User model."""
+class CRUDUser(CRUDBase[User, UserCreate, UserUpdate]):
+    """Async CRUD operations for User model."""
 
     async def get_by_email(self, db: AsyncSession, *, email: str) -> User | None:
         """Get user by email address."""
@@ -28,12 +27,13 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
             result = await db.execute(select(User).where(User.email == email))
             return result.scalar_one_or_none()
         except Exception as e:
-            logger.error("Error getting user by email %s: %s", email, e)
+            logger.error(f"Error getting user by email {email}: {e!s}")
             raise
 
     async def create(self, db: AsyncSession, *, obj_in: UserCreate) -> User:
         """Create a new user with async password hashing and error handling."""
         try:
+            # Hash password asynchronously to avoid blocking event loop
            password_hash = await get_password_hash_async(obj_in.password)
 
             db_obj = User(
@@ -57,49 +57,13 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
             await db.rollback()
             error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
             if "email" in error_msg.lower():
-                logger.warning("Duplicate email attempted: %s", obj_in.email)
-                raise DuplicateEntryError(
-                    f"User with email {obj_in.email} already exists"
-                )
-            logger.error("Integrity error creating user: %s", error_msg)
-            raise DuplicateEntryError(f"Database integrity error: {error_msg}")
+                logger.warning(f"Duplicate email attempted: {obj_in.email}")
+                raise ValueError(f"User with email {obj_in.email} already exists")
+            logger.error(f"Integrity error creating user: {error_msg}")
+            raise ValueError(f"Database integrity error: {error_msg}")
         except Exception as e:
             await db.rollback()
-            logger.exception("Unexpected error creating user: %s", e)
-            raise
-
-    async def create_oauth_user(
-        self,
-        db: AsyncSession,
-        *,
-        email: str,
-        first_name: str = "User",
-        last_name: str | None = None,
-    ) -> User:
-        """Create a new passwordless user for OAuth sign-in."""
-        try:
-            db_obj = User(
-                email=email,
-                password_hash=None,  # OAuth-only user
-                first_name=first_name,
-                last_name=last_name,
-                is_active=True,
-                is_superuser=False,
-            )
-            db.add(db_obj)
-            await db.flush()  # Get user.id without committing
-            return db_obj
-        except IntegrityError as e:
-            await db.rollback()
-            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
-            if "email" in error_msg.lower():
-                logger.warning("Duplicate email attempted: %s", email)
-                raise DuplicateEntryError(f"User with email {email} already exists")
-            logger.error("Integrity error creating OAuth user: %s", error_msg)
-            raise DuplicateEntryError(f"Database integrity error: {error_msg}")
-        except Exception as e:
-            await db.rollback()
-            logger.exception("Unexpected error creating OAuth user: %s", e)
+            logger.error(f"Unexpected error creating user: {e!s}", exc_info=True)
             raise
 
     async def update(
@@ -111,6 +75,8 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
         else:
             update_data = obj_in.model_dump(exclude_unset=True)
 
+        # Handle password separately if it exists in update data
+        # Hash password asynchronously to avoid blocking event loop
         if "password" in update_data:
             update_data["password_hash"] = await get_password_hash_async(
                 update_data["password"]
@@ -119,15 +85,6 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
 
         return await super().update(db, db_obj=db_obj, obj_in=update_data)
 
-    async def update_password(
-        self, db: AsyncSession, *, user: User, password_hash: str
-    ) -> User:
-        """Set a new password hash on a user and commit."""
-        user.password_hash = password_hash
-        await db.commit()
-        await db.refresh(user)
-        return user
-
     async def get_multi_with_total(
         self,
         db: AsyncSession,
@@ -139,23 +96,43 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
         filters: dict[str, Any] | None = None,
         search: str | None = None,
     ) -> tuple[list[User], int]:
-        """Get multiple users with total count, filtering, sorting, and search."""
+        """
+        Get multiple users with total count, filtering, sorting, and search.
+
+        Args:
+            db: Database session
+            skip: Number of records to skip
+            limit: Maximum number of records to return
+            sort_by: Field name to sort by
+            sort_order: Sort order ("asc" or "desc")
+            filters: Dictionary of filters (field_name: value)
+            search: Search term to match against email, first_name, last_name
+
+        Returns:
+            Tuple of (users list, total count)
+        """
+        # Validate pagination
         if skip < 0:
-            raise InvalidInputError("skip must be non-negative")
+            raise ValueError("skip must be non-negative")
         if limit < 0:
-            raise InvalidInputError("limit must be non-negative")
+            raise ValueError("limit must be non-negative")
         if limit > 1000:
-            raise InvalidInputError("Maximum limit is 1000")
+            raise ValueError("Maximum limit is 1000")
 
         try:
            # Build base query
            query = select(User)
 
+            # Exclude soft-deleted users
+            query = query.where(User.deleted_at.is_(None))
+
            # Apply filters
            if filters:
                for field, value in filters.items():
                    if hasattr(User, field) and value is not None:
                        query = query.where(getattr(User, field) == value)
 
+            # Apply search
            if search:
                search_filter = or_(
                    User.email.ilike(f"%{search}%"),
@@ -164,12 +141,14 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
                )
                query = query.where(search_filter)
 
            # Get total count
+            from sqlalchemy import func
+
            count_query = select(func.count()).select_from(query.alias())
            count_result = await db.execute(count_query)
            total = count_result.scalar_one()
 
            # Apply sorting
            if sort_by and hasattr(User, sort_by):
                sort_column = getattr(User, sort_by)
                if sort_order.lower() == "desc":
@@ -177,6 +156,7 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
                else:
                    query = query.order_by(sort_column.asc())
 
+            # Apply pagination
            query = query.offset(skip).limit(limit)
            result = await db.execute(query)
            users = list(result.scalars().all())
@@ -184,21 +164,32 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
            return users, total
 
        except Exception as e:
-            logger.error("Error retrieving paginated users: %s", e)
+            logger.error(f"Error retrieving paginated users: {e!s}")
            raise
 
     async def bulk_update_status(
         self, db: AsyncSession, *, user_ids: list[UUID], is_active: bool
     ) -> int:
-        """Bulk update is_active status for multiple users."""
+        """
+        Bulk update is_active status for multiple users.
+
+        Args:
+            db: Database session
+            user_ids: List of user IDs to update
+            is_active: New active status
+
+        Returns:
+            Number of users updated
+        """
         try:
             if not user_ids:
                 return 0
 
             # Use UPDATE with WHERE IN for efficiency
             stmt = (
                 update(User)
                 .where(User.id.in_(user_ids))
-                .where(User.deleted_at.is_(None))
+                .where(User.deleted_at.is_(None))  # Don't update deleted users
                 .values(is_active=is_active, updated_at=datetime.now(UTC))
             )
 
@@ -206,14 +197,12 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
             await db.commit()
 
             updated_count = result.rowcount
-            logger.info(
-                "Bulk updated %s users to is_active=%s", updated_count, is_active
-            )
+            logger.info(f"Bulk updated {updated_count} users to is_active={is_active}")
             return updated_count
 
         except Exception as e:
             await db.rollback()
-            logger.exception("Error bulk updating user status: %s", e)
+            logger.error(f"Error bulk updating user status: {e!s}", exc_info=True)
             raise
 
     async def bulk_soft_delete(
@@ -223,20 +212,34 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
         user_ids: list[UUID],
         exclude_user_id: UUID | None = None,
     ) -> int:
-        """Bulk soft delete multiple users."""
+        """
+        Bulk soft delete multiple users.
+
+        Args:
+            db: Database session
+            user_ids: List of user IDs to delete
+            exclude_user_id: Optional user ID to exclude (e.g., the admin performing the action)
+
+        Returns:
+            Number of users deleted
+        """
         try:
             if not user_ids:
                 return 0
 
             # Remove excluded user from list
             filtered_ids = [uid for uid in user_ids if uid != exclude_user_id]
 
             if not filtered_ids:
                 return 0
 
             # Use UPDATE with WHERE IN for efficiency
             stmt = (
                 update(User)
                 .where(User.id.in_(filtered_ids))
-                .where(User.deleted_at.is_(None))
+                .where(
+                    User.deleted_at.is_(None)
+                )  # Don't re-delete already deleted users
                 .values(
                     deleted_at=datetime.now(UTC),
                     is_active=False,
@@ -248,22 +251,22 @@ class UserRepository(BaseRepository[User, UserCreate, UserUpdate]):
             await db.commit()
 
             deleted_count = result.rowcount
-            logger.info("Bulk soft deleted %s users", deleted_count)
+            logger.info(f"Bulk soft deleted {deleted_count} users")
             return deleted_count
 
         except Exception as e:
             await db.rollback()
-            logger.exception("Error bulk deleting users: %s", e)
+            logger.error(f"Error bulk deleting users: {e!s}", exc_info=True)
             raise
 
     def is_active(self, user: User) -> bool:
         """Check if user is active."""
-        return bool(user.is_active)
+        return user.is_active
 
     def is_superuser(self, user: User) -> bool:
         """Check if user is a superuser."""
-        return bool(user.is_superuser)
+        return user.is_superuser
 
 
-# Singleton instance
-user_repo = UserRepository(User)
+# Create a singleton instance for use across the application
+user = CRUDUser(User)
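A sketch (illustrative, not from the diff) of the renamed `user` singleton in use; the wrapper function is an assumption, while `bulk_soft_delete` and its `exclude_user_id` guard are as defined above.

from uuid import UUID

from app.core.database import SessionLocal
from app.crud.user import user


async def deactivate_users(user_ids: list[UUID], acting_admin_id: UUID) -> int:
    async with SessionLocal() as db:
        # Soft-deletes in bulk while protecting the acting admin's own account.
        return await user.bulk_soft_delete(
            db, user_ids=user_ids, exclude_user_id=acting_admin_id
        )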
@@ -3,27 +3,48 @@
 Async database initialization script.
 
 Creates the first superuser if configured and doesn't already exist.
+Seeds default agent types (production data) and demo data (when DEMO_MODE is enabled).
 """
 
 import asyncio
 import json
 import logging
 import random
-from datetime import UTC, datetime, timedelta
+from datetime import UTC, date, datetime, timedelta
 from pathlib import Path
 
 from sqlalchemy import select, text
+from sqlalchemy.ext.asyncio import AsyncSession
 
 from app.core.config import settings
 from app.core.database import SessionLocal, engine
+from app.crud.syndarix.agent_type import agent_type as agent_type_crud
+from app.crud.user import user as user_crud
 from app.models.organization import Organization
+from app.models.syndarix import AgentInstance, AgentType, Issue, Project, Sprint
+from app.models.syndarix.enums import (
+    AgentStatus,
+    AutonomyLevel,
+    ClientMode,
+    IssuePriority,
+    IssueStatus,
+    IssueType,
+    ProjectComplexity,
+    ProjectStatus,
+    SprintStatus,
+)
 from app.models.user import User
 from app.models.user_organization import UserOrganization
-from app.repositories.user import user_repo as user_repo
+from app.schemas.syndarix import AgentTypeCreate
 from app.schemas.users import UserCreate
 
 logger = logging.getLogger(__name__)
 
+# Data file paths
+DATA_DIR = Path(__file__).parent.parent / "data"
+DEFAULT_AGENT_TYPES_PATH = DATA_DIR / "default_agent_types.json"
+DEMO_DATA_PATH = DATA_DIR / "demo_data.json"
+
 
 async def init_db() -> User | None:
     """
@@ -44,43 +65,43 @@ async def init_db() -> User | None:
     if not settings.FIRST_SUPERUSER_EMAIL or not settings.FIRST_SUPERUSER_PASSWORD:
         logger.warning(
             "First superuser credentials not configured in settings. "
-            "Using defaults: %s",
-            superuser_email,
+            f"Using defaults: {superuser_email}"
         )
 
     async with SessionLocal() as session:
         try:
             # Check if superuser already exists
-            existing_user = await user_repo.get_by_email(session, email=superuser_email)
+            existing_user = await user_crud.get_by_email(session, email=superuser_email)
 
             if existing_user:
-                logger.info("Superuser already exists: %s", existing_user.email)
-                return existing_user
-
-            # Create superuser if doesn't exist
-            user_in = UserCreate(
-                email=superuser_email,
-                password=superuser_password,
-                first_name="Admin",
-                last_name="User",
-                is_superuser=True,
-            )
-
-            user = await user_repo.create(session, obj_in=user_in)
-            await session.commit()
-            await session.refresh(user)
-
-            logger.info("Created first superuser: %s", user.email)
-
-            # Create demo data if in demo mode
+                logger.info(f"Superuser already exists: {existing_user.email}")
+            else:
+                # Create superuser if doesn't exist
+                user_in = UserCreate(
+                    email=superuser_email,
+                    password=superuser_password,
+                    first_name="Admin",
+                    last_name="User",
+                    is_superuser=True,
+                )
+                existing_user = await user_crud.create(session, obj_in=user_in)
+                await session.commit()
+                await session.refresh(existing_user)
+                logger.info(f"Created first superuser: {existing_user.email}")
+
+            # ALWAYS load default agent types (production data)
+            await load_default_agent_types(session)
+
+            # Only load demo data if in demo mode
             if settings.DEMO_MODE:
                 await load_demo_data(session)
 
-            return user
+            return existing_user
 
         except Exception as e:
             await session.rollback()
-            logger.error("Error initializing database: %s", e)
+            logger.error(f"Error initializing database: {e}")
             raise
 
 
@@ -89,26 +110,96 @@ def _load_json_file(path: Path):
         return json.load(f)
 
 
-async def load_demo_data(session):
-    """Load demo data from JSON file."""
-    demo_data_path = Path(__file__).parent / "core" / "demo_data.json"
-    if not demo_data_path.exists():
-        logger.warning("Demo data file not found: %s", demo_data_path)
+async def load_default_agent_types(session: AsyncSession) -> None:
+    """
+    Load default agent types from JSON file.
+
+    These are production defaults - created only if they don't exist, never overwritten.
+    This allows users to customize agent types without worrying about server restarts.
+    """
+    if not DEFAULT_AGENT_TYPES_PATH.exists():
+        logger.warning(
+            f"Default agent types file not found: {DEFAULT_AGENT_TYPES_PATH}"
+        )
         return
 
     try:
         # Use asyncio.to_thread to avoid blocking the event loop
-        data = await asyncio.to_thread(_load_json_file, demo_data_path)
+        data = await asyncio.to_thread(_load_json_file, DEFAULT_AGENT_TYPES_PATH)
 
-        # Create Organizations
-        org_map = {}
-        for org_data in data.get("organizations", []):
-            # Check if org exists
-            result = await session.execute(
-                text("SELECT * FROM organizations WHERE slug = :slug"),
-                {"slug": org_data["slug"]},
+        for agent_type_data in data:
+            slug = agent_type_data["slug"]
+
+            # Check if agent type already exists
+            existing = await agent_type_crud.get_by_slug(session, slug=slug)
+
+            if existing:
+                logger.debug(f"Agent type already exists: {agent_type_data['name']}")
+                continue
+
+            # Create the agent type
+            agent_type_in = AgentTypeCreate(
+                name=agent_type_data["name"],
+                slug=slug,
+                description=agent_type_data.get("description"),
+                expertise=agent_type_data.get("expertise", []),
+                personality_prompt=agent_type_data["personality_prompt"],
+                primary_model=agent_type_data["primary_model"],
+                fallback_models=agent_type_data.get("fallback_models", []),
+                model_params=agent_type_data.get("model_params", {}),
+                mcp_servers=agent_type_data.get("mcp_servers", []),
+                tool_permissions=agent_type_data.get("tool_permissions", {}),
+                is_active=agent_type_data.get("is_active", True),
+                # Category and display fields
+                category=agent_type_data.get("category"),
+                icon=agent_type_data.get("icon", "bot"),
+                color=agent_type_data.get("color", "#3B82F6"),
+                sort_order=agent_type_data.get("sort_order", 0),
+                typical_tasks=agent_type_data.get("typical_tasks", []),
+                collaboration_hints=agent_type_data.get("collaboration_hints", []),
             )
-            existing_org = result.first()
+
+            await agent_type_crud.create(session, obj_in=agent_type_in)
+            logger.info(f"Created default agent type: {agent_type_data['name']}")
+
+        logger.info("Default agent types loaded successfully")
+
+    except Exception as e:
+        logger.error(f"Error loading default agent types: {e}")
+        raise
+
+
+async def load_demo_data(session: AsyncSession) -> None:
+    """
+    Load demo data from JSON file.
+
+    Only runs when DEMO_MODE is enabled. Creates demo organizations, users,
+    projects, sprints, agent instances, and issues.
+    """
+    if not DEMO_DATA_PATH.exists():
+        logger.warning(f"Demo data file not found: {DEMO_DATA_PATH}")
+        return
+
+    try:
+        data = await asyncio.to_thread(_load_json_file, DEMO_DATA_PATH)
+
+        # Build lookup maps for FK resolution
+        org_map: dict[str, Organization] = {}
+        user_map: dict[str, User] = {}
+        project_map: dict[str, Project] = {}
+        sprint_map: dict[str, Sprint] = {}  # key: "project_slug:sprint_number"
+        agent_type_map: dict[str, AgentType] = {}
+        agent_instance_map: dict[
+            str, AgentInstance
+        ] = {}  # key: "project_slug:agent_name"
+
+        # ========================
+        # 1. Create Organizations
+        # ========================
+        for org_data in data.get("organizations", []):
+            org_result = await session.execute(
+                select(Organization).where(Organization.slug == org_data["slug"])
+            )
+            existing_org = org_result.scalar_one_or_none()
 
             if not existing_org:
                 org = Organization(
@@ -118,29 +209,20 @@ async def load_demo_data(session):
                     is_active=True,
                 )
                 session.add(org)
-                await session.flush()  # Flush to get ID
-                org_map[org.slug] = org
-                logger.info("Created demo organization: %s", org.name)
+                await session.flush()
+                org_map[str(org.slug)] = org
+                logger.info(f"Created demo organization: {org.name}")
             else:
-                # We can't easily get the ORM object from raw SQL result for map without querying again or mapping
-                # So let's just query it properly if we need it for relationships
-                # But for simplicity in this script, let's just assume we created it or it exists.
-                # To properly map for users, we need the ID.
-                # Let's use a simpler approach: just try to create, if slug conflict, skip.
-                pass
+                org_map[str(existing_org.slug)] = existing_org
 
-        # Re-query all orgs to build map for users
-        result = await session.execute(select(Organization))
-        orgs = result.scalars().all()
-        org_map = {org.slug: org for org in orgs}
-
-        # Create Users
+        # ========================
+        # 2. Create Users
+        # ========================
         for user_data in data.get("users", []):
-            existing_user = await user_repo.get_by_email(
+            existing_user = await user_crud.get_by_email(
                 session, email=user_data["email"]
             )
             if not existing_user:
-                # Create user
                 user_in = UserCreate(
                     email=user_data["email"],
                     password=user_data["password"],
@@ -149,20 +231,16 @@ async def load_demo_data(session):
                     is_superuser=user_data["is_superuser"],
                     is_active=user_data.get("is_active", True),
                 )
-                user = await user_repo.create(session, obj_in=user_in)
+                user = await user_crud.create(session, obj_in=user_in)
 
                 # Randomize created_at for demo data (last 30 days)
-                # This makes the charts look more realistic
                 days_ago = random.randint(0, 30)  # noqa: S311
                 random_time = datetime.now(UTC) - timedelta(days=days_ago)
-                # Add some random hours/minutes variation
                 random_time = random_time.replace(
                     hour=random.randint(0, 23),  # noqa: S311
                     minute=random.randint(0, 59),  # noqa: S311
                 )
 
-                # Update the timestamp and is_active directly in the database
-                # We do this to ensure the values are persisted correctly
                 await session.execute(
                     text(
                         "UPDATE users SET created_at = :created_at, is_active = :is_active WHERE id = :user_id"
@@ -175,10 +253,7 @@ async def load_demo_data(session):
                 )
 
                 logger.info(
-                    "Created demo user: %s (created %s days ago, active=%s)",
|
||||
user.email,
|
||||
days_ago,
|
||||
user_data.get("is_active", True),
|
||||
f"Created demo user: {user.email} (created {days_ago} days ago)"
|
||||
)
|
||||
|
||||
# Add to organization if specified
|
||||
@@ -186,20 +261,229 @@ async def load_demo_data(session):
|
||||
role = user_data.get("role")
|
||||
if org_slug and org_slug in org_map and role:
|
||||
org = org_map[org_slug]
|
||||
# Check if membership exists (it shouldn't for new user)
|
||||
member = UserOrganization(
|
||||
user_id=user.id, organization_id=org.id, role=role
|
||||
)
|
||||
session.add(member)
|
||||
logger.info("Added %s to %s as %s", user.email, org.name, role)
|
||||
logger.info(f"Added {user.email} to {org.name} as {role}")
|
||||
|
||||
user_map[str(user.email)] = user
|
||||
else:
|
||||
logger.info("Demo user already exists: %s", existing_user.email)
|
||||
user_map[str(existing_user.email)] = existing_user
|
||||
logger.debug(f"Demo user already exists: {existing_user.email}")
|
||||
|
||||
await session.flush()
|
||||
|
||||
# Add admin user to map with special "__admin__" key
|
||||
# This allows demo data to reference the admin user as owner
|
||||
superuser_email = settings.FIRST_SUPERUSER_EMAIL or "admin@example.com"
|
||||
admin_user = await user_crud.get_by_email(session, email=superuser_email)
|
||||
if admin_user:
|
||||
user_map["__admin__"] = admin_user
|
||||
user_map[str(admin_user.email)] = admin_user
|
||||
logger.debug(f"Added admin user to map: {admin_user.email}")
|
||||
|
||||
# ========================
|
||||
# 3. Load Agent Types Map (for FK resolution)
|
||||
# ========================
|
||||
agent_types_result = await session.execute(select(AgentType))
|
||||
for at in agent_types_result.scalars().all():
|
||||
agent_type_map[str(at.slug)] = at
|
||||
|
||||
# ========================
|
||||
# 4. Create Projects
|
||||
# ========================
|
||||
for project_data in data.get("projects", []):
|
||||
project_result = await session.execute(
|
||||
select(Project).where(Project.slug == project_data["slug"])
|
||||
)
|
||||
existing_project = project_result.scalar_one_or_none()
|
||||
|
||||
if not existing_project:
|
||||
# Resolve owner email to user ID
|
||||
owner_id = None
|
||||
owner_email = project_data.get("owner_email")
|
||||
if owner_email and owner_email in user_map:
|
||||
owner_id = user_map[owner_email].id
|
||||
|
||||
project = Project(
|
||||
name=project_data["name"],
|
||||
slug=project_data["slug"],
|
||||
description=project_data.get("description"),
|
||||
owner_id=owner_id,
|
||||
autonomy_level=AutonomyLevel(
|
||||
project_data.get("autonomy_level", "milestone")
|
||||
),
|
||||
status=ProjectStatus(project_data.get("status", "active")),
|
||||
complexity=ProjectComplexity(
|
||||
project_data.get("complexity", "medium")
|
||||
),
|
||||
client_mode=ClientMode(project_data.get("client_mode", "auto")),
|
||||
settings=project_data.get("settings", {}),
|
||||
)
|
||||
session.add(project)
|
||||
await session.flush()
|
||||
project_map[str(project.slug)] = project
|
||||
logger.info(f"Created demo project: {project.name}")
|
||||
else:
|
||||
project_map[str(existing_project.slug)] = existing_project
|
||||
logger.debug(f"Demo project already exists: {existing_project.name}")
|
||||
|
||||
# ========================
|
||||
# 5. Create Sprints
|
||||
# ========================
|
||||
for sprint_data in data.get("sprints", []):
|
||||
project_slug = sprint_data["project_slug"]
|
||||
sprint_number = sprint_data["number"]
|
||||
sprint_key = f"{project_slug}:{sprint_number}"
|
||||
|
||||
if project_slug not in project_map:
|
||||
logger.warning(f"Project not found for sprint: {project_slug}")
|
||||
continue
|
||||
|
||||
sprint_project = project_map[project_slug]
|
||||
|
||||
# Check if sprint exists
|
||||
sprint_result = await session.execute(
|
||||
select(Sprint).where(
|
||||
Sprint.project_id == sprint_project.id,
|
||||
Sprint.number == sprint_number,
|
||||
)
|
||||
)
|
||||
existing_sprint = sprint_result.scalar_one_or_none()
|
||||
|
||||
if not existing_sprint:
|
||||
sprint = Sprint(
|
||||
project_id=sprint_project.id,
|
||||
name=sprint_data["name"],
|
||||
number=sprint_number,
|
||||
goal=sprint_data.get("goal"),
|
||||
start_date=date.fromisoformat(sprint_data["start_date"]),
|
||||
end_date=date.fromisoformat(sprint_data["end_date"]),
|
||||
status=SprintStatus(sprint_data.get("status", "planned")),
|
||||
planned_points=sprint_data.get("planned_points"),
|
||||
)
|
||||
session.add(sprint)
|
||||
await session.flush()
|
||||
sprint_map[sprint_key] = sprint
|
||||
logger.info(
|
||||
f"Created demo sprint: {sprint.name} for {sprint_project.name}"
|
||||
)
|
||||
else:
|
||||
sprint_map[sprint_key] = existing_sprint
|
||||
logger.debug(f"Demo sprint already exists: {existing_sprint.name}")
|
||||
|
||||
# ========================
|
||||
# 6. Create Agent Instances
|
||||
# ========================
|
||||
for agent_data in data.get("agent_instances", []):
|
||||
project_slug = agent_data["project_slug"]
|
||||
agent_type_slug = agent_data["agent_type_slug"]
|
||||
agent_name = agent_data["name"]
|
||||
agent_key = f"{project_slug}:{agent_name}"
|
||||
|
||||
if project_slug not in project_map:
|
||||
logger.warning(f"Project not found for agent: {project_slug}")
|
||||
continue
|
||||
|
||||
if agent_type_slug not in agent_type_map:
|
||||
logger.warning(f"Agent type not found: {agent_type_slug}")
|
||||
continue
|
||||
|
||||
agent_project = project_map[project_slug]
|
||||
agent_type = agent_type_map[agent_type_slug]
|
||||
|
||||
# Check if agent instance exists (by name within project)
|
||||
agent_result = await session.execute(
|
||||
select(AgentInstance).where(
|
||||
AgentInstance.project_id == agent_project.id,
|
||||
AgentInstance.name == agent_name,
|
||||
)
|
||||
)
|
||||
existing_agent = agent_result.scalar_one_or_none()
|
||||
|
||||
if not existing_agent:
|
||||
agent_instance = AgentInstance(
|
||||
project_id=agent_project.id,
|
||||
agent_type_id=agent_type.id,
|
||||
name=agent_name,
|
||||
status=AgentStatus(agent_data.get("status", "idle")),
|
||||
current_task=agent_data.get("current_task"),
|
||||
)
|
||||
session.add(agent_instance)
|
||||
await session.flush()
|
||||
agent_instance_map[agent_key] = agent_instance
|
||||
logger.info(
|
||||
f"Created demo agent: {agent_name} ({agent_type.name}) "
|
||||
f"for {agent_project.name}"
|
||||
)
|
||||
else:
|
||||
agent_instance_map[agent_key] = existing_agent
|
||||
logger.debug(f"Demo agent already exists: {existing_agent.name}")
|
||||
|
||||
# ========================
|
||||
# 7. Create Issues
|
||||
# ========================
|
||||
for issue_data in data.get("issues", []):
|
||||
project_slug = issue_data["project_slug"]
|
||||
|
||||
if project_slug not in project_map:
|
||||
logger.warning(f"Project not found for issue: {project_slug}")
|
||||
continue
|
||||
|
||||
issue_project = project_map[project_slug]
|
||||
|
||||
# Check if issue exists (by title within project - simple heuristic)
|
||||
issue_result = await session.execute(
|
||||
select(Issue).where(
|
||||
Issue.project_id == issue_project.id,
|
||||
Issue.title == issue_data["title"],
|
||||
)
|
||||
)
|
||||
existing_issue = issue_result.scalar_one_or_none()
|
||||
|
||||
if not existing_issue:
|
||||
# Resolve sprint
|
||||
sprint_id = None
|
||||
sprint_number = issue_data.get("sprint_number")
|
||||
if sprint_number:
|
||||
sprint_key = f"{project_slug}:{sprint_number}"
|
||||
if sprint_key in sprint_map:
|
||||
sprint_id = sprint_map[sprint_key].id
|
||||
|
||||
# Resolve assigned agent
|
||||
assigned_agent_id = None
|
||||
assigned_agent_name = issue_data.get("assigned_agent_name")
|
||||
if assigned_agent_name:
|
||||
agent_key = f"{project_slug}:{assigned_agent_name}"
|
||||
if agent_key in agent_instance_map:
|
||||
assigned_agent_id = agent_instance_map[agent_key].id
|
||||
|
||||
issue = Issue(
|
||||
project_id=issue_project.id,
|
||||
sprint_id=sprint_id,
|
||||
type=IssueType(issue_data.get("type", "task")),
|
||||
title=issue_data["title"],
|
||||
body=issue_data.get("body", ""),
|
||||
status=IssueStatus(issue_data.get("status", "open")),
|
||||
priority=IssuePriority(issue_data.get("priority", "medium")),
|
||||
labels=issue_data.get("labels", []),
|
||||
story_points=issue_data.get("story_points"),
|
||||
assigned_agent_id=assigned_agent_id,
|
||||
)
|
||||
session.add(issue)
|
||||
logger.info(f"Created demo issue: {issue.title[:50]}...")
|
||||
else:
|
||||
logger.debug(
|
||||
f"Demo issue already exists: {existing_issue.title[:50]}..."
|
||||
)
|
||||
|
||||
await session.commit()
|
||||
logger.info("Demo data loaded successfully")
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error loading demo data: %s", e)
|
||||
await session.rollback()
|
||||
logger.error(f"Error loading demo data: {e}")
|
||||
raise
|
||||
|
||||
|
||||
@@ -214,12 +498,12 @@ async def main():
|
||||
try:
|
||||
user = await init_db()
|
||||
if user:
|
||||
print("✓ Database initialized successfully")
|
||||
print(f"✓ Superuser: {user.email}")
|
||||
print("Database initialized successfully")
|
||||
print(f"Superuser: {user.email}")
|
||||
else:
|
||||
print("✗ Failed to initialize database")
|
||||
print("Failed to initialize database")
|
||||
except Exception as e:
|
||||
print(f"✗ Error initializing database: {e}")
|
||||
print(f"Error initializing database: {e}")
|
||||
raise
|
||||
finally:
|
||||
# Close the engine
|
||||
|
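The demo loader above repeats one idempotent get-or-create pattern for every entity. A minimal standalone sketch of that pattern, assuming the async session and the `Organization` model from this diff (the constructor fields are inferred from the hunk, so treat them as illustrative):

```python
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from app.models import Organization


async def get_or_create_org(session: AsyncSession, slug: str, name: str) -> Organization:
    # Look up by the unique slug first so repeated startups never duplicate rows
    result = await session.execute(select(Organization).where(Organization.slug == slug))
    org = result.scalar_one_or_none()
    if org is None:
        org = Organization(name=name, slug=slug, is_active=True)
        session.add(org)
        await session.flush()  # populate org.id so dependent rows can reference it
    return org
```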
@@ -1,7 +1,7 @@
import logging
import os
from contextlib import asynccontextmanager
from datetime import UTC, datetime
from datetime import datetime
from typing import Any

from apscheduler.schedulers.asyncio import AsyncIOScheduler
@@ -16,7 +16,7 @@ from slowapi.util import get_remote_address
from app.api.main import api_router
from app.api.routes.oauth_provider import wellknown_router as oauth_wellknown_router
from app.core.config import settings
from app.core.database import check_database_health, close_async_db
from app.core.database import check_database_health
from app.core.exceptions import (
    APIException,
    api_exception_handler,
@@ -72,7 +72,6 @@ async def lifespan(app: FastAPI):
    if os.getenv("IS_TEST", "False") != "True":
        scheduler.shutdown()
        logger.info("Scheduled jobs stopped")
    await close_async_db()


    logger.info("Starting app!!!")
@@ -295,7 +294,7 @@ async def health_check() -> JSONResponse:
    """
    health_status: dict[str, Any] = {
        "status": "healthy",
        "timestamp": datetime.now(UTC).isoformat().replace("+00:00", "Z"),
        "timestamp": datetime.utcnow().isoformat() + "Z",
        "version": settings.VERSION,
        "environment": settings.ENVIRONMENT,
        "checks": {},
@@ -320,7 +319,7 @@ async def health_check() -> JSONResponse:
                "message": f"Database connection failed: {e!s}",
            }
            response_status = status.HTTP_503_SERVICE_UNAVAILABLE
            logger.error("Health check failed - database error: %s", e)
            logger.error(f"Health check failed - database error: {e}")

    return JSONResponse(status_code=response_status, content=health_status)

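The timestamp change in the health check swaps a timezone-aware value for a naive `datetime.utcnow()` while keeping the same "Z"-suffixed shape; a small sketch comparing the two forms:

```python
from datetime import UTC, datetime

# Old form: timezone-aware, then normalize the "+00:00" offset to "Z"
aware = datetime.now(UTC).isoformat().replace("+00:00", "Z")

# New form: naive UTC value with "Z" appended manually
naive = datetime.utcnow().isoformat() + "Z"

print(aware)  # e.g. 2025-01-01T12:00:00.000000Z
print(naive)  # same shape, computed from a naive datetime
```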
@@ -8,6 +8,19 @@ from app.core.database import Base

from .base import TimestampMixin, UUIDMixin

# Memory system models
from .memory import (
    ConsolidationStatus,
    ConsolidationType,
    Episode,
    EpisodeOutcome,
    Fact,
    MemoryConsolidationLog,
    Procedure,
    ScopeType,
    WorkingMemory,
)

# OAuth models (client mode - authenticate via Google/GitHub)
from .oauth_account import OAuthAccount

@@ -18,13 +31,33 @@ from .oauth_provider_token import OAuthConsent, OAuthProviderRefreshToken
from .oauth_state import OAuthState
from .organization import Organization

# Syndarix domain models
from .syndarix import (
    AgentInstance,
    AgentType,
    Issue,
    Project,
    Sprint,
)

# Import models
from .user import User
from .user_organization import OrganizationRole, UserOrganization
from .user_session import UserSession

__all__ = [
    # Syndarix models
    "AgentInstance",
    "AgentType",
    "Base",
    # Memory models
    "ConsolidationStatus",
    "ConsolidationType",
    "Episode",
    "EpisodeOutcome",
    "Fact",
    "Issue",
    "MemoryConsolidationLog",
    "OAuthAccount",
    "OAuthAuthorizationCode",
    "OAuthClient",
@@ -33,9 +66,14 @@ __all__ = [
    "OAuthState",
    "Organization",
    "OrganizationRole",
    "Procedure",
    "Project",
    "ScopeType",
    "Sprint",
    "TimestampMixin",
    "UUIDMixin",
    "User",
    "UserOrganization",
    "UserSession",
    "WorkingMemory",
]

32 backend/app/models/memory/__init__.py Normal file
@@ -0,0 +1,32 @@
# app/models/memory/__init__.py
"""
Memory System Database Models.

Provides SQLAlchemy models for the Agent Memory System:
- WorkingMemory: Key-value storage with TTL
- Episode: Experiential memories
- Fact: Semantic knowledge triples
- Procedure: Learned skills
- MemoryConsolidationLog: Consolidation job tracking
"""

from .consolidation import MemoryConsolidationLog
from .enums import ConsolidationStatus, ConsolidationType, EpisodeOutcome, ScopeType
from .episode import Episode
from .fact import Fact
from .procedure import Procedure
from .working_memory import WorkingMemory

__all__ = [
    # Enums
    "ConsolidationStatus",
    "ConsolidationType",
    # Models
    "Episode",
    "EpisodeOutcome",
    "Fact",
    "MemoryConsolidationLog",
    "Procedure",
    "ScopeType",
    "WorkingMemory",
]
72 backend/app/models/memory/consolidation.py Normal file
@@ -0,0 +1,72 @@
# app/models/memory/consolidation.py
"""
Memory Consolidation Log database model.

Tracks memory consolidation jobs that transfer knowledge
between memory tiers.
"""

from sqlalchemy import Column, DateTime, Enum, Index, Integer, Text

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import ConsolidationStatus, ConsolidationType


class MemoryConsolidationLog(Base, UUIDMixin, TimestampMixin):
    """
    Memory consolidation job log.

    Tracks consolidation operations:
    - Working -> Episodic (session end)
    - Episodic -> Semantic (fact extraction)
    - Episodic -> Procedural (procedure learning)
    - Pruning (removing low-value memories)
    """

    __tablename__ = "memory_consolidation_log"

    # Consolidation type
    consolidation_type: Column[ConsolidationType] = Column(
        Enum(ConsolidationType),
        nullable=False,
        index=True,
    )

    # Counts
    source_count = Column(Integer, nullable=False, default=0)
    result_count = Column(Integer, nullable=False, default=0)

    # Timing
    started_at = Column(DateTime(timezone=True), nullable=False)
    completed_at = Column(DateTime(timezone=True), nullable=True)

    # Status
    status: Column[ConsolidationStatus] = Column(
        Enum(ConsolidationStatus),
        nullable=False,
        default=ConsolidationStatus.PENDING,
        index=True,
    )

    # Error details if failed
    error = Column(Text, nullable=True)

    __table_args__ = (
        # Query patterns
        Index("ix_consolidation_type_status", "consolidation_type", "status"),
        Index("ix_consolidation_started", "started_at"),
    )

    @property
    def duration_seconds(self) -> float | None:
        """Calculate duration of the consolidation job."""
        if self.completed_at is None or self.started_at is None:
            return None
        return (self.completed_at - self.started_at).total_seconds()

    def __repr__(self) -> str:
        return (
            f"<MemoryConsolidationLog {self.id} "
            f"type={self.consolidation_type.value} status={self.status.value}>"
        )
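A small usage sketch of `duration_seconds` on a transient instance (the values here are made up):

```python
from datetime import UTC, datetime, timedelta

from app.models.memory import ConsolidationStatus, ConsolidationType, MemoryConsolidationLog

log = MemoryConsolidationLog(
    consolidation_type=ConsolidationType.EPISODIC_TO_SEMANTIC,
    started_at=datetime.now(UTC),
    status=ConsolidationStatus.RUNNING,
)
assert log.duration_seconds is None  # no completed_at while the job is running

log.completed_at = log.started_at + timedelta(seconds=42)
log.status = ConsolidationStatus.COMPLETED
assert log.duration_seconds == 42.0
```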
73 backend/app/models/memory/enums.py Normal file
@@ -0,0 +1,73 @@
# app/models/memory/enums.py
"""
Enums for Memory System database models.

These enums define the database-level constraints for memory types
and scoping levels.
"""

from enum import Enum as PyEnum


class ScopeType(str, PyEnum):
    """
    Memory scope levels matching the memory service types.

    GLOBAL: System-wide memories accessible by all
    PROJECT: Project-scoped memories
    AGENT_TYPE: Type-specific memories (shared by instances of same type)
    AGENT_INSTANCE: Instance-specific memories
    SESSION: Session-scoped ephemeral memories
    """

    GLOBAL = "global"
    PROJECT = "project"
    AGENT_TYPE = "agent_type"
    AGENT_INSTANCE = "agent_instance"
    SESSION = "session"


class EpisodeOutcome(str, PyEnum):
    """
    Outcome of an episode (task execution).

    SUCCESS: Task completed successfully
    FAILURE: Task failed
    PARTIAL: Task partially completed
    """

    SUCCESS = "success"
    FAILURE = "failure"
    PARTIAL = "partial"


class ConsolidationType(str, PyEnum):
    """
    Types of memory consolidation operations.

    WORKING_TO_EPISODIC: Transfer session state to episodic
    EPISODIC_TO_SEMANTIC: Extract facts from episodes
    EPISODIC_TO_PROCEDURAL: Extract procedures from episodes
    PRUNING: Remove low-value memories
    """

    WORKING_TO_EPISODIC = "working_to_episodic"
    EPISODIC_TO_SEMANTIC = "episodic_to_semantic"
    EPISODIC_TO_PROCEDURAL = "episodic_to_procedural"
    PRUNING = "pruning"


class ConsolidationStatus(str, PyEnum):
    """
    Status of a consolidation job.

    PENDING: Job is queued
    RUNNING: Job is currently executing
    COMPLETED: Job finished successfully
    FAILED: Job failed with errors
    """

    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
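Because these enums subclass `str`, raw values parse and compare directly, which is what lets the seed scripts above pass plain strings into the model constructors; a quick sketch:

```python
from app.models.memory import ScopeType

scope = ScopeType("project")      # parse a raw string from JSON or the DB
assert scope is ScopeType.PROJECT
assert scope == "project"         # str mixin: the member compares equal to its value
```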
139 backend/app/models/memory/episode.py Normal file
@@ -0,0 +1,139 @@
# app/models/memory/episode.py
"""
Episode database model.

Stores experiential memories - records of past task executions
with context, actions, outcomes, and lessons learned.
"""

from sqlalchemy import (
    BigInteger,
    CheckConstraint,
    Column,
    DateTime,
    Enum,
    Float,
    ForeignKey,
    Index,
    String,
    Text,
)
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import EpisodeOutcome

# Import pgvector type - will be available after migration enables extension
try:
    from pgvector.sqlalchemy import Vector  # type: ignore[import-not-found]
except ImportError:
    # Fallback for environments without pgvector
    Vector = None


class Episode(Base, UUIDMixin, TimestampMixin):
    """
    Episodic memory model.

    Records experiential memories from agent task execution:
    - What task was performed
    - What actions were taken
    - What was the outcome
    - What lessons were learned
    """

    __tablename__ = "episodes"

    # Foreign keys
    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )

    agent_instance_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_instances.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    agent_type_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_types.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    # Session reference
    session_id = Column(String(255), nullable=False, index=True)

    # Task information
    task_type = Column(String(100), nullable=False, index=True)
    task_description = Column(Text, nullable=False)

    # Actions taken (list of action dictionaries)
    actions = Column(JSONB, default=list, nullable=False)

    # Context summary
    context_summary = Column(Text, nullable=False)

    # Outcome
    outcome: Column[EpisodeOutcome] = Column(
        Enum(EpisodeOutcome),
        nullable=False,
        index=True,
    )
    outcome_details = Column(Text, nullable=True)

    # Metrics
    duration_seconds = Column(Float, nullable=False, default=0.0)
    tokens_used = Column(BigInteger, nullable=False, default=0)

    # Learning
    lessons_learned = Column(JSONB, default=list, nullable=False)
    importance_score = Column(Float, nullable=False, default=0.5, index=True)

    # Vector embedding for semantic search
    # Using 1536 dimensions for OpenAI text-embedding-3-small
    embedding = Column(Vector(1536) if Vector else Text, nullable=True)

    # When the episode occurred
    occurred_at = Column(DateTime(timezone=True), nullable=False, index=True)

    # Relationships
    project = relationship("Project", foreign_keys=[project_id])
    agent_instance = relationship("AgentInstance", foreign_keys=[agent_instance_id])
    agent_type = relationship("AgentType", foreign_keys=[agent_type_id])

    __table_args__ = (
        # Primary query patterns
        Index("ix_episodes_project_task", "project_id", "task_type"),
        Index("ix_episodes_project_outcome", "project_id", "outcome"),
        Index("ix_episodes_agent_task", "agent_instance_id", "task_type"),
        Index("ix_episodes_project_time", "project_id", "occurred_at"),
        # For importance-based pruning
        Index("ix_episodes_importance_time", "importance_score", "occurred_at"),
        # Data integrity constraints
        CheckConstraint(
            "importance_score >= 0.0 AND importance_score <= 1.0",
            name="ck_episodes_importance_range",
        ),
        CheckConstraint(
            "duration_seconds >= 0.0",
            name="ck_episodes_duration_positive",
        ),
        CheckConstraint(
            "tokens_used >= 0",
            name="ck_episodes_tokens_positive",
        ),
    )

    def __repr__(self) -> str:
        return f"<Episode {self.id} task={self.task_type} outcome={self.outcome.value}>"
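A hedged sketch of recording an episode with this model; the IDs and task details are placeholders, and the embedding is left to a separate pipeline:

```python
import uuid
from datetime import UTC, datetime

from app.models.memory import Episode, EpisodeOutcome

episode = Episode(
    project_id=uuid.uuid4(),  # placeholder; a real projects.id in practice
    session_id="session-123",
    task_type="code_review",
    task_description="Review a small PR touching OAuth token storage",
    actions=[{"tool": "gitea:get_pr", "result": "ok"}],
    context_summary="Single-file change renaming token columns.",
    outcome=EpisodeOutcome.SUCCESS,
    duration_seconds=37.5,
    tokens_used=1800,
    lessons_learned=["Check token column names after renames"],
    importance_score=0.7,  # must stay in [0.0, 1.0] per the CheckConstraint
    occurred_at=datetime.now(UTC),
)
```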
120 backend/app/models/memory/fact.py Normal file
@@ -0,0 +1,120 @@
# app/models/memory/fact.py
"""
Fact database model.

Stores semantic memories - learned facts in subject-predicate-object
triple format with confidence scores and source tracking.
"""

from sqlalchemy import (
    CheckConstraint,
    Column,
    DateTime,
    Float,
    ForeignKey,
    Index,
    Integer,
    String,
    Text,
    text,
)
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

# Import pgvector type
try:
    from pgvector.sqlalchemy import Vector  # type: ignore[import-not-found]
except ImportError:
    Vector = None


class Fact(Base, UUIDMixin, TimestampMixin):
    """
    Semantic memory model.

    Stores learned facts as subject-predicate-object triples:
    - "FastAPI" - "uses" - "Starlette framework"
    - "Project Alpha" - "requires" - "OAuth authentication"

    Facts have confidence scores that decay over time and can be
    reinforced when the same fact is learned again.
    """

    __tablename__ = "facts"

    # Scoping: project_id is NULL for global facts
    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
    )

    # Triple format
    subject = Column(String(500), nullable=False, index=True)
    predicate = Column(String(255), nullable=False, index=True)
    object = Column(Text, nullable=False)

    # Confidence score (0.0 to 1.0)
    confidence = Column(Float, nullable=False, default=0.8, index=True)

    # Source tracking: which episodes contributed to this fact (stored as JSONB array of UUID strings)
    source_episode_ids: Column[list] = Column(JSONB, default=list, nullable=False)

    # Learning history
    first_learned = Column(DateTime(timezone=True), nullable=False)
    last_reinforced = Column(DateTime(timezone=True), nullable=False)
    reinforcement_count = Column(Integer, nullable=False, default=1)

    # Vector embedding for semantic search
    embedding = Column(Vector(1536) if Vector else Text, nullable=True)

    # Relationships
    project = relationship("Project", foreign_keys=[project_id])

    __table_args__ = (
        # Unique constraint on triple within project scope
        Index(
            "ix_facts_unique_triple",
            "project_id",
            "subject",
            "predicate",
            "object",
            unique=True,
            postgresql_where=text("project_id IS NOT NULL"),
        ),
        # Unique constraint on triple for global facts (project_id IS NULL)
        Index(
            "ix_facts_unique_triple_global",
            "subject",
            "predicate",
            "object",
            unique=True,
            postgresql_where=text("project_id IS NULL"),
        ),
        # Query patterns
        Index("ix_facts_subject_predicate", "subject", "predicate"),
        Index("ix_facts_project_subject", "project_id", "subject"),
        Index("ix_facts_confidence_time", "confidence", "last_reinforced"),
        # Note: subject already has index=True on Column definition, no need for explicit index
        # Data integrity constraints
        CheckConstraint(
            "confidence >= 0.0 AND confidence <= 1.0",
            name="ck_facts_confidence_range",
        ),
        CheckConstraint(
            "reinforcement_count >= 1",
            name="ck_facts_reinforcement_positive",
        ),
    )

    def __repr__(self) -> str:
        return (
            f"<Fact {self.id} '{self.subject}' - '{self.predicate}' - "
            f"'{self.object[:50]}...' conf={self.confidence:.2f}>"
        )
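A sketch of the reinforcement behavior the docstring describes; `reinforce` is a hypothetical helper, not part of this diff:

```python
from datetime import UTC, datetime

from app.models.memory import Fact


def reinforce(fact: Fact, boost: float = 0.05) -> None:
    # Hypothetical helper: bump confidence (capped to satisfy the CheckConstraint),
    # count the reinforcement, and refresh the recency timestamp.
    fact.confidence = min(1.0, fact.confidence + boost)
    fact.reinforcement_count += 1
    fact.last_reinforced = datetime.now(UTC)
```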
129 backend/app/models/memory/procedure.py Normal file
@@ -0,0 +1,129 @@
# app/models/memory/procedure.py
"""
Procedure database model.

Stores procedural memories - learned skills and procedures
derived from successful task execution patterns.
"""

from sqlalchemy import (
    CheckConstraint,
    Column,
    DateTime,
    ForeignKey,
    Index,
    Integer,
    String,
    Text,
)
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

# Import pgvector type
try:
    from pgvector.sqlalchemy import Vector  # type: ignore[import-not-found]
except ImportError:
    Vector = None


class Procedure(Base, UUIDMixin, TimestampMixin):
    """
    Procedural memory model.

    Stores learned procedures (skills) extracted from successful
    task execution patterns:
    - Name and trigger pattern for matching
    - Step-by-step actions
    - Success/failure tracking
    """

    __tablename__ = "procedures"

    # Scoping
    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
    )

    agent_type_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_types.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    # Procedure identification
    name = Column(String(255), nullable=False, index=True)
    trigger_pattern = Column(Text, nullable=False)

    # Steps as JSON array of step objects
    # Each step: {order, action, parameters, expected_outcome, fallback_action}
    steps = Column(JSONB, default=list, nullable=False)

    # Success tracking
    success_count = Column(Integer, nullable=False, default=0)
    failure_count = Column(Integer, nullable=False, default=0)

    # Usage tracking
    last_used = Column(DateTime(timezone=True), nullable=True, index=True)

    # Vector embedding for semantic matching
    embedding = Column(Vector(1536) if Vector else Text, nullable=True)

    # Relationships
    project = relationship("Project", foreign_keys=[project_id])
    agent_type = relationship("AgentType", foreign_keys=[agent_type_id])

    __table_args__ = (
        # Unique procedure name within scope
        Index(
            "ix_procedures_unique_name",
            "project_id",
            "agent_type_id",
            "name",
            unique=True,
        ),
        # Query patterns
        Index("ix_procedures_project_name", "project_id", "name"),
        # Note: agent_type_id already has index=True on Column definition
        # For finding best procedures
        Index("ix_procedures_success_rate", "success_count", "failure_count"),
        # Data integrity constraints
        CheckConstraint(
            "success_count >= 0",
            name="ck_procedures_success_positive",
        ),
        CheckConstraint(
            "failure_count >= 0",
            name="ck_procedures_failure_positive",
        ),
    )

    @property
    def success_rate(self) -> float:
        """Calculate the success rate of this procedure."""
        # Snapshot values to avoid race conditions in concurrent access
        success = self.success_count
        failure = self.failure_count
        total = success + failure
        if total == 0:
            return 0.0
        return success / total

    @property
    def total_uses(self) -> int:
        """Get total number of times this procedure was used."""
        # Snapshot values for consistency
        return self.success_count + self.failure_count

    def __repr__(self) -> str:
        return (
            f"<Procedure {self.name} ({self.id}) success_rate={self.success_rate:.2%}>"
        )
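A quick sketch of the tracking properties on a transient instance:

```python
from app.models.memory import Procedure

proc = Procedure(name="run-migrations", trigger_pattern="migrate database", steps=[])
proc.success_count, proc.failure_count = 9, 1  # set explicitly; column defaults apply at flush
assert proc.total_uses == 10
assert proc.success_rate == 0.9  # 0.0 when the procedure has never been used
```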
58 backend/app/models/memory/working_memory.py Normal file
@@ -0,0 +1,58 @@
# app/models/memory/working_memory.py
"""
Working Memory database model.

Stores ephemeral key-value data for active sessions with TTL support.
Used as database backup when Redis is unavailable.
"""

from sqlalchemy import Column, DateTime, Enum, Index, String
from sqlalchemy.dialects.postgresql import JSONB

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import ScopeType


class WorkingMemory(Base, UUIDMixin, TimestampMixin):
    """
    Working memory storage table.

    Provides database-backed working memory as fallback when
    Redis is unavailable. Supports TTL-based expiration.
    """

    __tablename__ = "working_memory"

    # Scoping
    scope_type: Column[ScopeType] = Column(
        Enum(ScopeType),
        nullable=False,
        index=True,
    )
    scope_id = Column(String(255), nullable=False, index=True)

    # Key-value storage
    key = Column(String(255), nullable=False)
    value = Column(JSONB, nullable=False)

    # TTL support
    expires_at = Column(DateTime(timezone=True), nullable=True, index=True)

    __table_args__ = (
        # Primary lookup: scope + key
        Index(
            "ix_working_memory_scope_key",
            "scope_type",
            "scope_id",
            "key",
            unique=True,
        ),
        # For cleanup of expired entries
        Index("ix_working_memory_expires", "expires_at"),
        # For listing all keys in a scope
        Index("ix_working_memory_scope_list", "scope_type", "scope_id"),
    )

    def __repr__(self) -> str:
        return f"<WorkingMemory {self.scope_type.value}:{self.scope_id}:{self.key}>"
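A hedged sketch of the primary lookup the unique index supports, skipping rows whose TTL has lapsed; `fresh_value_query` is illustrative only:

```python
from datetime import UTC, datetime

from sqlalchemy import select

from app.models.memory import ScopeType, WorkingMemory


def fresh_value_query(scope_id: str, key: str):
    # Illustrative: hits ix_working_memory_scope_key and ignores expired rows
    now = datetime.now(UTC)
    return select(WorkingMemory.value).where(
        WorkingMemory.scope_type == ScopeType.SESSION,
        WorkingMemory.scope_id == scope_id,
        WorkingMemory.key == key,
        (WorkingMemory.expires_at.is_(None)) | (WorkingMemory.expires_at > now),
    )
```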
@@ -36,9 +36,9 @@ class OAuthAccount(Base, UUIDMixin, TimestampMixin):
    )  # Email from provider (for reference)

    # Optional: store provider tokens for API access
    # TODO: Encrypt these at rest in production (requires key management infrastructure)
    access_token = Column(String(2048), nullable=True)
    refresh_token = Column(String(2048), nullable=True)
    # These should be encrypted at rest in production
    access_token_encrypted = Column(String(2048), nullable=True)
    refresh_token_encrypted = Column(String(2048), nullable=True)
    token_expires_at = Column(DateTime(timezone=True), nullable=True)

    # Relationship

@@ -92,7 +92,7 @@ class OAuthAuthorizationCode(Base, UUIDMixin, TimestampMixin):
        # Handle both timezone-aware and naive datetimes from DB
        if expires_at.tzinfo is None:
            expires_at = expires_at.replace(tzinfo=UTC)
        return bool(now > expires_at)
        return now > expires_at

    @property
    def is_valid(self) -> bool:

@@ -99,7 +99,7 @@ class OAuthProviderRefreshToken(Base, UUIDMixin, TimestampMixin):
        # Handle both timezone-aware and naive datetimes from DB
        if expires_at.tzinfo is None:
            expires_at = expires_at.replace(tzinfo=UTC)
        return bool(now > expires_at)
        return now > expires_at

    @property
    def is_valid(self) -> bool:

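The expiry change above keeps the naive-datetime guard while dropping the redundant `bool()` cast; the same guard as a standalone sketch:

```python
from datetime import UTC, datetime


def is_expired(expires_at: datetime) -> bool:
    now = datetime.now(UTC)
    if expires_at.tzinfo is None:  # naive value read back from the DB
        expires_at = expires_at.replace(tzinfo=UTC)
    return now > expires_at
```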
47 backend/app/models/syndarix/__init__.py Normal file
@@ -0,0 +1,47 @@
# app/models/syndarix/__init__.py
"""
Syndarix domain models.

This package contains all the core entities for the Syndarix AI consulting platform:
- Project: Client engagements with autonomy settings
- AgentType: Templates for AI agent capabilities
- AgentInstance: Spawned agents working on projects
- Issue: Units of work with external tracker sync
- Sprint: Time-boxed iterations for organizing work
"""

from .agent_instance import AgentInstance
from .agent_type import AgentType
from .enums import (
    AgentStatus,
    AutonomyLevel,
    ClientMode,
    IssuePriority,
    IssueStatus,
    IssueType,
    ProjectComplexity,
    ProjectStatus,
    SprintStatus,
    SyncStatus,
)
from .issue import Issue
from .project import Project
from .sprint import Sprint

__all__ = [
    "AgentInstance",
    "AgentStatus",
    "AgentType",
    "AutonomyLevel",
    "ClientMode",
    "Issue",
    "IssuePriority",
    "IssueStatus",
    "IssueType",
    "Project",
    "ProjectComplexity",
    "ProjectStatus",
    "Sprint",
    "SprintStatus",
    "SyncStatus",
]
115 backend/app/models/syndarix/agent_instance.py Normal file
@@ -0,0 +1,115 @@
# app/models/syndarix/agent_instance.py
"""
AgentInstance model for Syndarix AI consulting platform.

An AgentInstance is a spawned instance of an AgentType, assigned to a
specific project to perform work.
"""

from sqlalchemy import (
    BigInteger,
    Column,
    DateTime,
    Enum,
    ForeignKey,
    Index,
    Integer,
    Numeric,
    String,
    Text,
)
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import AgentStatus


class AgentInstance(Base, UUIDMixin, TimestampMixin):
    """
    AgentInstance model representing a spawned agent working on a project.

    Tracks:
    - Current status and task
    - Memory (short-term in DB, long-term reference to vector store)
    - Session information for MCP connections
    - Usage metrics (tasks completed, tokens, cost)
    """

    __tablename__ = "agent_instances"

    # Foreign keys
    agent_type_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_types.id", ondelete="RESTRICT"),
        nullable=False,
        index=True,
    )

    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )

    # Agent instance name (e.g., "Dave", "Eve") for personality
    name = Column(String(100), nullable=False, index=True)

    # Status tracking
    status: Column[AgentStatus] = Column(
        Enum(
            AgentStatus,
            name="agent_status",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=AgentStatus.IDLE,
        nullable=False,
        index=True,
    )

    # Current task description (brief summary of what agent is doing)
    current_task = Column(Text, nullable=True)

    # Short-term memory stored in database (conversation context, recent decisions)
    short_term_memory = Column(JSONB, default=dict, nullable=False)

    # Reference to long-term memory in vector store (e.g., "project-123/agent-456")
    long_term_memory_ref = Column(String(500), nullable=True)

    # Session ID for active MCP connections
    session_id = Column(String(255), nullable=True, index=True)

    # Activity tracking
    last_activity_at = Column(DateTime(timezone=True), nullable=True, index=True)
    terminated_at = Column(DateTime(timezone=True), nullable=True, index=True)

    # Usage metrics
    tasks_completed = Column(Integer, default=0, nullable=False)
    tokens_used = Column(BigInteger, default=0, nullable=False)
    cost_incurred = Column(Numeric(precision=10, scale=4), default=0, nullable=False)

    # Relationships
    agent_type = relationship("AgentType", back_populates="instances")
    project = relationship("Project", back_populates="agent_instances")
    assigned_issues = relationship(
        "Issue",
        back_populates="assigned_agent",
        foreign_keys="Issue.assigned_agent_id",
    )

    __table_args__ = (
        Index("ix_agent_instances_project_status", "project_id", "status"),
        Index("ix_agent_instances_type_status", "agent_type_id", "status"),
        Index("ix_agent_instances_project_type", "project_id", "agent_type_id"),
    )

    def __repr__(self) -> str:
        return (
            f"<AgentInstance {self.name} ({self.id}) type={self.agent_type_id} "
            f"project={self.project_id} status={self.status.value}>"
        )
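A sketch of updating the usage metrics after a completed task; `record_task` is hypothetical and guards against unset counters on transient instances (column defaults only apply at flush):

```python
from decimal import Decimal

from app.models.syndarix import AgentInstance


def record_task(agent: AgentInstance, tokens: int, cost: Decimal) -> None:
    # Hypothetical helper: accumulate the three usage metrics defined on the model
    agent.tasks_completed = (agent.tasks_completed or 0) + 1
    agent.tokens_used = (agent.tokens_used or 0) + tokens
    agent.cost_incurred = (agent.cost_incurred or Decimal("0")) + cost
```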
91 backend/app/models/syndarix/agent_type.py Normal file
@@ -0,0 +1,91 @@
# app/models/syndarix/agent_type.py
"""
AgentType model for Syndarix AI consulting platform.

An AgentType is a template that defines the capabilities, personality,
and model configuration for agent instances.
"""

from sqlalchemy import Boolean, Column, Index, Integer, String, Text
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin


class AgentType(Base, UUIDMixin, TimestampMixin):
    """
    AgentType model representing a template for agent instances.

    Each agent type defines:
    - Expertise areas and personality prompt
    - Model configuration (primary, fallback, parameters)
    - MCP server access and tool permissions

    Examples: ProductOwner, Architect, BackendEngineer, QAEngineer
    """

    __tablename__ = "agent_types"

    name = Column(String(255), nullable=False, index=True)
    slug = Column(String(255), unique=True, nullable=False, index=True)
    description = Column(Text, nullable=True)

    # Areas of expertise for this agent type (e.g., ["python", "fastapi", "databases"])
    expertise = Column(JSONB, default=list, nullable=False)

    # System prompt defining the agent's personality and behavior
    personality_prompt = Column(Text, nullable=False)

    # Primary LLM model to use (e.g., "claude-opus-4-5-20251101")
    primary_model = Column(String(100), nullable=False)

    # Fallback models in order of preference
    fallback_models = Column(JSONB, default=list, nullable=False)

    # Model parameters (temperature, max_tokens, etc.)
    model_params = Column(JSONB, default=dict, nullable=False)

    # List of MCP servers this agent can connect to
    mcp_servers = Column(JSONB, default=list, nullable=False)

    # Tool permissions configuration
    # Structure: {"allowed": ["*"], "denied": [], "require_approval": ["gitea:create_pr"]}
    tool_permissions = Column(JSONB, default=dict, nullable=False)

    # Whether this agent type is available for new instances
    is_active = Column(Boolean, default=True, nullable=False, index=True)

    # Category for grouping agents (development, design, quality, etc.)
    category = Column(String(50), nullable=True, index=True)

    # Lucide icon identifier for UI display (e.g., "code", "palette", "shield")
    icon = Column(String(50), nullable=True, default="bot")

    # Hex color code for visual distinction (e.g., "#3B82F6")
    color = Column(String(7), nullable=True, default="#3B82F6")

    # Display ordering within category (lower = first)
    sort_order = Column(Integer, nullable=False, default=0, index=True)

    # List of typical tasks this agent excels at
    typical_tasks = Column(JSONB, default=list, nullable=False)

    # List of agent slugs that collaborate well with this type
    collaboration_hints = Column(JSONB, default=list, nullable=False)

    # Relationships
    instances = relationship(
        "AgentInstance",
        back_populates="agent_type",
        cascade="all, delete-orphan",
    )

    __table_args__ = (
        Index("ix_agent_types_slug_active", "slug", "is_active"),
        Index("ix_agent_types_name_active", "name", "is_active"),
        Index("ix_agent_types_category_sort", "category", "sort_order"),
    )

    def __repr__(self) -> str:
        return f"<AgentType {self.name} ({self.slug}) active={self.is_active}>"
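A hedged sketch of how the documented `tool_permissions` structure could be evaluated; `resolve_permission` is illustrative, not part of this diff:

```python
def resolve_permission(tool_permissions: dict, tool: str) -> str:
    # Denials win, then approval gates, then the allow list (with "*" wildcard)
    if tool in tool_permissions.get("denied", []):
        return "denied"
    if tool in tool_permissions.get("require_approval", []):
        return "needs_approval"
    allowed = tool_permissions.get("allowed", [])
    return "allowed" if "*" in allowed or tool in allowed else "denied"


perms = {"allowed": ["*"], "denied": [], "require_approval": ["gitea:create_pr"]}
assert resolve_permission(perms, "gitea:create_pr") == "needs_approval"
assert resolve_permission(perms, "gitea:list_issues") == "allowed"
```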
195 backend/app/models/syndarix/enums.py Normal file
@@ -0,0 +1,195 @@
# app/models/syndarix/enums.py
"""
Enums for Syndarix domain models.

These enums represent the core state machines and categorizations
used throughout the Syndarix AI consulting platform.
"""

from enum import Enum as PyEnum


class AutonomyLevel(str, PyEnum):
    """
    Defines how much control the human has over agent actions.

    FULL_CONTROL: Human must approve every agent action
    MILESTONE: Human approves at sprint boundaries and major decisions
    AUTONOMOUS: Agents work independently, only escalating critical issues
    """

    FULL_CONTROL = "full_control"
    MILESTONE = "milestone"
    AUTONOMOUS = "autonomous"


class ProjectComplexity(str, PyEnum):
    """
    Project complexity level for estimation and planning.

    SCRIPT: Simple automation or script-level work
    SIMPLE: Straightforward feature or fix
    MEDIUM: Standard complexity with some architectural considerations
    COMPLEX: Large-scale feature requiring significant design work
    """

    SCRIPT = "script"
    SIMPLE = "simple"
    MEDIUM = "medium"
    COMPLEX = "complex"


class ClientMode(str, PyEnum):
    """
    How the client prefers to interact with agents.

    TECHNICAL: Client is technical and prefers detailed updates
    AUTO: Agents automatically determine communication level
    """

    TECHNICAL = "technical"
    AUTO = "auto"


class ProjectStatus(str, PyEnum):
    """
    Project lifecycle status.

    ACTIVE: Project is actively being worked on
    PAUSED: Project is temporarily on hold
    COMPLETED: Project has been delivered successfully
    ARCHIVED: Project is no longer accessible for work
    """

    ACTIVE = "active"
    PAUSED = "paused"
    COMPLETED = "completed"
    ARCHIVED = "archived"


class AgentStatus(str, PyEnum):
    """
    Current operational status of an agent instance.

    IDLE: Agent is available but not currently working
    WORKING: Agent is actively processing a task
    WAITING: Agent is waiting for external input or approval
    PAUSED: Agent has been manually paused
    TERMINATED: Agent instance has been shut down
    """

    IDLE = "idle"
    WORKING = "working"
    WAITING = "waiting"
    PAUSED = "paused"
    TERMINATED = "terminated"


class IssueType(str, PyEnum):
    """
    Issue type for categorization and hierarchy.

    EPIC: Large feature or body of work containing stories
    STORY: User-facing feature or requirement
    TASK: Technical work item
    BUG: Defect or issue to be fixed
    """

    EPIC = "epic"
    STORY = "story"
    TASK = "task"
    BUG = "bug"


class IssueStatus(str, PyEnum):
    """
    Issue workflow status.

    OPEN: Issue is ready to be worked on
    IN_PROGRESS: Agent or human is actively working on the issue
    IN_REVIEW: Work is complete, awaiting review
    BLOCKED: Issue cannot proceed due to dependencies or blockers
    CLOSED: Issue has been completed or cancelled
    """

    OPEN = "open"
    IN_PROGRESS = "in_progress"
    IN_REVIEW = "in_review"
    BLOCKED = "blocked"
    CLOSED = "closed"


class IssuePriority(str, PyEnum):
    """
    Issue priority levels.

    LOW: Nice to have, can be deferred
    MEDIUM: Standard priority, should be done
    HIGH: Important, should be prioritized
    CRITICAL: Must be done immediately, blocking other work
    """

    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"


class SyncStatus(str, PyEnum):
    """
    External issue tracker synchronization status.

    SYNCED: Local and remote are in sync
    PENDING: Local changes waiting to be pushed
    CONFLICT: Merge conflict between local and remote
    ERROR: Synchronization failed due to an error
    """

    SYNCED = "synced"
    PENDING = "pending"
    CONFLICT = "conflict"
    ERROR = "error"


class SprintStatus(str, PyEnum):
    """
    Sprint lifecycle status.

    PLANNED: Sprint has been created but not started
    ACTIVE: Sprint is currently in progress
    IN_REVIEW: Sprint work is done, demo/review pending
    COMPLETED: Sprint has been finished successfully
    CANCELLED: Sprint was cancelled before completion
    """

    PLANNED = "planned"
    ACTIVE = "active"
    IN_REVIEW = "in_review"
    COMPLETED = "completed"
    CANCELLED = "cancelled"


class AgentTypeCategory(str, PyEnum):
    """
    Category classification for agent types.

    Used for grouping and filtering agents in the UI.

    DEVELOPMENT: Product, project, and engineering roles
    DESIGN: UI/UX and design research roles
    QUALITY: QA and security engineering
    OPERATIONS: DevOps and MLOps
    AI_ML: Machine learning and AI specialists
    DATA: Data science and engineering
    LEADERSHIP: Technical leadership roles
    DOMAIN_EXPERT: Industry and domain specialists
    """

    DEVELOPMENT = "development"
    DESIGN = "design"
    QUALITY = "quality"
    OPERATIONS = "operations"
    AI_ML = "ai_ml"
    DATA = "data"
    LEADERSHIP = "leadership"
    DOMAIN_EXPERT = "domain_expert"
190
backend/app/models/syndarix/issue.py
Normal file
190
backend/app/models/syndarix/issue.py
Normal file
@@ -0,0 +1,190 @@
# app/models/syndarix/issue.py
"""
Issue model for Syndarix AI consulting platform.

An Issue represents a unit of work that can be assigned to agents or humans,
with optional synchronization to external issue trackers (Gitea, GitHub, GitLab).
"""

from sqlalchemy import (
    Column,
    Date,
    DateTime,
    Enum,
    ForeignKey,
    Index,
    Integer,
    String,
    Text,
)
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import IssuePriority, IssueStatus, IssueType, SyncStatus


class Issue(Base, UUIDMixin, TimestampMixin):
    """
    Issue model representing a unit of work in a project.

    Features:
    - Standard issue fields (title, body, status, priority)
    - Assignment to agent instances or human assignees
    - Sprint association for backlog management
    - External tracker synchronization (Gitea, GitHub, GitLab)
    """

    __tablename__ = "issues"

    # Foreign key to project
    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )

    # Parent issue for hierarchy (Epic -> Story -> Task)
    parent_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("issues.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
    )

    # Issue type (Epic, Story, Task, Bug)
    type: Column[IssueType] = Column(
        Enum(
            IssueType, name="issue_type", values_callable=lambda x: [e.value for e in x]
        ),
        default=IssueType.TASK,
        nullable=False,
        index=True,
    )

    # Reporter (who created this issue - can be user or agent)
    reporter_id = Column(
        PGUUID(as_uuid=True),
        nullable=True,  # System-generated issues may have no reporter
        index=True,
    )

    # Issue content
    title = Column(String(500), nullable=False)
    body = Column(Text, nullable=False, default="")

    # Status and priority
    status: Column[IssueStatus] = Column(
        Enum(
            IssueStatus,
            name="issue_status",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=IssueStatus.OPEN,
        nullable=False,
        index=True,
    )

    priority: Column[IssuePriority] = Column(
        Enum(
            IssuePriority,
            name="issue_priority",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=IssuePriority.MEDIUM,
        nullable=False,
        index=True,
    )

    # Labels for categorization (e.g., ["bug", "frontend", "urgent"])
    labels = Column(JSONB, default=list, nullable=False)

    # Assignment - either to an agent or a human (mutually exclusive)
    assigned_agent_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("agent_instances.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    # Human assignee (username or email, not a FK to allow external users)
    human_assignee = Column(String(255), nullable=True, index=True)

    # Sprint association
    sprint_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("sprints.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    # Story points for estimation
    story_points = Column(Integer, nullable=True)

    # Due date for the issue
    due_date = Column(Date, nullable=True, index=True)

    # External tracker integration
    external_tracker_type = Column(
        String(50),
        nullable=True,
        index=True,
    )  # 'gitea', 'github', 'gitlab'

    external_issue_id = Column(String(255), nullable=True)  # External system's ID
    remote_url = Column(String(1000), nullable=True)  # Link to external issue
    external_issue_number = Column(Integer, nullable=True)  # Issue number (e.g., #123)

    # Sync status with external tracker
    sync_status: Column[SyncStatus] = Column(
        Enum(
            SyncStatus,
            name="sync_status",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=SyncStatus.SYNCED,
        nullable=False,
        # Note: Index defined in __table_args__ as ix_issues_sync_status
    )

    last_synced_at = Column(DateTime(timezone=True), nullable=True)
    external_updated_at = Column(DateTime(timezone=True), nullable=True)

    # Lifecycle timestamp
    closed_at = Column(DateTime(timezone=True), nullable=True, index=True)

    # Relationships
    project = relationship("Project", back_populates="issues")
    assigned_agent = relationship(
        "AgentInstance",
        back_populates="assigned_issues",
        foreign_keys=[assigned_agent_id],
    )
    sprint = relationship("Sprint", back_populates="issues")
    parent = relationship("Issue", remote_side="Issue.id", backref="children")

    __table_args__ = (
        Index("ix_issues_project_status", "project_id", "status"),
        Index("ix_issues_project_priority", "project_id", "priority"),
        Index("ix_issues_project_sprint", "project_id", "sprint_id"),
        Index(
            "ix_issues_external_tracker_id",
            "external_tracker_type",
            "external_issue_id",
        ),
        Index("ix_issues_sync_status", "sync_status"),
        Index("ix_issues_project_agent", "project_id", "assigned_agent_id"),
        Index("ix_issues_project_type", "project_id", "type"),
        Index("ix_issues_project_status_priority", "project_id", "status", "priority"),
    )

    def __repr__(self) -> str:
        return (
            f"<Issue {self.id} title='{self.title[:30]}...' "
            f"status={self.status.value} priority={self.priority.value}>"
        )
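Note on the repeated values_callable=lambda x: [e.value for e in x] idiom in these columns: by default, SQLAlchemy's Enum type builds the database enum from the Python members' names, not their values, so without it Postgres would store labels like 'TASK' rather than 'task'. A minimal sketch of the difference, using a throwaway Color enum purely for illustration:

import enum
from sqlalchemy import Enum

class Color(enum.Enum):
    RED = "red"

Enum(Color, name="color")  # database enum labels: ('RED',)
Enum(Color, name="color", values_callable=lambda x: [e.value for e in x])  # ('red',)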
backend/app/models/syndarix/project.py (Normal file, 119 lines)
@@ -0,0 +1,119 @@
# app/models/syndarix/project.py
"""
Project model for Syndarix AI consulting platform.

A Project represents a client engagement where AI agents collaborate
to deliver software solutions.
"""

from sqlalchemy import Column, Enum, ForeignKey, Index, String, Text
from sqlalchemy.dialects.postgresql import (
    JSONB,
    UUID as PGUUID,
)
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import AutonomyLevel, ClientMode, ProjectComplexity, ProjectStatus


class Project(Base, UUIDMixin, TimestampMixin):
    """
    Project model representing a client engagement.

    A project contains:
    - Configuration for how autonomous agents should operate
    - Settings for MCP server integrations
    - Relationship to assigned agents, issues, and sprints
    """

    __tablename__ = "projects"

    name = Column(String(255), nullable=False, index=True)
    slug = Column(String(255), unique=True, nullable=False, index=True)
    description = Column(Text, nullable=True)

    autonomy_level: Column[AutonomyLevel] = Column(
        Enum(
            AutonomyLevel,
            name="autonomy_level",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=AutonomyLevel.MILESTONE,
        nullable=False,
        index=True,
    )

    status: Column[ProjectStatus] = Column(
        Enum(
            ProjectStatus,
            name="project_status",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=ProjectStatus.ACTIVE,
        nullable=False,
        index=True,
    )

    complexity: Column[ProjectComplexity] = Column(
        Enum(
            ProjectComplexity,
            name="project_complexity",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=ProjectComplexity.MEDIUM,
        nullable=False,
        index=True,
    )

    client_mode: Column[ClientMode] = Column(
        Enum(
            ClientMode,
            name="client_mode",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=ClientMode.AUTO,
        nullable=False,
        index=True,
    )

    # JSON field for flexible project configuration
    # Can include: mcp_servers, webhook_urls, notification_settings, etc.
    settings = Column(JSONB, default=dict, nullable=False)

    # Foreign key to the User who owns this project
    owner_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("users.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )

    # Relationships
    owner = relationship("User", foreign_keys=[owner_id])
    agent_instances = relationship(
        "AgentInstance",
        back_populates="project",
        cascade="all, delete-orphan",
    )
    issues = relationship(
        "Issue",
        back_populates="project",
        cascade="all, delete-orphan",
    )
    sprints = relationship(
        "Sprint",
        back_populates="project",
        cascade="all, delete-orphan",
    )

    __table_args__ = (
        Index("ix_projects_slug_status", "slug", "status"),
        Index("ix_projects_owner_status", "owner_id", "status"),
        Index("ix_projects_autonomy_status", "autonomy_level", "status"),
        Index("ix_projects_complexity_status", "complexity", "status"),
    )

    def __repr__(self) -> str:
        return f"<Project {self.name} ({self.slug}) status={self.status.value}>"
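Since settings is a schemaless JSONB column, the shape of its contents is enforced only by the convention in the comment above. A hedged sketch of what a payload might look like (the keys and values here are illustrative placeholders, not a defined contract):

project = Project(
    name="Acme Portal",
    slug="acme-portal",
    settings={
        "mcp_servers": ["gitea", "filesystem"],  # hypothetical server names
        "webhook_urls": ["https://example.com/hooks/syndarix"],
        "notification_settings": {"email": True},
    },
)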
backend/app/models/syndarix/sprint.py (Normal file, 90 lines)
@@ -0,0 +1,90 @@
# app/models/syndarix/sprint.py
"""
Sprint model for Syndarix AI consulting platform.

A Sprint represents a time-boxed iteration for organizing and delivering work.
"""

from sqlalchemy import (
    Column,
    Date,
    Enum,
    ForeignKey,
    Index,
    Integer,
    String,
    Text,
    UniqueConstraint,
)
from sqlalchemy.dialects.postgresql import UUID as PGUUID
from sqlalchemy.orm import relationship

from app.models.base import Base, TimestampMixin, UUIDMixin

from .enums import SprintStatus


class Sprint(Base, UUIDMixin, TimestampMixin):
    """
    Sprint model representing a time-boxed iteration.

    Tracks:
    - Sprint metadata (name, number, goal)
    - Date range (start/end)
    - Progress metrics (planned vs completed points)
    """

    __tablename__ = "sprints"

    # Foreign key to project
    project_id = Column(
        PGUUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )

    # Sprint identification
    name = Column(String(255), nullable=False)
    number = Column(Integer, nullable=False)  # Sprint number within project

    # Sprint goal (what we aim to achieve)
    goal = Column(Text, nullable=True)

    # Date range
    start_date = Column(Date, nullable=False, index=True)
    end_date = Column(Date, nullable=False, index=True)

    # Status
    status: Column[SprintStatus] = Column(
        Enum(
            SprintStatus,
            name="sprint_status",
            values_callable=lambda x: [e.value for e in x],
        ),
        default=SprintStatus.PLANNED,
        nullable=False,
        index=True,
    )

    # Progress metrics
    planned_points = Column(Integer, nullable=True)  # Sum of story points at start
    velocity = Column(Integer, nullable=True)  # Sum of completed story points

    # Relationships
    project = relationship("Project", back_populates="sprints")
    issues = relationship("Issue", back_populates="sprint")

    __table_args__ = (
        Index("ix_sprints_project_status", "project_id", "status"),
        Index("ix_sprints_project_number", "project_id", "number"),
        Index("ix_sprints_date_range", "start_date", "end_date"),
        # Ensure sprint numbers are unique within a project
        UniqueConstraint("project_id", "number", name="uq_sprint_project_number"),
    )

    def __repr__(self) -> str:
        return (
            f"<Sprint {self.name} (#{self.number}) "
            f"project={self.project_id} status={self.status.value}>"
        )
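planned_points and velocity are plain integer snapshots rather than computed columns, so something must fill them in at sprint boundaries. A sketch of how velocity might be derived at sprint close, assuming the issue status enum has a CLOSED member (the enum values themselves are not shown in this diff):

def compute_velocity(sprint: Sprint) -> int:
    # Sum the story points of issues finished in this sprint;
    # unestimated issues (story_points is None) contribute zero.
    return sum(
        issue.story_points or 0
        for issue in sprint.issues
        if issue.status == IssueStatus.CLOSED
    )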
@@ -76,11 +76,7 @@ class UserSession(Base, UUIDMixin, TimestampMixin):
         """Check if session has expired."""
         from datetime import datetime
 
-        now = datetime.now(UTC)
-        expires_at = self.expires_at
-        if expires_at.tzinfo is None:
-            expires_at = expires_at.replace(tzinfo=UTC)
-        return bool(expires_at < now)
+        return self.expires_at < datetime.now(UTC)
 
     def to_dict(self):
         """Convert session to dictionary for serialization."""
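The simplification in this hunk is only safe if expires_at is guaranteed to be timezone-aware (e.g., stored in a DateTime(timezone=True) column), because Python refuses to order naive against aware datetimes, which is exactly the case the deleted branch defended against:

from datetime import UTC, datetime

aware = datetime.now(UTC)
naive = datetime(2024, 1, 15, 10, 30)

try:
    naive < aware
except TypeError as e:
    print(e)  # can't compare offset-naive and offset-aware datetimes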
@@ -1,39 +0,0 @@
# app/repositories/__init__.py
"""Repository layer — all database access goes through these classes."""

from app.repositories.oauth_account import OAuthAccountRepository, oauth_account_repo
from app.repositories.oauth_authorization_code import (
    OAuthAuthorizationCodeRepository,
    oauth_authorization_code_repo,
)
from app.repositories.oauth_client import OAuthClientRepository, oauth_client_repo
from app.repositories.oauth_consent import OAuthConsentRepository, oauth_consent_repo
from app.repositories.oauth_provider_token import (
    OAuthProviderTokenRepository,
    oauth_provider_token_repo,
)
from app.repositories.oauth_state import OAuthStateRepository, oauth_state_repo
from app.repositories.organization import OrganizationRepository, organization_repo
from app.repositories.session import SessionRepository, session_repo
from app.repositories.user import UserRepository, user_repo

__all__ = [
    "OAuthAccountRepository",
    "OAuthAuthorizationCodeRepository",
    "OAuthClientRepository",
    "OAuthConsentRepository",
    "OAuthProviderTokenRepository",
    "OAuthStateRepository",
    "OrganizationRepository",
    "SessionRepository",
    "UserRepository",
    "oauth_account_repo",
    "oauth_authorization_code_repo",
    "oauth_client_repo",
    "oauth_consent_repo",
    "oauth_provider_token_repo",
    "oauth_state_repo",
    "organization_repo",
    "session_repo",
    "user_repo",
]
@@ -1,249 +0,0 @@
# app/repositories/oauth_account.py
"""Repository for OAuthAccount model async database operations."""

import logging
from datetime import datetime
from uuid import UUID

from pydantic import BaseModel
from sqlalchemy import and_, delete, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload

from app.core.repository_exceptions import DuplicateEntryError
from app.models.oauth_account import OAuthAccount
from app.repositories.base import BaseRepository
from app.schemas.oauth import OAuthAccountCreate

logger = logging.getLogger(__name__)


class EmptySchema(BaseModel):
    """Placeholder schema for repository operations that don't need update schemas."""


class OAuthAccountRepository(
    BaseRepository[OAuthAccount, OAuthAccountCreate, EmptySchema]
):
    """Repository for OAuth account links."""

    async def get_by_provider_id(
        self,
        db: AsyncSession,
        *,
        provider: str,
        provider_user_id: str,
    ) -> OAuthAccount | None:
        """Get OAuth account by provider and provider user ID."""
        try:
            result = await db.execute(
                select(OAuthAccount)
                .where(
                    and_(
                        OAuthAccount.provider == provider,
                        OAuthAccount.provider_user_id == provider_user_id,
                    )
                )
                .options(joinedload(OAuthAccount.user))
            )
            return result.scalar_one_or_none()
        except Exception as e:  # pragma: no cover
            logger.error(
                "Error getting OAuth account for %s:%s: %s",
                provider,
                provider_user_id,
                e,
            )
            raise

    async def get_by_provider_email(
        self,
        db: AsyncSession,
        *,
        provider: str,
        email: str,
    ) -> OAuthAccount | None:
        """Get OAuth account by provider and email."""
        try:
            result = await db.execute(
                select(OAuthAccount)
                .where(
                    and_(
                        OAuthAccount.provider == provider,
                        OAuthAccount.provider_email == email,
                    )
                )
                .options(joinedload(OAuthAccount.user))
            )
            return result.scalar_one_or_none()
        except Exception as e:  # pragma: no cover
            logger.error(
                "Error getting OAuth account for %s email %s: %s", provider, email, e
            )
            raise

    async def get_user_accounts(
        self,
        db: AsyncSession,
        *,
        user_id: str | UUID,
    ) -> list[OAuthAccount]:
        """Get all OAuth accounts linked to a user."""
        try:
            user_uuid = UUID(str(user_id)) if isinstance(user_id, str) else user_id

            result = await db.execute(
                select(OAuthAccount)
                .where(OAuthAccount.user_id == user_uuid)
                .order_by(OAuthAccount.created_at.desc())
            )
            return list(result.scalars().all())
        except Exception as e:  # pragma: no cover
            logger.error("Error getting OAuth accounts for user %s: %s", user_id, e)
            raise

    async def get_user_account_by_provider(
        self,
        db: AsyncSession,
        *,
        user_id: str | UUID,
        provider: str,
    ) -> OAuthAccount | None:
        """Get a specific OAuth account for a user and provider."""
        try:
            user_uuid = UUID(str(user_id)) if isinstance(user_id, str) else user_id

            result = await db.execute(
                select(OAuthAccount).where(
                    and_(
                        OAuthAccount.user_id == user_uuid,
                        OAuthAccount.provider == provider,
                    )
                )
            )
            return result.scalar_one_or_none()
        except Exception as e:  # pragma: no cover
            logger.error(
                "Error getting OAuth account for user %s, provider %s: %s",
                user_id,
                provider,
                e,
            )
            raise

    async def create_account(
        self, db: AsyncSession, *, obj_in: OAuthAccountCreate
    ) -> OAuthAccount:
        """Create a new OAuth account link."""
        try:
            db_obj = OAuthAccount(
                user_id=obj_in.user_id,
                provider=obj_in.provider,
                provider_user_id=obj_in.provider_user_id,
                provider_email=obj_in.provider_email,
                access_token=obj_in.access_token,
                refresh_token=obj_in.refresh_token,
                token_expires_at=obj_in.token_expires_at,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)

            logger.info(
                "OAuth account created: %s linked to user %s",
                obj_in.provider,
                obj_in.user_id,
            )
            return db_obj
        except IntegrityError as e:  # pragma: no cover
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            if "uq_oauth_provider_user" in error_msg.lower():
                logger.warning(
                    "OAuth account already exists: %s:%s",
                    obj_in.provider,
                    obj_in.provider_user_id,
                )
                raise DuplicateEntryError(
                    f"This {obj_in.provider} account is already linked to another user"
                )
            logger.error("Integrity error creating OAuth account: %s", error_msg)
            raise DuplicateEntryError(f"Failed to create OAuth account: {error_msg}")
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.exception("Error creating OAuth account: %s", e)
            raise

    async def delete_account(
        self,
        db: AsyncSession,
        *,
        user_id: str | UUID,
        provider: str,
    ) -> bool:
        """Delete an OAuth account link."""
        try:
            user_uuid = UUID(str(user_id)) if isinstance(user_id, str) else user_id

            result = await db.execute(
                delete(OAuthAccount).where(
                    and_(
                        OAuthAccount.user_id == user_uuid,
                        OAuthAccount.provider == provider,
                    )
                )
            )
            await db.commit()

            deleted = result.rowcount > 0
            if deleted:
                logger.info(
                    "OAuth account deleted: %s unlinked from user %s", provider, user_id
                )
            else:
                logger.warning(
                    "OAuth account not found for deletion: %s for user %s",
                    provider,
                    user_id,
                )

            return deleted
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.error(
                "Error deleting OAuth account %s for user %s: %s", provider, user_id, e
            )
            raise

    async def update_tokens(
        self,
        db: AsyncSession,
        *,
        account: OAuthAccount,
        access_token: str | None = None,
        refresh_token: str | None = None,
        token_expires_at: datetime | None = None,
    ) -> OAuthAccount:
        """Update OAuth tokens for an account."""
        try:
            if access_token is not None:
                account.access_token = access_token
            if refresh_token is not None:
                account.refresh_token = refresh_token
            if token_expires_at is not None:
                account.token_expires_at = token_expires_at

            db.add(account)
            await db.commit()
            await db.refresh(account)

            return account
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.error("Error updating OAuth tokens: %s", e)
            raise


# Singleton instance
oauth_account_repo = OAuthAccountRepository(OAuthAccount)
@@ -1,108 +0,0 @@
# app/repositories/oauth_authorization_code.py
"""Repository for OAuthAuthorizationCode model."""

import logging
from datetime import UTC, datetime
from uuid import UUID

from sqlalchemy import and_, delete, select, update
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.oauth_authorization_code import OAuthAuthorizationCode

logger = logging.getLogger(__name__)


class OAuthAuthorizationCodeRepository:
    """Repository for OAuth 2.0 authorization codes."""

    async def create_code(
        self,
        db: AsyncSession,
        *,
        code: str,
        client_id: str,
        user_id: UUID,
        redirect_uri: str,
        scope: str,
        expires_at: datetime,
        code_challenge: str | None = None,
        code_challenge_method: str | None = None,
        state: str | None = None,
        nonce: str | None = None,
    ) -> OAuthAuthorizationCode:
        """Create and persist a new authorization code."""
        auth_code = OAuthAuthorizationCode(
            code=code,
            client_id=client_id,
            user_id=user_id,
            redirect_uri=redirect_uri,
            scope=scope,
            code_challenge=code_challenge,
            code_challenge_method=code_challenge_method,
            state=state,
            nonce=nonce,
            expires_at=expires_at,
            used=False,
        )
        db.add(auth_code)
        await db.commit()
        return auth_code

    async def consume_code_atomically(
        self, db: AsyncSession, *, code: str
    ) -> UUID | None:
        """
        Atomically mark a code as used and return its UUID.

        Returns the UUID if the code was found and not yet used, None otherwise.
        This prevents race conditions per RFC 6749 Section 4.1.2.
        """
        stmt = (
            update(OAuthAuthorizationCode)
            .where(
                and_(
                    OAuthAuthorizationCode.code == code,
                    OAuthAuthorizationCode.used == False,  # noqa: E712
                )
            )
            .values(used=True)
            .returning(OAuthAuthorizationCode.id)
        )
        result = await db.execute(stmt)
        row_id = result.scalar_one_or_none()
        if row_id is not None:
            await db.commit()
        return row_id

    async def get_by_id(
        self, db: AsyncSession, *, code_id: UUID
    ) -> OAuthAuthorizationCode | None:
        """Get authorization code by its UUID primary key."""
        result = await db.execute(
            select(OAuthAuthorizationCode).where(OAuthAuthorizationCode.id == code_id)
        )
        return result.scalar_one_or_none()

    async def get_by_code(
        self, db: AsyncSession, *, code: str
    ) -> OAuthAuthorizationCode | None:
        """Get authorization code by the code string value."""
        result = await db.execute(
            select(OAuthAuthorizationCode).where(OAuthAuthorizationCode.code == code)
        )
        return result.scalar_one_or_none()

    async def cleanup_expired(self, db: AsyncSession) -> int:
        """Delete all expired authorization codes. Returns count deleted."""
        result = await db.execute(
            delete(OAuthAuthorizationCode).where(
                OAuthAuthorizationCode.expires_at < datetime.now(UTC)
            )
        )
        await db.commit()
        return result.rowcount  # type: ignore[attr-defined]


# Singleton instance
oauth_authorization_code_repo = OAuthAuthorizationCodeRepository()
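consume_code_atomically deserves a second look: folding the read and the write into a single UPDATE ... WHERE used = false ... RETURNING statement means two concurrent token requests can never both redeem the same code, because only one UPDATE will match the row. A sketch of the intended call pattern (submitted_code stands in for the value from the token request; the revocation response to reuse is the caller's job, per the RFC 6749 guidance the docstring cites):

code_id = await oauth_authorization_code_repo.consume_code_atomically(
    db, code=submitted_code
)
if code_id is None:
    # Unknown code, or a replay of an already-used one. RFC 6749 recommends
    # treating reuse as a possible attack and revoking tokens issued for it.
    ...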
@@ -1,201 +0,0 @@
# app/repositories/oauth_client.py
"""Repository for OAuthClient model async database operations."""

import logging
import secrets
from uuid import UUID

from pydantic import BaseModel
from sqlalchemy import and_, delete, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession

from app.core.repository_exceptions import DuplicateEntryError
from app.models.oauth_client import OAuthClient
from app.repositories.base import BaseRepository
from app.schemas.oauth import OAuthClientCreate

logger = logging.getLogger(__name__)


class EmptySchema(BaseModel):
    """Placeholder schema for repository operations that don't need update schemas."""


class OAuthClientRepository(
    BaseRepository[OAuthClient, OAuthClientCreate, EmptySchema]
):
    """Repository for OAuth clients (provider mode)."""

    async def get_by_client_id(
        self, db: AsyncSession, *, client_id: str
    ) -> OAuthClient | None:
        """Get OAuth client by client_id."""
        try:
            result = await db.execute(
                select(OAuthClient).where(
                    and_(
                        OAuthClient.client_id == client_id,
                        OAuthClient.is_active == True,  # noqa: E712
                    )
                )
            )
            return result.scalar_one_or_none()
        except Exception as e:  # pragma: no cover
            logger.error("Error getting OAuth client %s: %s", client_id, e)
            raise

    async def create_client(
        self,
        db: AsyncSession,
        *,
        obj_in: OAuthClientCreate,
        owner_user_id: UUID | None = None,
    ) -> tuple[OAuthClient, str | None]:
        """Create a new OAuth client."""
        try:
            client_id = secrets.token_urlsafe(32)

            client_secret = None
            client_secret_hash = None
            if obj_in.client_type == "confidential":
                client_secret = secrets.token_urlsafe(48)
                from app.core.auth import get_password_hash

                client_secret_hash = get_password_hash(client_secret)

            db_obj = OAuthClient(
                client_id=client_id,
                client_secret_hash=client_secret_hash,
                client_name=obj_in.client_name,
                client_description=obj_in.client_description,
                client_type=obj_in.client_type,
                redirect_uris=obj_in.redirect_uris,
                allowed_scopes=obj_in.allowed_scopes,
                owner_user_id=owner_user_id,
                is_active=True,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)

            logger.info(
                "OAuth client created: %s (%s...)", obj_in.client_name, client_id[:8]
            )
            return db_obj, client_secret
        except IntegrityError as e:  # pragma: no cover
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error("Error creating OAuth client: %s", error_msg)
            raise DuplicateEntryError(f"Failed to create OAuth client: {error_msg}")
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.exception("Error creating OAuth client: %s", e)
            raise

    async def deactivate_client(
        self, db: AsyncSession, *, client_id: str
    ) -> OAuthClient | None:
        """Deactivate an OAuth client."""
        try:
            client = await self.get_by_client_id(db, client_id=client_id)
            if client is None:
                return None

            client.is_active = False
            db.add(client)
            await db.commit()
            await db.refresh(client)

            logger.info("OAuth client deactivated: %s", client.client_name)
            return client
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.error("Error deactivating OAuth client %s: %s", client_id, e)
            raise

    async def validate_redirect_uri(
        self, db: AsyncSession, *, client_id: str, redirect_uri: str
    ) -> bool:
        """Validate that a redirect URI is allowed for a client."""
        try:
            client = await self.get_by_client_id(db, client_id=client_id)
            if client is None:
                return False

            return redirect_uri in (client.redirect_uris or [])
        except Exception as e:  # pragma: no cover
            logger.error("Error validating redirect URI: %s", e)
            return False

    async def verify_client_secret(
        self, db: AsyncSession, *, client_id: str, client_secret: str
    ) -> bool:
        """Verify client credentials."""
        try:
            result = await db.execute(
                select(OAuthClient).where(
                    and_(
                        OAuthClient.client_id == client_id,
                        OAuthClient.is_active == True,  # noqa: E712
                    )
                )
            )
            client = result.scalar_one_or_none()

            if client is None or client.client_secret_hash is None:
                return False

            from app.core.auth import verify_password

            stored_hash: str = str(client.client_secret_hash)

            if stored_hash.startswith("$2"):
                return verify_password(client_secret, stored_hash)
            else:
                import hashlib

                secret_hash = hashlib.sha256(client_secret.encode()).hexdigest()
                return secrets.compare_digest(stored_hash, secret_hash)
        except Exception as e:  # pragma: no cover
            logger.error("Error verifying client secret: %s", e)
            return False

    async def get_all_clients(
        self, db: AsyncSession, *, include_inactive: bool = False
    ) -> list[OAuthClient]:
        """Get all OAuth clients."""
        try:
            query = select(OAuthClient).order_by(OAuthClient.created_at.desc())
            if not include_inactive:
                query = query.where(OAuthClient.is_active == True)  # noqa: E712

            result = await db.execute(query)
            return list(result.scalars().all())
        except Exception as e:  # pragma: no cover
            logger.error("Error getting all OAuth clients: %s", e)
            raise

    async def delete_client(self, db: AsyncSession, *, client_id: str) -> bool:
        """Delete an OAuth client permanently."""
        try:
            result = await db.execute(
                delete(OAuthClient).where(OAuthClient.client_id == client_id)
            )
            await db.commit()

            deleted = result.rowcount > 0
            if deleted:
                logger.info("OAuth client deleted: %s", client_id)
            else:
                logger.warning("OAuth client not found for deletion: %s", client_id)

            return deleted
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.error("Error deleting OAuth client %s: %s", client_id, e)
            raise


# Singleton instance
oauth_client_repo = OAuthClientRepository(OAuthClient)
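One detail in verify_client_secret above: the legacy SHA-256 branch compares hashes with secrets.compare_digest rather than ==. Plain == can return early at the first differing byte, so response times leak how much of the candidate hash matched; compare_digest takes time independent of where the inputs differ. A self-contained illustration:

import hashlib
import secrets

stored = hashlib.sha256(b"expected-secret").hexdigest()
candidate = hashlib.sha256(b"attempt").hexdigest()

# Constant-time comparison; equivalent to == in result, not in timing.
match = secrets.compare_digest(stored, candidate)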
@@ -1,113 +0,0 @@
# app/repositories/oauth_consent.py
"""Repository for OAuthConsent model."""

import logging
from typing import Any
from uuid import UUID

from sqlalchemy import and_, delete, select
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.oauth_client import OAuthClient
from app.models.oauth_provider_token import OAuthConsent

logger = logging.getLogger(__name__)


class OAuthConsentRepository:
    """Repository for OAuth consent records (user grants to clients)."""

    async def get_consent(
        self, db: AsyncSession, *, user_id: UUID, client_id: str
    ) -> OAuthConsent | None:
        """Get the consent record for a user-client pair, or None if not found."""
        result = await db.execute(
            select(OAuthConsent).where(
                and_(
                    OAuthConsent.user_id == user_id,
                    OAuthConsent.client_id == client_id,
                )
            )
        )
        return result.scalar_one_or_none()

    async def grant_consent(
        self,
        db: AsyncSession,
        *,
        user_id: UUID,
        client_id: str,
        scopes: list[str],
    ) -> OAuthConsent:
        """
        Create or update consent for a user-client pair.

        If consent already exists, the new scopes are merged with existing ones.
        Returns the created or updated consent record.
        """
        consent = await self.get_consent(db, user_id=user_id, client_id=client_id)

        if consent:
            existing = (
                set(consent.granted_scopes.split()) if consent.granted_scopes else set()
            )
            merged = existing | set(scopes)
            consent.granted_scopes = " ".join(sorted(merged))  # type: ignore[assignment]
        else:
            consent = OAuthConsent(
                user_id=user_id,
                client_id=client_id,
                granted_scopes=" ".join(sorted(set(scopes))),
            )
            db.add(consent)

        await db.commit()
        await db.refresh(consent)
        return consent

    async def get_user_consents_with_clients(
        self, db: AsyncSession, *, user_id: UUID
    ) -> list[dict[str, Any]]:
        """Get all consent records for a user joined with client details."""
        result = await db.execute(
            select(OAuthConsent, OAuthClient)
            .join(OAuthClient, OAuthConsent.client_id == OAuthClient.client_id)
            .where(OAuthConsent.user_id == user_id)
        )
        rows = result.all()
        return [
            {
                "client_id": consent.client_id,
                "client_name": client.client_name,
                "client_description": client.client_description,
                "granted_scopes": consent.granted_scopes.split()
                if consent.granted_scopes
                else [],
                "granted_at": consent.created_at.isoformat(),
            }
            for consent, client in rows
        ]

    async def revoke_consent(
        self, db: AsyncSession, *, user_id: UUID, client_id: str
    ) -> bool:
        """
        Delete the consent record for a user-client pair.

        Returns True if a record was found and deleted.
        Note: Callers are responsible for also revoking associated tokens.
        """
        result = await db.execute(
            delete(OAuthConsent).where(
                and_(
                    OAuthConsent.user_id == user_id,
                    OAuthConsent.client_id == client_id,
                )
            )
        )
        await db.commit()
        return result.rowcount > 0  # type: ignore[attr-defined]


# Singleton instance
oauth_consent_repo = OAuthConsentRepository()
@@ -1,142 +0,0 @@
# app/repositories/oauth_provider_token.py
"""Repository for OAuthProviderRefreshToken model."""

import logging
from datetime import UTC, datetime, timedelta
from uuid import UUID

from sqlalchemy import and_, delete, select, update
from sqlalchemy.ext.asyncio import AsyncSession

from app.models.oauth_provider_token import OAuthProviderRefreshToken

logger = logging.getLogger(__name__)


class OAuthProviderTokenRepository:
    """Repository for OAuth provider refresh tokens."""

    async def create_token(
        self,
        db: AsyncSession,
        *,
        token_hash: str,
        jti: str,
        client_id: str,
        user_id: UUID,
        scope: str,
        expires_at: datetime,
        device_info: str | None = None,
        ip_address: str | None = None,
    ) -> OAuthProviderRefreshToken:
        """Create and persist a new refresh token record."""
        token = OAuthProviderRefreshToken(
            token_hash=token_hash,
            jti=jti,
            client_id=client_id,
            user_id=user_id,
            scope=scope,
            expires_at=expires_at,
            device_info=device_info,
            ip_address=ip_address,
        )
        db.add(token)
        await db.commit()
        return token

    async def get_by_token_hash(
        self, db: AsyncSession, *, token_hash: str
    ) -> OAuthProviderRefreshToken | None:
        """Get refresh token record by SHA-256 token hash."""
        result = await db.execute(
            select(OAuthProviderRefreshToken).where(
                OAuthProviderRefreshToken.token_hash == token_hash
            )
        )
        return result.scalar_one_or_none()

    async def get_by_jti(
        self, db: AsyncSession, *, jti: str
    ) -> OAuthProviderRefreshToken | None:
        """Get refresh token record by JWT ID (JTI)."""
        result = await db.execute(
            select(OAuthProviderRefreshToken).where(
                OAuthProviderRefreshToken.jti == jti
            )
        )
        return result.scalar_one_or_none()

    async def revoke(
        self, db: AsyncSession, *, token: OAuthProviderRefreshToken
    ) -> None:
        """Mark a specific token record as revoked."""
        token.revoked = True  # type: ignore[assignment]
        token.last_used_at = datetime.now(UTC)  # type: ignore[assignment]
        await db.commit()

    async def revoke_all_for_user_client(
        self, db: AsyncSession, *, user_id: UUID, client_id: str
    ) -> int:
        """
        Revoke all active tokens for a specific user-client pair.

        Used when security incidents are detected (e.g., authorization code reuse).
        Returns the number of tokens revoked.
        """
        result = await db.execute(
            update(OAuthProviderRefreshToken)
            .where(
                and_(
                    OAuthProviderRefreshToken.user_id == user_id,
                    OAuthProviderRefreshToken.client_id == client_id,
                    OAuthProviderRefreshToken.revoked == False,  # noqa: E712
                )
            )
            .values(revoked=True)
        )
        count = result.rowcount  # type: ignore[attr-defined]
        if count > 0:
            await db.commit()
        return count

    async def revoke_all_for_user(self, db: AsyncSession, *, user_id: UUID) -> int:
        """
        Revoke all active tokens for a user across all clients.

        Used when user changes password or logs out everywhere.
        Returns the number of tokens revoked.
        """
        result = await db.execute(
            update(OAuthProviderRefreshToken)
            .where(
                and_(
                    OAuthProviderRefreshToken.user_id == user_id,
                    OAuthProviderRefreshToken.revoked == False,  # noqa: E712
                )
            )
            .values(revoked=True)
        )
        count = result.rowcount  # type: ignore[attr-defined]
        if count > 0:
            await db.commit()
        return count

    async def cleanup_expired(self, db: AsyncSession, *, cutoff_days: int = 7) -> int:
        """
        Delete expired refresh tokens older than cutoff_days.

        Should be called periodically (e.g., daily).
        Returns the number of tokens deleted.
        """
        cutoff = datetime.now(UTC) - timedelta(days=cutoff_days)
        result = await db.execute(
            delete(OAuthProviderRefreshToken).where(
                OAuthProviderRefreshToken.expires_at < cutoff
            )
        )
        await db.commit()
        return result.rowcount  # type: ignore[attr-defined]


# Singleton instance
oauth_provider_token_repo = OAuthProviderTokenRepository()
@@ -1,113 +0,0 @@
# app/repositories/oauth_state.py
"""Repository for OAuthState model async database operations."""

import logging
from datetime import UTC, datetime

from pydantic import BaseModel
from sqlalchemy import delete, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession

from app.core.repository_exceptions import DuplicateEntryError
from app.models.oauth_state import OAuthState
from app.repositories.base import BaseRepository
from app.schemas.oauth import OAuthStateCreate

logger = logging.getLogger(__name__)


class EmptySchema(BaseModel):
    """Placeholder schema for repository operations that don't need update schemas."""


class OAuthStateRepository(BaseRepository[OAuthState, OAuthStateCreate, EmptySchema]):
    """Repository for OAuth state (CSRF protection)."""

    async def create_state(
        self, db: AsyncSession, *, obj_in: OAuthStateCreate
    ) -> OAuthState:
        """Create a new OAuth state for CSRF protection."""
        try:
            db_obj = OAuthState(
                state=obj_in.state,
                code_verifier=obj_in.code_verifier,
                nonce=obj_in.nonce,
                provider=obj_in.provider,
                redirect_uri=obj_in.redirect_uri,
                user_id=obj_in.user_id,
                expires_at=obj_in.expires_at,
            )
            db.add(db_obj)
            await db.commit()
            await db.refresh(db_obj)

            logger.debug("OAuth state created for %s", obj_in.provider)
            return db_obj
        except IntegrityError as e:  # pragma: no cover
            await db.rollback()
            error_msg = str(e.orig) if hasattr(e, "orig") else str(e)
            logger.error("OAuth state collision: %s", error_msg)
            raise DuplicateEntryError("Failed to create OAuth state, please retry")
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.exception("Error creating OAuth state: %s", e)
            raise

    async def get_and_consume_state(
        self, db: AsyncSession, *, state: str
    ) -> OAuthState | None:
        """Get and delete OAuth state (consume it)."""
        try:
            result = await db.execute(
                select(OAuthState).where(OAuthState.state == state)
            )
            db_obj = result.scalar_one_or_none()

            if db_obj is None:
                logger.warning("OAuth state not found: %s...", state[:8])
                return None

            now = datetime.now(UTC)
            expires_at = db_obj.expires_at
            if expires_at.tzinfo is None:
                expires_at = expires_at.replace(tzinfo=UTC)

            if expires_at < now:
                logger.warning("OAuth state expired: %s...", state[:8])
                await db.delete(db_obj)
                await db.commit()
                return None

            await db.delete(db_obj)
            await db.commit()

            logger.debug("OAuth state consumed: %s...", state[:8])
            return db_obj
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.error("Error consuming OAuth state: %s", e)
            raise

    async def cleanup_expired(self, db: AsyncSession) -> int:
        """Clean up expired OAuth states."""
        try:
            now = datetime.now(UTC)

            stmt = delete(OAuthState).where(OAuthState.expires_at < now)
            result = await db.execute(stmt)
            await db.commit()

            count = result.rowcount
            if count > 0:
                logger.info("Cleaned up %s expired OAuth states", count)

            return count
        except Exception as e:  # pragma: no cover
            await db.rollback()
            logger.error("Error cleaning up expired OAuth states: %s", e)
            raise


# Singleton instance
oauth_state_repo = OAuthStateRepository(OAuthState)
backend/app/schemas/events.py (Normal file, 273 lines)
@@ -0,0 +1,273 @@
"""
Event schemas for the Syndarix EventBus (Redis Pub/Sub).

This module defines event types and payload schemas for real-time communication
between services, agents, and the frontend.
"""

from datetime import datetime
from enum import Enum
from typing import Literal
from uuid import UUID

from pydantic import BaseModel, Field


class EventType(str, Enum):
    """
    Event types for the EventBus.

    Naming convention: {domain}.{action}
    """

    # Agent Events
    AGENT_SPAWNED = "agent.spawned"
    AGENT_STATUS_CHANGED = "agent.status_changed"
    AGENT_MESSAGE = "agent.message"
    AGENT_TERMINATED = "agent.terminated"

    # Issue Events
    ISSUE_CREATED = "issue.created"
    ISSUE_UPDATED = "issue.updated"
    ISSUE_ASSIGNED = "issue.assigned"
    ISSUE_CLOSED = "issue.closed"

    # Sprint Events
    SPRINT_STARTED = "sprint.started"
    SPRINT_COMPLETED = "sprint.completed"

    # Approval Events
    APPROVAL_REQUESTED = "approval.requested"
    APPROVAL_GRANTED = "approval.granted"
    APPROVAL_DENIED = "approval.denied"

    # Project Events
    PROJECT_CREATED = "project.created"
    PROJECT_UPDATED = "project.updated"
    PROJECT_ARCHIVED = "project.archived"

    # Workflow Events
    WORKFLOW_STARTED = "workflow.started"
    WORKFLOW_STEP_COMPLETED = "workflow.step_completed"
    WORKFLOW_COMPLETED = "workflow.completed"
    WORKFLOW_FAILED = "workflow.failed"


ActorType = Literal["agent", "user", "system"]


class Event(BaseModel):
    """
    Base event schema for the EventBus.

    All events published to the EventBus must conform to this schema.
    """

    id: str = Field(
        ...,
        description="Unique event identifier (UUID string)",
        examples=["550e8400-e29b-41d4-a716-446655440000"],
    )
    type: EventType = Field(
        ...,
        description="Event type enum value",
        examples=[EventType.AGENT_MESSAGE],
    )
    timestamp: datetime = Field(
        ...,
        description="When the event occurred (UTC)",
        examples=["2024-01-15T10:30:00Z"],
    )
    project_id: UUID = Field(
        ...,
        description="Project this event belongs to",
        examples=["550e8400-e29b-41d4-a716-446655440001"],
    )
    actor_id: UUID | None = Field(
        default=None,
        description="ID of the agent or user who triggered the event",
        examples=["550e8400-e29b-41d4-a716-446655440002"],
    )
    actor_type: ActorType = Field(
        ...,
        description="Type of actor: 'agent', 'user', or 'system'",
        examples=["agent"],
    )
    payload: dict = Field(
        default_factory=dict,
        description="Event-specific payload data",
    )

    model_config = {
        "json_schema_extra": {
            "example": {
                "id": "550e8400-e29b-41d4-a716-446655440000",
                "type": "agent.message",
                "timestamp": "2024-01-15T10:30:00Z",
                "project_id": "550e8400-e29b-41d4-a716-446655440001",
                "actor_id": "550e8400-e29b-41d4-a716-446655440002",
                "actor_type": "agent",
                "payload": {"message": "Processing task...", "progress": 50},
            }
        }
    }


# Specific payload schemas for type safety


class AgentSpawnedPayload(BaseModel):
    """Payload for AGENT_SPAWNED events."""

    agent_instance_id: UUID = Field(..., description="ID of the spawned agent instance")
    agent_type_id: UUID = Field(..., description="ID of the agent type")
    agent_name: str = Field(..., description="Human-readable name of the agent")
    role: str = Field(..., description="Agent role (e.g., 'product_owner', 'engineer')")


class AgentStatusChangedPayload(BaseModel):
    """Payload for AGENT_STATUS_CHANGED events."""

    agent_instance_id: UUID = Field(..., description="ID of the agent instance")
    previous_status: str = Field(..., description="Previous status")
    new_status: str = Field(..., description="New status")
    reason: str | None = Field(default=None, description="Reason for status change")


class AgentMessagePayload(BaseModel):
    """Payload for AGENT_MESSAGE events."""

    agent_instance_id: UUID = Field(..., description="ID of the agent instance")
    message: str = Field(..., description="Message content")
    message_type: str = Field(
        default="info",
        description="Message type: 'info', 'warning', 'error', 'debug'",
    )
    metadata: dict = Field(
        default_factory=dict,
        description="Additional metadata (e.g., token usage, model info)",
    )


class AgentTerminatedPayload(BaseModel):
    """Payload for AGENT_TERMINATED events."""

    agent_instance_id: UUID = Field(..., description="ID of the agent instance")
    termination_reason: str = Field(..., description="Reason for termination")
    final_status: str = Field(..., description="Final status at termination")


class IssueCreatedPayload(BaseModel):
    """Payload for ISSUE_CREATED events."""

    issue_id: str = Field(..., description="Issue ID (from external tracker)")
    title: str = Field(..., description="Issue title")
    priority: str | None = Field(default=None, description="Issue priority")
    labels: list[str] = Field(default_factory=list, description="Issue labels")


class IssueUpdatedPayload(BaseModel):
    """Payload for ISSUE_UPDATED events."""

    issue_id: str = Field(..., description="Issue ID (from external tracker)")
    changes: dict = Field(..., description="Dictionary of field changes")


class IssueAssignedPayload(BaseModel):
    """Payload for ISSUE_ASSIGNED events."""

    issue_id: str = Field(..., description="Issue ID (from external tracker)")
    assignee_id: UUID | None = Field(
        default=None, description="Agent or user assigned to"
    )
    assignee_name: str | None = Field(default=None, description="Assignee name")


class IssueClosedPayload(BaseModel):
    """Payload for ISSUE_CLOSED events."""

    issue_id: str = Field(..., description="Issue ID (from external tracker)")
    resolution: str = Field(..., description="Resolution status")


class SprintStartedPayload(BaseModel):
    """Payload for SPRINT_STARTED events."""

    sprint_id: UUID = Field(..., description="Sprint ID")
    sprint_name: str = Field(..., description="Sprint name")
    goal: str | None = Field(default=None, description="Sprint goal")
    issue_count: int = Field(default=0, description="Number of issues in sprint")


class SprintCompletedPayload(BaseModel):
    """Payload for SPRINT_COMPLETED events."""

    sprint_id: UUID = Field(..., description="Sprint ID")
    sprint_name: str = Field(..., description="Sprint name")
    completed_issues: int = Field(default=0, description="Number of completed issues")
    incomplete_issues: int = Field(default=0, description="Number of incomplete issues")


class ApprovalRequestedPayload(BaseModel):
    """Payload for APPROVAL_REQUESTED events."""

    approval_id: UUID = Field(..., description="Approval request ID")
    approval_type: str = Field(..., description="Type of approval needed")
    description: str = Field(..., description="Description of what needs approval")
    requested_by: UUID | None = Field(
        default=None, description="Agent/user requesting approval"
    )
    timeout_minutes: int | None = Field(
        default=None, description="Minutes before auto-escalation"
    )


class ApprovalGrantedPayload(BaseModel):
    """Payload for APPROVAL_GRANTED events."""

    approval_id: UUID = Field(..., description="Approval request ID")
    approved_by: UUID = Field(..., description="User who granted approval")
    comments: str | None = Field(default=None, description="Approval comments")


class ApprovalDeniedPayload(BaseModel):
    """Payload for APPROVAL_DENIED events."""

    approval_id: UUID = Field(..., description="Approval request ID")
    denied_by: UUID = Field(..., description="User who denied approval")
    reason: str = Field(..., description="Reason for denial")


class WorkflowStartedPayload(BaseModel):
    """Payload for WORKFLOW_STARTED events."""

    workflow_id: UUID = Field(..., description="Workflow execution ID")
    workflow_type: str = Field(..., description="Type of workflow")
    total_steps: int = Field(default=0, description="Total number of steps")


class WorkflowStepCompletedPayload(BaseModel):
    """Payload for WORKFLOW_STEP_COMPLETED events."""

    workflow_id: UUID = Field(..., description="Workflow execution ID")
    step_name: str = Field(..., description="Name of completed step")
    step_number: int = Field(..., description="Step number (1-indexed)")
    total_steps: int = Field(..., description="Total number of steps")
    result: dict = Field(default_factory=dict, description="Step result data")


class WorkflowCompletedPayload(BaseModel):
    """Payload for WORKFLOW_COMPLETED events."""

    workflow_id: UUID = Field(..., description="Workflow execution ID")
    duration_seconds: float = Field(..., description="Total execution duration")
    result: dict = Field(default_factory=dict, description="Workflow result data")


class WorkflowFailedPayload(BaseModel):
    """Payload for WORKFLOW_FAILED events."""

    workflow_id: UUID = Field(..., description="Workflow execution ID")
    error_message: str = Field(..., description="Error message")
    failed_step: str | None = Field(default=None, description="Step that failed")
    recoverable: bool = Field(default=False, description="Whether error is recoverable")
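The envelope/payload split above keeps Event strictly validated while payload stays a free-form dict that producers are expected to build from one of the typed payload models. A sketch of constructing an agent message under that convention (the uuid4() values are placeholders, and the actual Redis publish call lives outside this file):

from datetime import UTC, datetime
from uuid import uuid4

payload = AgentMessagePayload(
    agent_instance_id=uuid4(),
    message="Processing task...",
)

event = Event(
    id=str(uuid4()),
    type=EventType.AGENT_MESSAGE,
    timestamp=datetime.now(UTC),
    project_id=uuid4(),
    actor_id=uuid4(),
    actor_type="agent",
    payload=payload.model_dump(mode="json"),  # UUIDs serialized to strings
)

wire_format = event.model_dump_json()  # what would go over Redis Pub/Sub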
@@ -60,8 +60,8 @@ class OAuthAccountCreate(OAuthAccountBase):
 
     user_id: UUID
     provider_user_id: str = Field(..., max_length=255)
-    access_token: str | None = None
-    refresh_token: str | None = None
+    access_token_encrypted: str | None = None
+    refresh_token_encrypted: str | None = None
     token_expires_at: datetime | None = None
 
 
@@ -48,7 +48,7 @@ class OrganizationCreate(OrganizationBase):
     """Schema for creating a new organization."""
 
     name: str = Field(..., min_length=1, max_length=255)
-    slug: str = Field(..., min_length=1, max_length=255)  # pyright: ignore[reportIncompatibleVariableOverride]
+    slug: str = Field(..., min_length=1, max_length=255)
 
 
 class OrganizationUpdate(BaseModel):
backend/app/schemas/syndarix/__init__.py (Normal file, 113 lines)
@@ -0,0 +1,113 @@
# app/schemas/syndarix/__init__.py
"""
Syndarix domain schemas.

This package contains Pydantic schemas for validating and serializing
Syndarix domain entities.
"""

from .agent_instance import (
    AgentInstanceCreate,
    AgentInstanceInDB,
    AgentInstanceListResponse,
    AgentInstanceMetrics,
    AgentInstanceResponse,
    AgentInstanceTerminate,
    AgentInstanceUpdate,
)
from .agent_type import (
    AgentTypeCreate,
    AgentTypeInDB,
    AgentTypeListResponse,
    AgentTypeResponse,
    AgentTypeUpdate,
)
from .enums import (
    AgentStatus,
    AutonomyLevel,
    IssuePriority,
    IssueStatus,
    ProjectStatus,
    SprintStatus,
    SyncStatus,
)
from .issue import (
    IssueAssign,
    IssueClose,
    IssueCreate,
    IssueInDB,
    IssueListResponse,
    IssueResponse,
    IssueStats,
    IssueSyncUpdate,
    IssueUpdate,
)
from .project import (
    ProjectCreate,
    ProjectInDB,
    ProjectListResponse,
    ProjectResponse,
    ProjectUpdate,
)
from .sprint import (
    SprintBurndown,
    SprintComplete,
    SprintCreate,
    SprintInDB,
    SprintListResponse,
    SprintResponse,
    SprintStart,
    SprintUpdate,
    SprintVelocity,
)

__all__ = [
    # AgentInstance schemas
    "AgentInstanceCreate",
    "AgentInstanceInDB",
    "AgentInstanceListResponse",
    "AgentInstanceMetrics",
    "AgentInstanceResponse",
    "AgentInstanceTerminate",
    "AgentInstanceUpdate",
    # Enums
    "AgentStatus",
    # AgentType schemas
    "AgentTypeCreate",
    "AgentTypeInDB",
    "AgentTypeListResponse",
    "AgentTypeResponse",
    "AgentTypeUpdate",
    "AutonomyLevel",
    # Issue schemas
    "IssueAssign",
    "IssueClose",
    "IssueCreate",
    "IssueInDB",
    "IssueListResponse",
    "IssuePriority",
    "IssueResponse",
    "IssueStats",
    "IssueStatus",
    "IssueSyncUpdate",
    "IssueUpdate",
    # Project schemas
    "ProjectCreate",
    "ProjectInDB",
    "ProjectListResponse",
    "ProjectResponse",
    "ProjectStatus",
    "ProjectUpdate",
    # Sprint schemas
    "SprintBurndown",
    "SprintComplete",
    "SprintCreate",
    "SprintInDB",
    "SprintListResponse",
    "SprintResponse",
    "SprintStart",
    "SprintStatus",
    "SprintUpdate",
    "SprintVelocity",
    "SyncStatus",
]
backend/app/schemas/syndarix/agent_instance.py (Normal file, 124 lines)
@@ -0,0 +1,124 @@
# app/schemas/syndarix/agent_instance.py
"""
Pydantic schemas for AgentInstance entity.
"""

from datetime import datetime
from decimal import Decimal
from typing import Any
from uuid import UUID

from pydantic import BaseModel, ConfigDict, Field

from .enums import AgentStatus


class AgentInstanceBase(BaseModel):
    """Base agent instance schema with common fields."""

    agent_type_id: UUID
    project_id: UUID
    status: AgentStatus = AgentStatus.IDLE
    current_task: str | None = None
    short_term_memory: dict[str, Any] = Field(default_factory=dict)
    long_term_memory_ref: str | None = Field(None, max_length=500)
    session_id: str | None = Field(None, max_length=255)


class AgentInstanceCreate(BaseModel):
    """Schema for creating a new agent instance."""

    agent_type_id: UUID
    project_id: UUID
    name: str = Field(..., min_length=1, max_length=100)
    status: AgentStatus = AgentStatus.IDLE
    current_task: str | None = None
    short_term_memory: dict[str, Any] = Field(default_factory=dict)
    long_term_memory_ref: str | None = Field(None, max_length=500)
    session_id: str | None = Field(None, max_length=255)


class AgentInstanceUpdate(BaseModel):
    """Schema for updating an agent instance."""

    status: AgentStatus | None = None
    current_task: str | None = None
    short_term_memory: dict[str, Any] | None = None
    long_term_memory_ref: str | None = None
    session_id: str | None = None
    last_activity_at: datetime | None = None
    tasks_completed: int | None = Field(None, ge=0)
    tokens_used: int | None = Field(None, ge=0)
    cost_incurred: Decimal | None = Field(None, ge=0)


class AgentInstanceTerminate(BaseModel):
    """Schema for terminating an agent instance."""

    reason: str | None = None


class AgentInstanceInDB(AgentInstanceBase):
    """Schema for agent instance in database."""

    id: UUID
    last_activity_at: datetime | None = None
    terminated_at: datetime | None = None
    tasks_completed: int = 0
    tokens_used: int = 0
    cost_incurred: Decimal = Decimal("0.0000")
    created_at: datetime
    updated_at: datetime

    model_config = ConfigDict(from_attributes=True)


class AgentInstanceResponse(BaseModel):
    """Schema for agent instance API responses."""

    id: UUID
    agent_type_id: UUID
    project_id: UUID
    name: str
    status: AgentStatus
    current_task: str | None = None
    short_term_memory: dict[str, Any] = Field(default_factory=dict)
    long_term_memory_ref: str | None = None
    session_id: str | None = None
    last_activity_at: datetime | None = None
    terminated_at: datetime | None = None
    tasks_completed: int = 0
    tokens_used: int = 0
    cost_incurred: Decimal = Decimal("0.0000")
    created_at: datetime
    updated_at: datetime

    # Expanded fields from relationships
    agent_type_name: str | None = None
    agent_type_slug: str | None = None
    project_name: str | None = None
    project_slug: str | None = None
    assigned_issues_count: int | None = 0

    model_config = ConfigDict(from_attributes=True)


class AgentInstanceListResponse(BaseModel):
    """Schema for paginated agent instance list responses."""

    agent_instances: list[AgentInstanceResponse]
    total: int
    page: int
    page_size: int
    pages: int


class AgentInstanceMetrics(BaseModel):
    """Schema for agent instance metrics summary."""

    total_instances: int
    active_instances: int
    idle_instances: int
    total_tasks_completed: int
    total_tokens_used: int
|
||||
total_cost_incurred: Decimal
|
||||
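`AgentInstanceUpdate` makes every field optional, which fits PATCH semantics: dumping with `exclude_unset=True` yields only the fields the caller actually supplied. A small sketch with illustrative values, assuming the package imports as above:

```python
from decimal import Decimal
from uuid import uuid4

from app.schemas.syndarix import AgentInstanceCreate, AgentInstanceUpdate

# status defaults to AgentStatus.IDLE; the memory dict defaults to {}.
create = AgentInstanceCreate(
    agent_type_id=uuid4(),
    project_id=uuid4(),
    name="reviewer-1",
)

update = AgentInstanceUpdate(tokens_used=1200, cost_incurred=Decimal("0.0420"))
# Only the two fields that were set survive, ready for a partial update.
print(update.model_dump(exclude_unset=True))
```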
197 backend/app/schemas/syndarix/agent_type.py Normal file
@@ -0,0 +1,197 @@
```python
# app/schemas/syndarix/agent_type.py
"""
Pydantic schemas for AgentType entity.
"""

import re
from datetime import datetime
from typing import Any
from uuid import UUID

from pydantic import BaseModel, ConfigDict, Field, field_validator

from app.models.syndarix.enums import AgentTypeCategory


class AgentTypeBase(BaseModel):
    """Base agent type schema with common fields."""

    name: str = Field(..., min_length=1, max_length=255)
    slug: str | None = Field(None, min_length=1, max_length=255)
    description: str | None = None
    expertise: list[str] = Field(default_factory=list)
    personality_prompt: str = Field(..., min_length=1)
    primary_model: str = Field(..., min_length=1, max_length=100)
    fallback_models: list[str] = Field(default_factory=list)
    model_params: dict[str, Any] = Field(default_factory=dict)
    mcp_servers: list[str] = Field(default_factory=list)
    tool_permissions: dict[str, Any] = Field(default_factory=dict)
    is_active: bool = True

    # Category and display fields
    category: AgentTypeCategory | None = None
    icon: str | None = Field(None, max_length=50)
    color: str | None = Field(None, pattern=r"^#[0-9A-Fa-f]{6}$")
    sort_order: int = Field(default=0, ge=0, le=1000)
    typical_tasks: list[str] = Field(default_factory=list)
    collaboration_hints: list[str] = Field(default_factory=list)

    @field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str | None) -> str | None:
        """Validate slug format: lowercase, alphanumeric, hyphens only."""
        if v is None:
            return v
        if not re.match(r"^[a-z0-9-]+$", v):
            raise ValueError(
                "Slug must contain only lowercase letters, numbers, and hyphens"
            )
        if v.startswith("-") or v.endswith("-"):
            raise ValueError("Slug cannot start or end with a hyphen")
        if "--" in v:
            raise ValueError("Slug cannot contain consecutive hyphens")
        return v

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str) -> str:
        """Validate agent type name."""
        if not v or v.strip() == "":
            raise ValueError("Agent type name cannot be empty")
        return v.strip()

    @field_validator("expertise")
    @classmethod
    def validate_expertise(cls, v: list[str]) -> list[str]:
        """Validate and normalize expertise list."""
        return [e.strip().lower() for e in v if e.strip()]

    @field_validator("mcp_servers")
    @classmethod
    def validate_mcp_servers(cls, v: list[str]) -> list[str]:
        """Validate MCP server list."""
        return [s.strip() for s in v if s.strip()]

    @field_validator("typical_tasks")
    @classmethod
    def validate_typical_tasks(cls, v: list[str]) -> list[str]:
        """Validate and normalize typical tasks list."""
        return [t.strip() for t in v if t.strip()]

    @field_validator("collaboration_hints")
    @classmethod
    def validate_collaboration_hints(cls, v: list[str]) -> list[str]:
        """Validate and normalize collaboration hints (agent slugs)."""
        return [h.strip().lower() for h in v if h.strip()]


class AgentTypeCreate(AgentTypeBase):
    """Schema for creating a new agent type."""

    name: str = Field(..., min_length=1, max_length=255)
    slug: str = Field(..., min_length=1, max_length=255)
    personality_prompt: str = Field(..., min_length=1)
    primary_model: str = Field(..., min_length=1, max_length=100)


class AgentTypeUpdate(BaseModel):
    """Schema for updating an agent type."""

    name: str | None = Field(None, min_length=1, max_length=255)
    slug: str | None = Field(None, min_length=1, max_length=255)
    description: str | None = None
    expertise: list[str] | None = None
    personality_prompt: str | None = None
    primary_model: str | None = Field(None, min_length=1, max_length=100)
    fallback_models: list[str] | None = None
    model_params: dict[str, Any] | None = None
    mcp_servers: list[str] | None = None
    tool_permissions: dict[str, Any] | None = None
    is_active: bool | None = None

    # Category and display fields (all optional for updates)
    category: AgentTypeCategory | None = None
    icon: str | None = Field(None, max_length=50)
    color: str | None = Field(None, pattern=r"^#[0-9A-Fa-f]{6}$")
    sort_order: int | None = Field(None, ge=0, le=1000)
    typical_tasks: list[str] | None = None
    collaboration_hints: list[str] | None = None

    @field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str | None) -> str | None:
        """Validate slug format."""
        if v is None:
            return v
        if not re.match(r"^[a-z0-9-]+$", v):
            raise ValueError(
                "Slug must contain only lowercase letters, numbers, and hyphens"
            )
        if v.startswith("-") or v.endswith("-"):
            raise ValueError("Slug cannot start or end with a hyphen")
        if "--" in v:
            raise ValueError("Slug cannot contain consecutive hyphens")
        return v

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str | None) -> str | None:
        """Validate agent type name."""
        if v is not None and (not v or v.strip() == ""):
            raise ValueError("Agent type name cannot be empty")
        return v.strip() if v else v

    @field_validator("expertise")
    @classmethod
    def validate_expertise(cls, v: list[str] | None) -> list[str] | None:
        """Validate and normalize expertise list."""
        if v is None:
            return v
        return [e.strip().lower() for e in v if e.strip()]

    @field_validator("typical_tasks")
    @classmethod
    def validate_typical_tasks(cls, v: list[str] | None) -> list[str] | None:
        """Validate and normalize typical tasks list."""
        if v is None:
            return v
        return [t.strip() for t in v if t.strip()]

    @field_validator("collaboration_hints")
    @classmethod
    def validate_collaboration_hints(cls, v: list[str] | None) -> list[str] | None:
        """Validate and normalize collaboration hints (agent slugs)."""
        if v is None:
            return v
        return [h.strip().lower() for h in v if h.strip()]


class AgentTypeInDB(AgentTypeBase):
    """Schema for agent type in database."""

    id: UUID
    created_at: datetime
    updated_at: datetime

    model_config = ConfigDict(from_attributes=True)


class AgentTypeResponse(AgentTypeBase):
    """Schema for agent type API responses."""

    id: UUID
    created_at: datetime
    updated_at: datetime
    instance_count: int | None = 0

    model_config = ConfigDict(from_attributes=True)


class AgentTypeListResponse(BaseModel):
    """Schema for paginated agent type list responses."""

    agent_types: list[AgentTypeResponse]
    total: int
    page: int
    page_size: int
    pages: int
```
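The slug validator rejects three distinct shapes of bad input instead of folding everything into one regex, which gives callers a specific error message per failure mode. A sketch of the behavior; the model id is a placeholder, not a real identifier:

```python
from pydantic import ValidationError

from app.schemas.syndarix import AgentTypeCreate

base = dict(
    name="Code Reviewer",
    personality_prompt="You review pull requests carefully.",
    primary_model="example-model",  # placeholder model id
)

AgentTypeCreate(slug="code-reviewer", **base)  # passes

# Each bad slug trips a different branch of validate_slug.
for bad in ("Code-Reviewer", "-reviewer", "code--reviewer"):
    try:
        AgentTypeCreate(slug=bad, **base)
    except ValidationError as exc:
        print(bad, "->", exc.errors()[0]["msg"])
```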
26 backend/app/schemas/syndarix/enums.py Normal file
@@ -0,0 +1,26 @@
```python
# app/schemas/syndarix/enums.py
"""
Re-export enums from models for use in schemas.

This allows schemas to import enums without depending on SQLAlchemy models directly.
"""

from app.models.syndarix.enums import (
    AgentStatus,
    AutonomyLevel,
    IssuePriority,
    IssueStatus,
    ProjectStatus,
    SprintStatus,
    SyncStatus,
)

__all__ = [
    "AgentStatus",
    "AutonomyLevel",
    "IssuePriority",
    "IssueStatus",
    "ProjectStatus",
    "SprintStatus",
    "SyncStatus",
]
```
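Because this module only re-exports, the enum classes are the very objects defined under `app.models`; schema code picks them up without importing the SQLAlchemy side. A one-line sanity check, assuming both packages are importable:

```python
from app.models.syndarix import enums as model_enums
from app.schemas.syndarix import enums as schema_enums

# Re-export, not a copy: the classes are the same objects.
assert schema_enums.IssueStatus is model_enums.IssueStatus
```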
191 backend/app/schemas/syndarix/issue.py Normal file
@@ -0,0 +1,191 @@
```python
# app/schemas/syndarix/issue.py
"""
Pydantic schemas for Issue entity.
"""

from datetime import datetime
from typing import Literal
from uuid import UUID

from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

from .enums import IssuePriority, IssueStatus, SyncStatus


class IssueBase(BaseModel):
    """Base issue schema with common fields."""

    title: str = Field(..., min_length=1, max_length=500)
    body: str = ""
    status: IssueStatus = IssueStatus.OPEN
    priority: IssuePriority = IssuePriority.MEDIUM
    labels: list[str] = Field(default_factory=list)
    story_points: int | None = Field(None, ge=0, le=100)

    @field_validator("title")
    @classmethod
    def validate_title(cls, v: str) -> str:
        """Validate issue title."""
        if not v or v.strip() == "":
            raise ValueError("Issue title cannot be empty")
        return v.strip()

    @field_validator("labels")
    @classmethod
    def validate_labels(cls, v: list[str]) -> list[str]:
        """Validate and normalize labels."""
        return [label.strip().lower() for label in v if label.strip()]


class IssueCreate(IssueBase):
    """Schema for creating a new issue."""

    project_id: UUID
    assigned_agent_id: UUID | None = None
    human_assignee: str | None = Field(None, max_length=255)
    sprint_id: UUID | None = None

    # External tracker fields (optional, for importing from external systems)
    external_tracker_type: Literal["gitea", "github", "gitlab"] | None = None
    external_issue_id: str | None = Field(None, max_length=255)
    remote_url: str | None = Field(None, max_length=1000)
    external_issue_number: int | None = None


class IssueUpdate(BaseModel):
    """Schema for updating an issue."""

    title: str | None = Field(None, min_length=1, max_length=500)
    body: str | None = None
    status: IssueStatus | None = None
    priority: IssuePriority | None = None
    labels: list[str] | None = None
    assigned_agent_id: UUID | None = None
    human_assignee: str | None = Field(None, max_length=255)
    sprint_id: UUID | None = None
    story_points: int | None = Field(None, ge=0, le=100)
    sync_status: SyncStatus | None = None

    @field_validator("title")
    @classmethod
    def validate_title(cls, v: str | None) -> str | None:
        """Validate issue title."""
        if v is not None and (not v or v.strip() == ""):
            raise ValueError("Issue title cannot be empty")
        return v.strip() if v else v

    @field_validator("labels")
    @classmethod
    def validate_labels(cls, v: list[str] | None) -> list[str] | None:
        """Validate and normalize labels."""
        if v is None:
            return v
        return [label.strip().lower() for label in v if label.strip()]


class IssueClose(BaseModel):
    """Schema for closing an issue."""

    resolution: str | None = None  # Optional resolution note


class IssueAssign(BaseModel):
    """Schema for assigning an issue."""

    assigned_agent_id: UUID | None = None
    human_assignee: str | None = Field(None, max_length=255)

    @model_validator(mode="after")
    def validate_assignment(self) -> "IssueAssign":
        """Ensure only one type of assignee is set."""
        if self.assigned_agent_id and self.human_assignee:
            raise ValueError("Cannot assign to both an agent and a human. Choose one.")
        return self


class IssueSyncUpdate(BaseModel):
    """Schema for updating sync-related fields."""

    sync_status: SyncStatus
    last_synced_at: datetime | None = None
    external_updated_at: datetime | None = None


class IssueInDB(IssueBase):
    """Schema for issue in database."""

    id: UUID
    project_id: UUID
    assigned_agent_id: UUID | None = None
    human_assignee: str | None = None
    sprint_id: UUID | None = None
    external_tracker_type: str | None = None
    external_issue_id: str | None = None
    remote_url: str | None = None
    external_issue_number: int | None = None
    sync_status: SyncStatus = SyncStatus.SYNCED
    last_synced_at: datetime | None = None
    external_updated_at: datetime | None = None
    closed_at: datetime | None = None
    created_at: datetime
    updated_at: datetime

    model_config = ConfigDict(from_attributes=True)


class IssueResponse(BaseModel):
    """Schema for issue API responses."""

    id: UUID
    project_id: UUID
    title: str
    body: str
    status: IssueStatus
    priority: IssuePriority
    labels: list[str] = Field(default_factory=list)
    assigned_agent_id: UUID | None = None
    human_assignee: str | None = None
    sprint_id: UUID | None = None
    story_points: int | None = None
    external_tracker_type: str | None = None
    external_issue_id: str | None = None
    remote_url: str | None = None
    external_issue_number: int | None = None
    sync_status: SyncStatus = SyncStatus.SYNCED
    last_synced_at: datetime | None = None
    external_updated_at: datetime | None = None
    closed_at: datetime | None = None
    created_at: datetime
    updated_at: datetime

    # Expanded fields from relationships
    project_name: str | None = None
    project_slug: str | None = None
    sprint_name: str | None = None
    assigned_agent_type_name: str | None = None

    model_config = ConfigDict(from_attributes=True)


class IssueListResponse(BaseModel):
    """Schema for paginated issue list responses."""

    issues: list[IssueResponse]
    total: int
    page: int
    page_size: int
    pages: int


class IssueStats(BaseModel):
    """Schema for issue statistics."""

    total: int
    open: int
    in_progress: int
    in_review: int
    blocked: int
    closed: int
    by_priority: dict[str, int]
    total_story_points: int | None = None
    completed_story_points: int | None = None
```
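`IssueAssign` uses an after-mode model validator for a cross-field rule that no single field validator could express: an issue can go to an agent or a human, but not both. A sketch of the three accepted shapes and the one rejected shape:

```python
from uuid import uuid4

from pydantic import ValidationError

from app.schemas.syndarix import IssueAssign

IssueAssign(assigned_agent_id=uuid4())           # agent only: fine
IssueAssign(human_assignee="alice@example.com")  # human only: fine
IssueAssign()                                    # neither: unassign, also fine

try:
    IssueAssign(assigned_agent_id=uuid4(), human_assignee="alice@example.com")
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # both set -> rejected by validate_assignment
```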
131 backend/app/schemas/syndarix/project.py Normal file
@@ -0,0 +1,131 @@
```python
# app/schemas/syndarix/project.py
"""
Pydantic schemas for Project entity.
"""

import re
from datetime import datetime
from typing import Any
from uuid import UUID

from pydantic import BaseModel, ConfigDict, Field, field_validator

from .enums import AutonomyLevel, ProjectStatus


class ProjectBase(BaseModel):
    """Base project schema with common fields."""

    name: str = Field(..., min_length=1, max_length=255)
    slug: str | None = Field(None, min_length=1, max_length=255)
    description: str | None = None
    autonomy_level: AutonomyLevel = AutonomyLevel.MILESTONE
    status: ProjectStatus = ProjectStatus.ACTIVE
    settings: dict[str, Any] = Field(default_factory=dict)

    @field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str | None) -> str | None:
        """Validate slug format: lowercase, alphanumeric, hyphens only."""
        if v is None:
            return v
        if not re.match(r"^[a-z0-9-]+$", v):
            raise ValueError(
                "Slug must contain only lowercase letters, numbers, and hyphens"
            )
        if v.startswith("-") or v.endswith("-"):
            raise ValueError("Slug cannot start or end with a hyphen")
        if "--" in v:
            raise ValueError("Slug cannot contain consecutive hyphens")
        return v

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str) -> str:
        """Validate project name."""
        if not v or v.strip() == "":
            raise ValueError("Project name cannot be empty")
        return v.strip()


class ProjectCreate(ProjectBase):
    """Schema for creating a new project."""

    name: str = Field(..., min_length=1, max_length=255)
    slug: str = Field(..., min_length=1, max_length=255)
    owner_id: UUID | None = None


class ProjectUpdate(BaseModel):
    """Schema for updating a project.

    Note: owner_id is intentionally excluded to prevent IDOR vulnerabilities.
    Project ownership transfer should be done via a dedicated endpoint with
    proper authorization checks.
    """

    name: str | None = Field(None, min_length=1, max_length=255)
    slug: str | None = Field(None, min_length=1, max_length=255)
    description: str | None = None
    autonomy_level: AutonomyLevel | None = None
    status: ProjectStatus | None = None
    settings: dict[str, Any] | None = None

    @field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str | None) -> str | None:
        """Validate slug format."""
        if v is None:
            return v
        if not re.match(r"^[a-z0-9-]+$", v):
            raise ValueError(
                "Slug must contain only lowercase letters, numbers, and hyphens"
            )
        if v.startswith("-") or v.endswith("-"):
            raise ValueError("Slug cannot start or end with a hyphen")
        if "--" in v:
            raise ValueError("Slug cannot contain consecutive hyphens")
        return v

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str | None) -> str | None:
        """Validate project name."""
        if v is not None and (not v or v.strip() == ""):
            raise ValueError("Project name cannot be empty")
        return v.strip() if v else v


class ProjectInDB(ProjectBase):
    """Schema for project in database."""

    id: UUID
    owner_id: UUID | None = None
    created_at: datetime
    updated_at: datetime

    model_config = ConfigDict(from_attributes=True)


class ProjectResponse(ProjectBase):
    """Schema for project API responses."""

    id: UUID
    owner_id: UUID | None = None
    created_at: datetime
    updated_at: datetime
    agent_count: int | None = 0
    issue_count: int | None = 0
    active_sprint_name: str | None = None

    model_config = ConfigDict(from_attributes=True)


class ProjectListResponse(BaseModel):
    """Schema for paginated project list responses."""

    projects: list[ProjectResponse]
    total: int
    page: int
    page_size: int
    pages: int
```
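Leaving `owner_id` off `ProjectUpdate` means a client cannot change ownership through the generic update path: under Pydantic's default `extra="ignore"` behavior, an `owner_id` key in the payload is simply dropped. A minimal sketch:

```python
from uuid import uuid4

from app.schemas.syndarix import ProjectUpdate

payload = {"name": "Renamed", "owner_id": str(uuid4())}  # owner_id smuggled in
update = ProjectUpdate.model_validate(payload)

# owner_id never reaches the model, so it can never reach the database.
print(update.model_dump(exclude_unset=True))  # {'name': 'Renamed'}
```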
135 backend/app/schemas/syndarix/sprint.py Normal file
@@ -0,0 +1,135 @@
```python
# app/schemas/syndarix/sprint.py
"""
Pydantic schemas for Sprint entity.
"""

from datetime import date, datetime
from uuid import UUID

from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

from .enums import SprintStatus


class SprintBase(BaseModel):
    """Base sprint schema with common fields."""

    name: str = Field(..., min_length=1, max_length=255)
    number: int = Field(..., ge=1)
    goal: str | None = None
    start_date: date
    end_date: date
    status: SprintStatus = SprintStatus.PLANNED
    planned_points: int | None = Field(None, ge=0)
    velocity: int | None = Field(None, ge=0)

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str) -> str:
        """Validate sprint name."""
        if not v or v.strip() == "":
            raise ValueError("Sprint name cannot be empty")
        return v.strip()

    @model_validator(mode="after")
    def validate_dates(self) -> "SprintBase":
        """Validate that end_date is on or after start_date."""
        if self.end_date < self.start_date:
            raise ValueError("End date must be after or equal to start date")
        return self


class SprintCreate(SprintBase):
    """Schema for creating a new sprint."""

    project_id: UUID


class SprintUpdate(BaseModel):
    """Schema for updating a sprint."""

    name: str | None = Field(None, min_length=1, max_length=255)
    goal: str | None = None
    start_date: date | None = None
    end_date: date | None = None
    status: SprintStatus | None = None
    planned_points: int | None = Field(None, ge=0)
    velocity: int | None = Field(None, ge=0)

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str | None) -> str | None:
        """Validate sprint name."""
        if v is not None and (not v or v.strip() == ""):
            raise ValueError("Sprint name cannot be empty")
        return v.strip() if v else v


class SprintStart(BaseModel):
    """Schema for starting a sprint."""

    start_date: date | None = None  # Optionally override start date


class SprintComplete(BaseModel):
    """Schema for completing a sprint."""

    velocity: int | None = Field(None, ge=0)
    notes: str | None = None


class SprintInDB(SprintBase):
    """Schema for sprint in database."""

    id: UUID
    project_id: UUID
    created_at: datetime
    updated_at: datetime

    model_config = ConfigDict(from_attributes=True)


class SprintResponse(SprintBase):
    """Schema for sprint API responses."""

    id: UUID
    project_id: UUID
    created_at: datetime
    updated_at: datetime

    # Expanded fields from relationships
    project_name: str | None = None
    project_slug: str | None = None
    issue_count: int | None = 0
    open_issues: int | None = 0
    completed_issues: int | None = 0

    model_config = ConfigDict(from_attributes=True)


class SprintListResponse(BaseModel):
    """Schema for paginated sprint list responses."""

    sprints: list[SprintResponse]
    total: int
    page: int
    page_size: int
    pages: int


class SprintVelocity(BaseModel):
    """Schema for sprint velocity metrics."""

    sprint_number: int
    sprint_name: str
    planned_points: int | None
    velocity: int | None  # Sum of completed story points
    velocity_ratio: float | None  # velocity/planned ratio


class SprintBurndown(BaseModel):
    """Schema for sprint burndown data point."""

    date: date
    remaining_points: int
    ideal_remaining: float
```
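The date-order check lives on `SprintBase`, so it runs for full payloads (`SprintCreate`, `SprintInDB`, `SprintResponse`); `SprintUpdate` is a plain `BaseModel` with no model validator, so partial date edits are not cross-checked at the schema layer and would need a service-level guard. A sketch of the create-side behavior:

```python
from datetime import date
from uuid import uuid4

from pydantic import ValidationError

from app.schemas.syndarix import SprintCreate

try:
    SprintCreate(
        name="Sprint 1",
        number=1,
        project_id=uuid4(),
        start_date=date(2025, 1, 20),
        end_date=date(2025, 1, 6),  # before start_date
    )
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # "End date must be after or equal to start date"
```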
Some files were not shown because too many files have changed in this diff.