generated code

This commit is contained in:
laxmanhalaki 2026-03-10 16:44:04 +05:30
commit 131f7ad8e8
244 changed files with 50684 additions and 0 deletions

142
.github/workflows/ci.yml vendored Normal file
View File

@ -0,0 +1,142 @@
name: CI/CD Pipeline

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main ]

env:
  DOCKER_REGISTRY: ghcr.io
  # NOTE(review): the original file had bare "$" where GitHub Actions
  # expressions were stripped; the expressions below are reconstructed from
  # standard conventions — confirm secret names against the repository.
  IMAGE_NAME: ${{ github.repository }}

jobs:
  test:
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: test_db
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
      redis:
        image: redis:7-alpine
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 6379:6379
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: 'pip'

      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          pip install pytest pytest-cov pytest-asyncio black flake8 mypy

      # "|| true" keeps lint/format failures advisory rather than blocking.
      - name: Run linter
        run: |
          flake8 src/ tests/ --max-line-length=120 --ignore=E203,W503 || true
          black --check src/ tests/ || true

      - name: Run type checker
        run: mypy src/ --ignore-missing-imports || true

      - name: Run unit tests
        run: pytest tests/ --cov=src --cov-report=xml --cov-report=html
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/test_db
          JWT_SECRET: test-secret
          REDIS_HOST: localhost
          REDIS_PORT: 6379

      - name: Upload coverage reports
        uses: codecov/codecov-action@v3
        with:
          files: ./coverage.xml

  build:
    needs: test
    runs-on: ubuntu-latest
    # FIX(review): pushing to GHCR with GITHUB_TOKEN requires the
    # "packages: write" permission.
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.DOCKER_REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile
          push: true
          tags: |
            ${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
            ${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:latest

  deploy-staging:
    needs: build
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Configure kubectl
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.KUBE_CONFIG_STAGING }}

      - name: Deploy to staging
        run: |
          kubectl set image deployment/test_project-deployment test_project=${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} -n staging

  deploy-production:
    needs: build
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/main'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Configure kubectl
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.KUBE_CONFIG_PRODUCTION }}

      - name: Deploy to production
        run: |
          kubectl set image deployment/test_project-deployment test_project=${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} -n production

44
Dockerfile Normal file
View File

@ -0,0 +1,44 @@
# Multi-stage Dockerfile for FastAPI application
FROM python:3.11-slim AS builder

WORKDIR /app

# Build-time system dependencies (gcc for packages with C extensions)
RUN apt-get update && apt-get install -y \
    gcc \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .

# --user installs into /root/.local so the whole tree can be copied out below
RUN pip install --no-cache-dir --user -r requirements.txt

# Production stage
FROM python:3.11-slim

WORKDIR /app

# Create the non-root runtime user first so ownership can be set on COPY.
RUN useradd -m -u 1001 appuser

# FIX(review): the original copied packages to /root/.local, which is not
# readable by the non-root "appuser" (and Python's user-site for appuser is
# /home/appuser/.local anyway) — install under appuser's home instead.
COPY --from=builder --chown=appuser:appuser /root/.local /home/appuser/.local
COPY --from=builder /app/requirements.txt .

# Copy application code, owned by the runtime user
COPY --chown=appuser:appuser . .

# Make sure scripts installed by pip --user are on PATH
ENV PATH=/home/appuser/.local/bin:$PATH

USER appuser

EXPOSE 8000

# Health check against the app's /health endpoint (no curl needed in slim image)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

72
docker-compose.yml Normal file
View File

@ -0,0 +1,72 @@
version: '3.8'

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: test_project_app
    ports:
      - "${PORT:-8000}:8000"
    environment:
      - ENV=production
      - DATABASE_URL=${DATABASE_URL}
      - JWT_SECRET=${JWT_SECRET}
      - REDIS_HOST=redis
      - REDIS_PORT=6379
    # FIX(review): wait for dependencies to be HEALTHY, not merely started —
    # both services define healthchecks below, so use them. The short form
    # only ordered container start-up, letting the app race the database.
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - test_project_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3

  postgres:
    image: postgres:15-alpine
    container_name: test_project_postgres
    environment:
      - POSTGRES_DB=${POSTGRES_DB:-test_project_db}
      - POSTGRES_USER=${POSTGRES_USER:-postgres}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres}
    ports:
      - "${POSTGRES_PORT:-5432}:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - test_project_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    container_name: test_project_redis
    ports:
      - "${REDIS_PORT:-6379}:6379"
    volumes:
      - redis_data:/data
    networks:
      - test_project_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

volumes:
  postgres_data:
  redis_data:

networks:
  test_project_network:
    driver: bridge

79
error.py Normal file
View File

@ -0,0 +1,79 @@
"""
Custom Error Classes
Framework-specific error handling for FastAPI
"""
from fastapi import HTTPException, status
from typing import Optional, Dict, Any
class AppError(Exception):
    """Base class for application errors carrying an HTTP status and details.

    Attributes:
        message: Human-readable error description.
        status_code: HTTP status to report (defaults to 500).
        details: Optional structured context for the error.
    """
    def __init__(
        self,
        message: str,
        status_code: int = 500,
        details: Optional[Dict[str, Any]] = None
    ):
        self.message = message
        self.status_code = status_code
        self.details = {} if details is None else details
        super().__init__(message)
class ValidationError(AppError):
    """Raised when request validation fails (HTTP 400)."""
    def __init__(self, message: str = "Validation error", errors: Optional[Dict] = None):
        payload = {"errors": errors if errors is not None else {}}
        super().__init__(message, status_code=400, details=payload)
class NotFoundError(AppError):
    """Raised when a requested resource does not exist (HTTP 404)."""
    def __init__(self, resource: str = "Resource"):
        super().__init__("{} not found".format(resource), status_code=404)
class UnauthorizedError(AppError):
    """Raised when the caller is not authenticated (HTTP 401)."""
    def __init__(self, message: str = "Unauthorized access"):
        super().__init__(message, 401)
class ForbiddenError(AppError):
    """Raised when the caller lacks permission (HTTP 403)."""
    def __init__(self, message: str = "Forbidden access"):
        super().__init__(message, 403)
class ConflictError(AppError):
    """Raised when a request conflicts with existing state (HTTP 409)."""
    def __init__(self, message: str = "Resource conflict"):
        super().__init__(message, 409)
class BadRequestError(AppError):
    """Raised for a malformed or otherwise invalid request (HTTP 400)."""
    def __init__(self, message: str = "Bad request"):
        super().__init__(message, 400)
# HTTP Exception helpers
def raise_validation_error(message: str, errors: Optional[Dict] = None):
    """Abort the current request with a 400 validation HTTPException."""
    detail = {"message": message, "errors": errors or {}}
    raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)
def raise_not_found_error(resource: str = "Resource"):
    """Abort the current request with a 404 HTTPException for a missing resource."""
    detail = {"message": f"{resource} not found"}
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=detail)
def raise_unauthorized_error(message: str = "Unauthorized access"):
    """Abort the current request with a 401 HTTPException."""
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail={"message": message},
    )

49
k8s/configmap.yaml Normal file
View File

@ -0,0 +1,49 @@
# Non-sensitive runtime configuration, injected into pods via envFrom
# (see k8s/deployment.yaml). Secrets belong in test_project-secrets instead.
apiVersion: v1
kind: ConfigMap
metadata:
  name: test_project-config
  namespace: test_project
  labels:
    app: test_project
data:
  # Application Configuration
  ENVIRONMENT: "production"
  APP_NAME: "test_project"
  APP_VERSION: "1.0.0"
  PORT: "8000"
  HOST: "0.0.0.0"
  API_PREFIX: "/api/v1"
  # Database Configuration (non-sensitive)
  DB_HOST: "postgres-service"
  DB_PORT: "5432"
  DB_NAME: "test_project_db"
  # Redis Configuration
  # (empty — NOTE(review): the compose file sets REDIS_HOST/REDIS_PORT for the
  # app; confirm whether the k8s deployment also needs them here)
  # Kafka Configuration
  # Logging Configuration
  LOG_LEVEL: "info"
  LOG_FORMAT: "json"
  # Observability Configuration
  # CORS Configuration
  # NOTE(review): the Python Settings class reads CORS_ORIGINS (plural, a
  # JSON list); this singular string key may never be consumed — verify.
  CORS_ORIGIN: "*"
  CORS_METHODS: "GET,POST,PUT,DELETE,OPTIONS"
  CORS_CREDENTIALS: "true"
  # Rate Limiting
  RATE_LIMIT_WINDOW_MS: "900000"  # 15 minutes
  RATE_LIMIT_MAX: "100"
  # File Upload
  MAX_FILE_SIZE: "10485760"  # 10 MiB
  UPLOAD_DIR: "/tmp/uploads"
  # Health Check Configuration
  HEALTH_CHECK_INTERVAL: "30"
  READINESS_TIMEOUT: "5"

118
k8s/deployment.yaml Normal file
View File

@ -0,0 +1,118 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  # NOTE(review): names containing "_" are not valid RFC 1123 DNS labels and
  # will be rejected by the API server — a project-wide rename to
  # "test-project" is needed (flagged, not changed here, since the CI kubectl
  # commands and every other manifest reference these names).
  name: test_project-deployment
  namespace: test_project
  labels:
    app: test_project
    version: "1.0.0"
    component: backend
spec:
  # NOTE(review): k8s/hpa.yaml also targets this Deployment; a fixed
  # "replicas" resets scaling on every "kubectl apply" — consider removing
  # the field and letting the HPA own the replica count.
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: test_project
  template:
    metadata:
      labels:
        app: test_project
        version: "1.0.0"
      annotations:
        # NOTE(review): port 9090 is advertised for scraping but the container
        # only declares port 8000 — confirm the metrics port.
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
        prometheus.io/path: "/metrics"
    spec:
      serviceAccountName: test_project-service-account
      securityContext:
        runAsNonRoot: true
        # FIX(review): the image creates "appuser" with UID 1001
        # (Dockerfile: useradd -m -u 1001); 1000 did not match and would
        # break file ownership assumptions.
        runAsUser: 1001
        fsGroup: 1001
      containers:
        - name: test_project
          # NOTE(review): ":latest" + IfNotPresent means nodes may run stale
          # images; CI pushes an immutable ":<sha>" tag — prefer pinning it.
          image: ghcr.io/test_project:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8000
              protocol: TCP
          envFrom:
            - configMapRef:
                name: test_project-config
            - secretRef:
                name: test_project-secrets
          env:
            # Override sensitive values from secrets
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: test_project-secrets
                  key: database-url
            - name: JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: test_project-secrets
                  key: jwt-secret
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8000
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            # NOTE(review): /health/ready is not visible in main.py (only
            # /health) — confirm the readiness route exists.
            httpGet:
              path: /health/ready
              port: 8000
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 3
            successThreshold: 1
            failureThreshold: 3
          startupProbe:
            httpGet:
              path: /health
              port: 8000
              scheme: HTTP
            initialDelaySeconds: 0
            periodSeconds: 10
            timeoutSeconds: 3
            successThreshold: 1
            failureThreshold: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: false
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          volumeMounts:
            - name: tmp
              mountPath: /tmp
            - name: logs
              mountPath: /var/log/app
      volumes:
        - name: tmp
          emptyDir: {}
        - name: logs
          emptyDir: {}
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst

45
k8s/hpa.yaml Normal file
View File

@ -0,0 +1,45 @@
# Horizontal Pod Autoscaler for the backend deployment: 3–10 replicas,
# driven by CPU and memory utilization.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: test_project-hpa
  namespace: test_project
  labels:
    app: test_project
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: test_project-deployment
  minReplicas: 3
  maxReplicas: 10
  metrics:
    # Scale when average CPU utilization across pods exceeds 70%
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    # ...or when average memory utilization exceeds 80%
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    # Scale down cautiously: 5-minute stabilization, shed at most 50%/minute
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
        - type: Percent
          value: 50
          periodSeconds: 60
    # Scale up aggressively: no stabilization; apply whichever policy adds more
    scaleUp:
      stabilizationWindowSeconds: 0
      policies:
        - type: Percent
          value: 100
          periodSeconds: 15
        - type: Pods
          value: 2
          periodSeconds: 15
      selectPolicy: Max

10
k8s/namespace.yaml Normal file
View File

@ -0,0 +1,10 @@
# Dedicated namespace for all test_project resources.
# NOTE(review): "test_project" contains an underscore, which is not a valid
# RFC 1123 DNS label — the API server will reject this name. Renaming (e.g.
# to "test-project") requires touching every manifest and the CI kubectl
# commands, so it is flagged here rather than changed piecemeal.
apiVersion: v1
kind: Namespace
metadata:
  name: test_project
  labels:
    name: test_project  # matched by namespaceSelectors in k8s/networkpolicy.yaml
    app: test_project
    environment: production
    managed-by: codenuk

63
k8s/networkpolicy.yaml Normal file
View File

@ -0,0 +1,63 @@
# Default-deny style policy for the app pods: anything not listed below is
# blocked in both directions once this policy selects the pods.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test_project-network-policy
  namespace: test_project
  labels:
    app: test_project
spec:
  podSelector:
    matchLabels:
      app: test_project
  policyTypes:
    - Ingress
    - Egress
  ingress:
    # Allow ingress from same namespace
    # NOTE(review): namespaceSelector and podSelector as SEPARATE "from"
    # entries mean OR (either matches), not AND — confirm that is intended.
    - from:
        - namespaceSelector:
            matchLabels:
              name: test_project
        - podSelector:
            matchLabels:
              app: test_project
      ports:
        - protocol: TCP
          port: 8000
    # Allow ingress from ingress controller
    - from:
        - namespaceSelector:
            matchLabels:
              name: ingress-nginx
        - podSelector:
            matchLabels:
              app: ingress-nginx
      ports:
        - protocol: TCP
          port: 8000
    # Allow ingress from monitoring namespace (Prometheus)
    # (no rule present — the Prometheus scrape annotations on the pods will be
    # blocked until one is added; TODO confirm)
  egress:
    # Allow DNS resolution
    - to:
        - namespaceSelector: {}
      ports:
        - protocol: UDP
          port: 53
    # Allow egress to database
    - to:
        - podSelector:
            matchLabels:
              app: postgres
      ports:
        - protocol: TCP
          port: 5432
    # Allow egress to Redis
    # (no rule present — NOTE(review): if the app connects to Redis in-cluster,
    # that traffic is currently denied; verify)
    # Allow egress to Kafka
    # Allow egress to external APIs (HTTPS)
    - to:
        - namespaceSelector: {}
      ports:
        - protocol: TCP
          port: 443
    # Allow egress to monitoring (Prometheus)

19
k8s/secret.yaml Normal file
View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Secret
metadata:
  name: test_project-secrets
  namespace: test_project
  labels:
    app: test_project
type: Opaque
# FIX(review): a bare "data:" key parses as null, not an empty map — use an
# explicit {} until real values are added. Values under "data" must be
# base64-encoded (or use "stringData" for plain text). The deployment
# references at least these keys: database-url, jwt-secret.
# Also expected here (per the original placeholders): API keys,
# Kafka/Redis credentials, external service keys, SAML/OAuth secrets.
# Do not commit real secret values to version control.
data: {}

26
k8s/service.yaml Normal file
View File

@ -0,0 +1,26 @@
apiVersion: v1
kind: Service
metadata:
  name: test_project-service
  namespace: test_project
  labels:
    app: test_project
    service: test_project
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9090"
    prometheus.io/path: "/metrics"
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 80
      targetPort: 8000
      protocol: TCP
  selector:
    app: test_project
  # FIX(review): the original set sessionAffinity: None together with a
  # sessionAffinityConfig block — API validation rejects the config unless
  # sessionAffinity is ClientIP. Dropped the config; if sticky sessions are
  # actually wanted, set sessionAffinity: ClientIP and restore
  # sessionAffinityConfig.clientIP.timeoutSeconds: 10800.
  sessionAffinity: None

9
k8s/serviceaccount.yaml Normal file
View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: test_project-service-account
  namespace: test_project
  labels:
    app: test_project
# FIX(review): nothing in the visible application code talks to the
# Kubernetes API, so do not auto-mount an API token into every pod (least
# privilege). Re-enable per-pod if a workload genuinely needs it — verify.
automountServiceAccountToken: false

87
main.py Normal file
View File

@ -0,0 +1,87 @@
"""
FastAPI Application Entry Point
Enterprise-grade FastAPI application with proper structure and middleware
"""
import logging
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from src.config.config import settings
from src.config.migrate import migrate_sync
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Import routers
# ========== DATABASE MIGRATIONS ==========
# Run database migrations on application startup
logger.info("🔄 Running database migrations...")
if not migrate_sync(settings.DATABASE_URL):
logger.warning("⚠️ Some migrations failed, but application will continue")
# ========== FASTAPI APPLICATION INITIALIZATION ==========
# Initialize FastAPI application
app = FastAPI(
title=settings.APP_NAME,
version=settings.APP_VERSION,
description="",
docs_url="/docs" if settings.DEBUG else None,
redoc_url="/redoc" if settings.DEBUG else None,
openapi_url="/openapi.json" if settings.DEBUG else None,
)
# CORS Middleware Configuration
app.add_middleware(
CORSMiddleware,
allow_origins=settings.CORS_ORIGINS if isinstance(settings.CORS_ORIGINS, list) else ["*"],
allow_credentials=True,
allow_methods=settings.CORS_METHODS if isinstance(settings.CORS_METHODS, list) else ["*"],
allow_headers=settings.CORS_HEADERS if isinstance(settings.CORS_HEADERS, list) else ["*"],
)
# Include routers
@app.on_event("startup")
async def startup_event():
"""
Application startup event handler
Performs initialization tasks before accepting requests
"""
logger.info("🚀 FastAPI application started successfully")
logger.info(f"📚 API Documentation available at: http://localhost:{settings.PORT}/docs")
@app.on_event("shutdown")
async def shutdown_event():
"""
Application shutdown event handler
Performs cleanup tasks when application stops
"""
logger.info("🛑 FastAPI application shutting down")
@app.get("/")
async def root():
"""
Root endpoint - API information and health status
"""
return {
"message": "Welcome to API",
"version": settings.APP_VERSION,
"docs": "/docs" if settings.DEBUG else "disabled",
"status": "running"
}
@app.get("/health")
async def health_check():
"""
Health check endpoint for monitoring
"""
return {
"status": "healthy",
"app": settings.APP_NAME,
"version": settings.APP_VERSION
}

7987
postman_collection.json Normal file

File diff suppressed because it is too large Load Diff

4
src/config/base.py Normal file
View File

@ -0,0 +1,4 @@
from sqlalchemy.orm import declarative_base

# Shared declarative base; every ORM model in src/models inherits from this
# so metadata (table definitions) is collected in one registry.
Base = declarative_base()

61
src/config/config.py Normal file
View File

@ -0,0 +1,61 @@
"""
FastAPI Application Configuration
Enterprise-grade configuration management using Pydantic Settings
"""
from pydantic_settings import BaseSettings
from typing import List, Optional
class Settings(BaseSettings):
"""
Application settings loaded from environment variables.
Uses Pydantic Settings for type-safe configuration management.
"""
# Application
APP_NAME: str = ""
APP_VERSION: str = "1.0.0"
DEBUG: bool = False
APP_DESCRIPTION: str = "Enterprise FastAPI Application"
# Database
DATABASE_URL: str = "postgresql://user:password@localhost:5432/"
DB_POOL_SIZE: int = 10
DB_MAX_OVERFLOW: int = 20
DB_POOL_RECYCLE: int = 3600
DB_ECHO: bool = False
# Server
HOST: str = "0.0.0.0"
PORT: int = 8000
# Security
SECRET_KEY: str = ""
ALGORITHM: str = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES: int = 30
REFRESH_TOKEN_EXPIRE_DAYS: int = 7
# CORS Configuration
CORS_ORIGINS: List[str] = ["*"]
CORS_METHODS: List[str] = ["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"]
CORS_HEADERS: List[str] = ["*"]
# Logging
LOG_LEVEL: str = "INFO"
LOG_FORMAT: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# RAG Configuration
OPENAI_API_KEY: Optional[str] = None
ANTHROPIC_API_KEY: Optional[str] = None
VECTOR_DB_DIR: str = "./chroma_db"
EMBEDDING_PROVIDER: str = "huggingface" # "openai" or "huggingface"
LLM_PROVIDER: str = "openai" # "openai" or "anthropic"
RAG_CHUNK_SIZE: int = 1000
RAG_CHUNK_OVERLAP: int = 100
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
case_sensitive = False
# Global settings instance
settings = Settings()

14
src/config/database.py Normal file
View File

@ -0,0 +1,14 @@
"""
Database session management for FastAPI.

FIX(review): the original module imported SessionLocal (and get_db) from
itself — `from src.config.database import SessionLocal, get_db` inside
src/config/database.py is a circular self-import, and SessionLocal was never
actually created anywhere. The engine and session factory are now built here
from the application settings.
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from src.config.config import settings

# Engine configured from Settings; pool_pre_ping drops dead connections.
engine = create_engine(
    settings.DATABASE_URL,
    pool_pre_ping=True,
    pool_size=settings.DB_POOL_SIZE,
    max_overflow=settings.DB_MAX_OVERFLOW,
    pool_recycle=settings.DB_POOL_RECYCLE,
    echo=settings.DB_ECHO,
)

# Session factory; sessions are handed out per-request by get_db().
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


def get_db():
    """
    Database dependency for FastAPI endpoints.
    Provides a database session and ensures it's closed after use.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

299
src/config/migrate.py Normal file
View File

@ -0,0 +1,299 @@
"""
Database Migration Manager
Handles automatic database schema creation and migrations on application startup
"""
import os
import sys
import logging
from pathlib import Path
from sqlalchemy import create_engine, inspect, MetaData
from sqlalchemy.orm import sessionmaker
import importlib.util
logger = logging.getLogger(__name__)
class MigrationManager:
    """Manages database migrations and schema setup.

    Discovers *.py files in the migrations directory, tracks applied ones in
    a `_migrations` table, and executes each pending file's upgrade() inside
    a transaction.
    """
    def __init__(self, database_url: str, migrations_dir: str = "src/migrations"):
        """
        Initialize migration manager
        Args:
            database_url: Database connection URL
            migrations_dir: Path to migrations directory relative to project root
        """
        self.database_url = database_url
        self.migrations_dir = Path(migrations_dir)
        self.engine = None  # set by connect()
        self.SessionLocal = None  # set by connect(); not used elsewhere in this class

    def connect(self):
        """Establish database connection; returns True on success."""
        try:
            self.engine = create_engine(
                self.database_url,
                pool_pre_ping=True,
                pool_size=10,
                max_overflow=20,
                echo=False
            )
            self.SessionLocal = sessionmaker(
                autocommit=False,
                autoflush=False,
                bind=self.engine
            )
            # NOTE(review): this logs the full connection URL, which may
            # contain credentials — consider redacting the password.
            logger.info(f"✅ Database connection established: {self.database_url}")
            return True
        except Exception as e:
            logger.error(f"❌ Failed to connect to database: {e}")
            return False

    def get_applied_migrations(self) -> set:
        """Get the set of already-applied migration names from the database."""
        try:
            inspector = inspect(self.engine)
            tables = inspector.get_table_names()
            # Check if migrations table exists
            if '_migrations' not in tables:
                # Create migrations tracking table
                # NOTE(review): passing a raw SQL string to Connection.execute()
                # only works on SQLAlchemy 1.x; 2.x requires wrapping the
                # statement in sqlalchemy.text() — TODO confirm pinned version.
                with self.engine.begin() as conn:
                    conn.execute("""
                        CREATE TABLE IF NOT EXISTS _migrations (
                            id SERIAL PRIMARY KEY,
                            migration_name VARCHAR(255) NOT NULL UNIQUE,
                            applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                        )
                    """)
                logger.info("✅ Created migrations tracking table")
                return set()
            # Get applied migrations
            with self.engine.connect() as conn:
                result = conn.execute("SELECT migration_name FROM _migrations ORDER BY applied_at")
                applied = {row[0] for row in result}
                logger.debug(f"📋 Found {len(applied)} previously applied migrations")
                return applied
        except Exception as e:
            # Best-effort: an empty set means everything is re-attempted.
            logger.warning(f"⚠️ Could not fetch applied migrations: {e}")
            return set()

    def get_pending_migrations(self) -> list:
        """Return migration files found on disk, sorted by filename.

        NOTE(review): despite the name, this returns ALL migration files —
        the already-applied filter happens later, in run().
        """
        try:
            if not self.migrations_dir.exists():
                logger.warning(f"⚠️ Migrations directory not found: {self.migrations_dir}")
                return []
            migration_files = sorted([
                f for f in self.migrations_dir.glob("*.py")
                if f.name != "__init__.py" and f.name != "migrate.py"
            ])
            logger.debug(f"🔍 Found {len(migration_files)} migration files")
            return migration_files
        except Exception as e:
            logger.error(f"❌ Error scanning migrations directory: {e}")
            return []

    def load_migration(self, migration_file: Path):
        """
        Load and execute a migration file
        Args:
            migration_file: Path to migration file
        Returns:
            Module with upgrade and downgrade functions, or None on failure
        """
        # Imports the file as a standalone module so its upgrade()/downgrade()
        # callables can be invoked without the migrations dir being a package.
        try:
            spec = importlib.util.spec_from_file_location(
                migration_file.stem,
                migration_file
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return module
        except Exception as e:
            logger.error(f"❌ Failed to load migration {migration_file.name}: {e}")
            return None

    def apply_migration(self, migration_file: Path, migration_module) -> bool:
        """
        Apply a single migration
        Args:
            migration_file: Path to migration file
            migration_module: Loaded migration module
        Returns:
            True if successful, False otherwise
        """
        try:
            # Check if migration has upgrade function
            if not hasattr(migration_module, 'upgrade'):
                logger.warning(f"⚠️ Migration {migration_file.name} has no upgrade() function")
                return False

            # Create a mock op object with connection — an Alembic-like facade
            # exposing create_table/create_index/add_column/drop_table.
            class OpMock:
                def __init__(self, connection):
                    self.connection = connection

                def create_table(self, name, *args, **kwargs):
                    """Create a new table"""
                    from sqlalchemy import Table
                    table = Table(name, MetaData(), *args, **kwargs)
                    table.create(self.connection, checkfirst=True)
                    logger.debug(f" 📊 Created table: {name}")

                def create_index(self, name, table, columns, **kwargs):
                    """Create an index"""
                    try:
                        if isinstance(columns, str):
                            columns = [columns]
                        # Build index creation SQL
                        unique_clause = "UNIQUE" if kwargs.get('unique') else ""
                        columns_str = ", ".join(f'"{col}"' for col in columns)
                        index_sql = f'CREATE {unique_clause} INDEX IF NOT EXISTS "{name}" ON "{table}" ({columns_str})'
                        self.connection.execute(index_sql)
                        logger.debug(f" 🔑 Created index: {name} on {table}({columns_str})")
                    except Exception as e:
                        logger.warning(f" ⚠️ Could not create index {name}: {e}")

                def add_column(self, table, column):
                    """Add a column to table"""
                    try:
                        self.connection.execute(f'ALTER TABLE "{table}" ADD COLUMN {column}')
                        logger.debug(f" Added column to {table}")
                    except Exception as e:
                        logger.warning(f" ⚠️ Could not add column to {table}: {e}")

                def drop_table(self, name):
                    """Drop a table"""
                    try:
                        self.connection.execute(f'DROP TABLE IF EXISTS "{name}"')
                        logger.debug(f" 🗑️ Dropped table: {name}")
                    except Exception as e:
                        logger.warning(f" ⚠️ Could not drop table {name}: {e}")

            # Execute migration within a transaction
            with self.engine.begin() as connection:
                op = OpMock(connection)
                # NOTE(review): `op` is constructed but never handed to the
                # migration — upgrade() is called with no arguments. Unless
                # migration files obtain an op some other way, this looks
                # like a bug (upgrade(op) was probably intended) — verify
                # against an actual migration file before changing.
                migration_module.upgrade()
                # Record migration as applied
                # NOTE(review): raw SQL with a %s parameter tuple relies on
                # driver paramstyle and SQLAlchemy 1.x behavior; 2.x requires
                # text() with named parameters — TODO confirm.
                connection.execute("""
                    INSERT INTO _migrations (migration_name)
                    VALUES (%s)
                    ON CONFLICT DO NOTHING
                """, (migration_file.stem,))
            logger.info(f"✅ Applied migration: {migration_file.name}")
            return True
        except Exception as e:
            logger.error(f"❌ Error applying migration {migration_file.name}: {e}")
            return False

    def run(self):
        """Run all pending migrations; returns True if none failed."""
        logger.info("=" * 70)
        logger.info("🚀 Starting Database Migration Process")
        logger.info("=" * 70)
        # Step 1: Connect to database
        if not self.connect():
            logger.error("❌ Failed to connect to database. Migrations aborted.")
            return False
        # Step 2: Get applied and pending migrations
        applied_migrations = self.get_applied_migrations()
        migration_files = self.get_pending_migrations()
        if not migration_files:
            logger.info("✅ No migration files found - skipping migrations")
            logger.info("=" * 70)
            return True
        logger.info(f"📊 Total migration files: {len(migration_files)}")
        logger.info(f"📋 Already applied: {len(applied_migrations)}")
        # NOTE(review): this count can go negative if applied migrations were
        # removed from disk — cosmetic only.
        logger.info(f"⏳ Pending: {len(migration_files) - len(applied_migrations)}")
        logger.info("")
        # Step 3: Apply pending migrations
        successful = 0
        failed = 0
        for migration_file in migration_files:
            if migration_file.stem in applied_migrations:
                logger.debug(f"⏭️ Skipping already applied migration: {migration_file.name}")
                continue
            logger.info(f"⚙️ Applying migration: {migration_file.name}")
            # Load migration module
            migration_module = self.load_migration(migration_file)
            if not migration_module:
                failed += 1
                continue
            # Apply migration
            if self.apply_migration(migration_file, migration_module):
                successful += 1
            else:
                failed += 1
        # Step 4: Summary
        logger.info("")
        logger.info("=" * 70)
        logger.info(f"✅ Migration Summary: {successful} successful, {failed} failed")
        logger.info("=" * 70)
        return failed == 0
async def run_migrations(database_url: str) -> bool:
    """
    Run database migrations (async wrapper; the work itself is synchronous).
    Args:
        database_url: Database connection URL
    Returns:
        True if successful, False otherwise
    """
    try:
        # Migrations live next to this package: <package>/migrations
        package_root = Path(__file__).parent.parent
        manager = MigrationManager(database_url, str(package_root / "migrations"))
        return manager.run()
    except Exception as e:
        logger.error(f"❌ Unexpected error in migrations: {e}")
        return False
def migrate_sync(database_url: str) -> bool:
    """
    Synchronous version of run_migrations for startup hooks.
    Args:
        database_url: Database connection URL
    Returns:
        True if successful, False otherwise
    """
    try:
        migrations_path = Path(__file__).parent.parent / "migrations"
        return MigrationManager(database_url, str(migrations_path)).run()
    except Exception as e:
        logger.error(f"❌ Unexpected error in migrations: {e}")
        return False

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.audio_recording_model import AudioRecording
from src.validation.audio_recording_schemas import AudioRecordingCreate, AudioRecordingUpdate
class AudioRecordingCRUD:
    """Database CRUD operations for the AudioRecording model."""

    def __init__(self, db: Session):
        # Active SQLAlchemy session; the caller owns its lifecycle.
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[AudioRecording], int]:
        """Return one page of audio recordings (newest first) and the total count."""
        base = self.db.query(AudioRecording)
        total = base.count()
        page = (
            base.order_by(AudioRecording.created_at.desc())
            .offset(skip)
            .limit(limit)
            .all()
        )
        return page, total

    def get_by_id(self, audio_recording_id: UUID) -> Optional[AudioRecording]:
        """Fetch a single audio recording by primary key, or None."""
        return (
            self.db.query(AudioRecording)
            .filter(AudioRecording.id == audio_recording_id)
            .first()
        )

    def create(self, audio_recording_in: AudioRecordingCreate) -> AudioRecording:
        """Persist a new audio recording and return the refreshed row."""
        record = AudioRecording(**audio_recording_in.model_dump())
        self.db.add(record)
        self.db.commit()
        self.db.refresh(record)
        return record

    def update(
        self,
        audio_recording_id: UUID,
        audio_recording_in: AudioRecordingUpdate
    ) -> Optional[AudioRecording]:
        """Apply the supplied fields to an existing row; None if it is missing."""
        record = self.get_by_id(audio_recording_id)
        if record is None:
            return None
        for field, value in audio_recording_in.model_dump(exclude_unset=True).items():
            setattr(record, field, value)
        self.db.commit()
        self.db.refresh(record)
        return record

    def delete(self, audio_recording_id: UUID) -> bool:
        """Remove a row by id; True on success, False if it did not exist."""
        record = self.get_by_id(audio_recording_id)
        if record is None:
            return False
        self.db.delete(record)
        self.db.commit()
        return True

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.audit_log_model import AuditLog
from src.validation.audit_log_schemas import AuditLogCreate, AuditLogUpdate
class AuditLogCRUD:
    """CRUD operations for AuditLog"""
    def __init__(self, db: Session):
        # Active SQLAlchemy session; the caller owns its lifecycle.
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[AuditLog], int]:
        """Get all auditlogs with pagination (newest first); returns (items, total)."""
        query = self.db.query(AuditLog)
        total = query.count()
        items = query.order_by(AuditLog.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, audit_log_id: UUID) -> Optional[AuditLog]:
        """Get auditlog by ID, or None if not found."""
        return self.db.query(AuditLog).filter(AuditLog.id == audit_log_id).first()

    def create(self, audit_log_in: AuditLogCreate) -> AuditLog:
        """Create a new auditlog and return the refreshed row."""
        db_audit_log = AuditLog(**audit_log_in.model_dump())
        self.db.add(db_audit_log)
        self.db.commit()
        self.db.refresh(db_audit_log)
        return db_audit_log

    def update(
        self,
        audit_log_id: UUID,
        audit_log_in: AuditLogUpdate
    ) -> Optional[AuditLog]:
        """Update an existing auditlog (only fields explicitly set); None if missing."""
        db_audit_log = self.get_by_id(audit_log_id)
        if not db_audit_log:
            return None
        # exclude_unset: untouched schema fields are left as-is on the row
        update_data = audit_log_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_audit_log, field, value)
        self.db.commit()
        self.db.refresh(db_audit_log)
        return db_audit_log

    def delete(self, audit_log_id: UUID) -> bool:
        """Delete an auditlog; True on success, False if it did not exist."""
        db_audit_log = self.get_by_id(audit_log_id)
        if not db_audit_log:
            return False
        self.db.delete(db_audit_log)
        self.db.commit()
        return True

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.claim_model import Claim
from src.validation.claim_schemas import ClaimCreate, ClaimUpdate
class ClaimCRUD:
    """Database CRUD operations for the Claim model."""

    def __init__(self, db: Session):
        # Active SQLAlchemy session; the caller owns its lifecycle.
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[Claim], int]:
        """Return one page of claims (newest first) and the total row count."""
        base = self.db.query(Claim)
        total = base.count()
        page = (
            base.order_by(Claim.created_at.desc())
            .offset(skip)
            .limit(limit)
            .all()
        )
        return page, total

    def get_by_id(self, claim_id: UUID) -> Optional[Claim]:
        """Fetch a single claim by primary key, or None."""
        return self.db.query(Claim).filter(Claim.id == claim_id).first()

    def create(self, claim_in: ClaimCreate) -> Claim:
        """Persist a new claim and return the refreshed row."""
        record = Claim(**claim_in.model_dump())
        self.db.add(record)
        self.db.commit()
        self.db.refresh(record)
        return record

    def update(
        self,
        claim_id: UUID,
        claim_in: ClaimUpdate
    ) -> Optional[Claim]:
        """Apply the supplied fields to an existing claim; None if it is missing."""
        record = self.get_by_id(claim_id)
        if record is None:
            return None
        for field, value in claim_in.model_dump(exclude_unset=True).items():
            setattr(record, field, value)
        self.db.commit()
        self.db.refresh(record)
        return record

    def delete(self, claim_id: UUID) -> bool:
        """Remove a claim by id; True on success, False if it did not exist."""
        record = self.get_by_id(claim_id)
        if record is None:
            return False
        self.db.delete(record)
        self.db.commit()
        return True

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.claim_review_model import ClaimReview
from src.validation.claim_review_schemas import ClaimReviewCreate, ClaimReviewUpdate
class ClaimReviewCRUD:
    """CRUD operations for ClaimReview"""
    def __init__(self, db: Session):
        # Active SQLAlchemy session; the caller owns its lifecycle.
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[ClaimReview], int]:
        """Get all claimreviews with pagination (newest first); returns (items, total)."""
        query = self.db.query(ClaimReview)
        total = query.count()
        items = query.order_by(ClaimReview.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, claim_review_id: UUID) -> Optional[ClaimReview]:
        """Get claimreview by ID, or None if not found."""
        return self.db.query(ClaimReview).filter(ClaimReview.id == claim_review_id).first()

    def create(self, claim_review_in: ClaimReviewCreate) -> ClaimReview:
        """Create a new claimreview and return the refreshed row."""
        db_claim_review = ClaimReview(**claim_review_in.model_dump())
        self.db.add(db_claim_review)
        self.db.commit()
        self.db.refresh(db_claim_review)
        return db_claim_review

    def update(
        self,
        claim_review_id: UUID,
        claim_review_in: ClaimReviewUpdate
    ) -> Optional[ClaimReview]:
        """Update an existing claimreview (only fields explicitly set); None if missing."""
        db_claim_review = self.get_by_id(claim_review_id)
        if not db_claim_review:
            return None
        # exclude_unset: untouched schema fields are left as-is on the row
        update_data = claim_review_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_claim_review, field, value)
        self.db.commit()
        self.db.refresh(db_claim_review)
        return db_claim_review

    def delete(self, claim_review_id: UUID) -> bool:
        """Delete a claimreview; True on success, False if it did not exist."""
        db_claim_review = self.get_by_id(claim_review_id)
        if not db_claim_review:
            return False
        self.db.delete(db_claim_review)
        self.db.commit()
        return True

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.claim_scrub_result_model import ClaimScrubResult
from src.validation.claim_scrub_result_schemas import ClaimScrubResultCreate, ClaimScrubResultUpdate
class ClaimScrubResultCRUD:
    """CRUD operations for ClaimScrubResult, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[ClaimScrubResult], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(ClaimScrubResult)
        total = query.count()
        items = query.order_by(ClaimScrubResult.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, claim_scrub_result_id: UUID) -> Optional[ClaimScrubResult]:
        """Return the ClaimScrubResult with the given primary key, or None."""
        return self.db.query(ClaimScrubResult).filter(ClaimScrubResult.id == claim_scrub_result_id).first()

    def create(self, claim_scrub_result_in: ClaimScrubResultCreate) -> ClaimScrubResult:
        """Insert a new ClaimScrubResult and return the refreshed row."""
        db_claim_scrub_result = ClaimScrubResult(**claim_scrub_result_in.model_dump())
        self.db.add(db_claim_scrub_result)
        self._commit()
        self.db.refresh(db_claim_scrub_result)
        return db_claim_scrub_result

    def update(
        self,
        claim_scrub_result_id: UUID,
        claim_scrub_result_in: ClaimScrubResultUpdate
    ) -> Optional[ClaimScrubResult]:
        """Apply only explicitly-set fields; return None when not found."""
        db_claim_scrub_result = self.get_by_id(claim_scrub_result_id)
        if not db_claim_scrub_result:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = claim_scrub_result_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_claim_scrub_result, field, value)
        self._commit()
        self.db.refresh(db_claim_scrub_result)
        return db_claim_scrub_result

    def delete(self, claim_scrub_result_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_claim_scrub_result = self.get_by_id(claim_scrub_result_id)
        if not db_claim_scrub_result:
            return False
        self.db.delete(db_claim_scrub_result)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.clinical_entity_model import ClinicalEntity
from src.validation.clinical_entity_schemas import ClinicalEntityCreate, ClinicalEntityUpdate
class ClinicalEntityCRUD:
    """CRUD operations for ClinicalEntity, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[ClinicalEntity], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(ClinicalEntity)
        total = query.count()
        items = query.order_by(ClinicalEntity.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, clinical_entity_id: UUID) -> Optional[ClinicalEntity]:
        """Return the ClinicalEntity with the given primary key, or None."""
        return self.db.query(ClinicalEntity).filter(ClinicalEntity.id == clinical_entity_id).first()

    def create(self, clinical_entity_in: ClinicalEntityCreate) -> ClinicalEntity:
        """Insert a new ClinicalEntity and return the refreshed row."""
        db_clinical_entity = ClinicalEntity(**clinical_entity_in.model_dump())
        self.db.add(db_clinical_entity)
        self._commit()
        self.db.refresh(db_clinical_entity)
        return db_clinical_entity

    def update(
        self,
        clinical_entity_id: UUID,
        clinical_entity_in: ClinicalEntityUpdate
    ) -> Optional[ClinicalEntity]:
        """Apply only explicitly-set fields; return None when not found."""
        db_clinical_entity = self.get_by_id(clinical_entity_id)
        if not db_clinical_entity:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = clinical_entity_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_clinical_entity, field, value)
        self._commit()
        self.db.refresh(db_clinical_entity)
        return db_clinical_entity

    def delete(self, clinical_entity_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_clinical_entity = self.get_by_id(clinical_entity_id)
        if not db_clinical_entity:
            return False
        self.db.delete(db_clinical_entity)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.confidence_score_model import ConfidenceScore
from src.validation.confidence_score_schemas import ConfidenceScoreCreate, ConfidenceScoreUpdate
class ConfidenceScoreCRUD:
    """CRUD operations for ConfidenceScore, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[ConfidenceScore], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(ConfidenceScore)
        total = query.count()
        items = query.order_by(ConfidenceScore.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, confidence_score_id: UUID) -> Optional[ConfidenceScore]:
        """Return the ConfidenceScore with the given primary key, or None."""
        return self.db.query(ConfidenceScore).filter(ConfidenceScore.id == confidence_score_id).first()

    def create(self, confidence_score_in: ConfidenceScoreCreate) -> ConfidenceScore:
        """Insert a new ConfidenceScore and return the refreshed row."""
        db_confidence_score = ConfidenceScore(**confidence_score_in.model_dump())
        self.db.add(db_confidence_score)
        self._commit()
        self.db.refresh(db_confidence_score)
        return db_confidence_score

    def update(
        self,
        confidence_score_id: UUID,
        confidence_score_in: ConfidenceScoreUpdate
    ) -> Optional[ConfidenceScore]:
        """Apply only explicitly-set fields; return None when not found."""
        db_confidence_score = self.get_by_id(confidence_score_id)
        if not db_confidence_score:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = confidence_score_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_confidence_score, field, value)
        self._commit()
        self.db.refresh(db_confidence_score)
        return db_confidence_score

    def delete(self, confidence_score_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_confidence_score = self.get_by_id(confidence_score_id)
        if not db_confidence_score:
            return False
        self.db.delete(db_confidence_score)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.cpt_code_model import CPTCode
from src.validation.cpt_code_schemas import CPTCodeCreate, CPTCodeUpdate
class CPTCodeCRUD:
    """CRUD operations for CPTCode, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[CPTCode], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(CPTCode)
        total = query.count()
        items = query.order_by(CPTCode.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, cpt_code_id: UUID) -> Optional[CPTCode]:
        """Return the CPTCode with the given primary key, or None."""
        return self.db.query(CPTCode).filter(CPTCode.id == cpt_code_id).first()

    def create(self, cpt_code_in: CPTCodeCreate) -> CPTCode:
        """Insert a new CPTCode and return the refreshed, persisted row."""
        db_cpt_code = CPTCode(**cpt_code_in.model_dump())
        self.db.add(db_cpt_code)
        self._commit()
        self.db.refresh(db_cpt_code)
        return db_cpt_code

    def update(
        self,
        cpt_code_id: UUID,
        cpt_code_in: CPTCodeUpdate
    ) -> Optional[CPTCode]:
        """Apply only explicitly-set fields; return None when not found."""
        db_cpt_code = self.get_by_id(cpt_code_id)
        if not db_cpt_code:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = cpt_code_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_cpt_code, field, value)
        self._commit()
        self.db.refresh(db_cpt_code)
        return db_cpt_code

    def delete(self, cpt_code_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_cpt_code = self.get_by_id(cpt_code_id)
        if not db_cpt_code:
            return False
        self.db.delete(db_cpt_code)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.cpt_modifier_model import CPTModifier
from src.validation.cpt_modifier_schemas import CPTModifierCreate, CPTModifierUpdate
class CPTModifierCRUD:
    """CRUD operations for CPTModifier, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[CPTModifier], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(CPTModifier)
        total = query.count()
        items = query.order_by(CPTModifier.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, cpt_modifier_id: UUID) -> Optional[CPTModifier]:
        """Return the CPTModifier with the given primary key, or None."""
        return self.db.query(CPTModifier).filter(CPTModifier.id == cpt_modifier_id).first()

    def create(self, cpt_modifier_in: CPTModifierCreate) -> CPTModifier:
        """Insert a new CPTModifier and return the refreshed row."""
        db_cpt_modifier = CPTModifier(**cpt_modifier_in.model_dump())
        self.db.add(db_cpt_modifier)
        self._commit()
        self.db.refresh(db_cpt_modifier)
        return db_cpt_modifier

    def update(
        self,
        cpt_modifier_id: UUID,
        cpt_modifier_in: CPTModifierUpdate
    ) -> Optional[CPTModifier]:
        """Apply only explicitly-set fields; return None when not found."""
        db_cpt_modifier = self.get_by_id(cpt_modifier_id)
        if not db_cpt_modifier:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = cpt_modifier_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_cpt_modifier, field, value)
        self._commit()
        self.db.refresh(db_cpt_modifier)
        return db_cpt_modifier

    def delete(self, cpt_modifier_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_cpt_modifier = self.get_by_id(cpt_modifier_id)
        if not db_cpt_modifier:
            return False
        self.db.delete(db_cpt_modifier)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.denial_pattern_model import DenialPattern
from src.validation.denial_pattern_schemas import DenialPatternCreate, DenialPatternUpdate
class DenialPatternCRUD:
    """CRUD operations for DenialPattern, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[DenialPattern], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(DenialPattern)
        total = query.count()
        items = query.order_by(DenialPattern.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, denial_pattern_id: UUID) -> Optional[DenialPattern]:
        """Return the DenialPattern with the given primary key, or None."""
        return self.db.query(DenialPattern).filter(DenialPattern.id == denial_pattern_id).first()

    def create(self, denial_pattern_in: DenialPatternCreate) -> DenialPattern:
        """Insert a new DenialPattern and return the refreshed row."""
        db_denial_pattern = DenialPattern(**denial_pattern_in.model_dump())
        self.db.add(db_denial_pattern)
        self._commit()
        self.db.refresh(db_denial_pattern)
        return db_denial_pattern

    def update(
        self,
        denial_pattern_id: UUID,
        denial_pattern_in: DenialPatternUpdate
    ) -> Optional[DenialPattern]:
        """Apply only explicitly-set fields; return None when not found."""
        db_denial_pattern = self.get_by_id(denial_pattern_id)
        if not db_denial_pattern:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = denial_pattern_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_denial_pattern, field, value)
        self._commit()
        self.db.refresh(db_denial_pattern)
        return db_denial_pattern

    def delete(self, denial_pattern_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_denial_pattern = self.get_by_id(denial_pattern_id)
        if not db_denial_pattern:
            return False
        self.db.delete(db_denial_pattern)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.emr_integration_model import EMRIntegration
from src.validation.emr_integration_schemas import EMRIntegrationCreate, EMRIntegrationUpdate
class EMRIntegrationCRUD:
    """CRUD operations for EMRIntegration, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[EMRIntegration], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(EMRIntegration)
        total = query.count()
        items = query.order_by(EMRIntegration.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, emr_integration_id: UUID) -> Optional[EMRIntegration]:
        """Return the EMRIntegration with the given primary key, or None."""
        return self.db.query(EMRIntegration).filter(EMRIntegration.id == emr_integration_id).first()

    def create(self, emr_integration_in: EMRIntegrationCreate) -> EMRIntegration:
        """Insert a new EMRIntegration and return the refreshed row."""
        db_emr_integration = EMRIntegration(**emr_integration_in.model_dump())
        self.db.add(db_emr_integration)
        self._commit()
        self.db.refresh(db_emr_integration)
        return db_emr_integration

    def update(
        self,
        emr_integration_id: UUID,
        emr_integration_in: EMRIntegrationUpdate
    ) -> Optional[EMRIntegration]:
        """Apply only explicitly-set fields; return None when not found."""
        db_emr_integration = self.get_by_id(emr_integration_id)
        if not db_emr_integration:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = emr_integration_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_emr_integration, field, value)
        self._commit()
        self.db.refresh(db_emr_integration)
        return db_emr_integration

    def delete(self, emr_integration_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_emr_integration = self.get_by_id(emr_integration_id)
        if not db_emr_integration:
            return False
        self.db.delete(db_emr_integration)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.icd10_code_model import ICD10Code
from src.validation.icd10_code_schemas import ICD10CodeCreate, ICD10CodeUpdate
class ICD10CodeCRUD:
    """CRUD operations for ICD10Code, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[ICD10Code], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(ICD10Code)
        total = query.count()
        items = query.order_by(ICD10Code.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, icd10_code_id: UUID) -> Optional[ICD10Code]:
        """Return the ICD10Code with the given primary key, or None."""
        return self.db.query(ICD10Code).filter(ICD10Code.id == icd10_code_id).first()

    def create(self, icd10_code_in: ICD10CodeCreate) -> ICD10Code:
        """Insert a new ICD10Code and return the refreshed, persisted row."""
        db_icd10_code = ICD10Code(**icd10_code_in.model_dump())
        self.db.add(db_icd10_code)
        self._commit()
        self.db.refresh(db_icd10_code)
        return db_icd10_code

    def update(
        self,
        icd10_code_id: UUID,
        icd10_code_in: ICD10CodeUpdate
    ) -> Optional[ICD10Code]:
        """Apply only explicitly-set fields; return None when not found."""
        db_icd10_code = self.get_by_id(icd10_code_id)
        if not db_icd10_code:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = icd10_code_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_icd10_code, field, value)
        self._commit()
        self.db.refresh(db_icd10_code)
        return db_icd10_code

    def delete(self, icd10_code_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_icd10_code = self.get_by_id(icd10_code_id)
        if not db_icd10_code:
            return False
        self.db.delete(db_icd10_code)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.lcd_model import LCD
from src.validation.lcd_schemas import LCDCreate, LCDUpdate
class LCDCRUD:
    """CRUD operations for LCD, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[LCD], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(LCD)
        total = query.count()
        items = query.order_by(LCD.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, lcd_id: UUID) -> Optional[LCD]:
        """Return the LCD with the given primary key, or None."""
        return self.db.query(LCD).filter(LCD.id == lcd_id).first()

    def create(self, lcd_in: LCDCreate) -> LCD:
        """Insert a new LCD and return the refreshed, persisted row."""
        db_lcd = LCD(**lcd_in.model_dump())
        self.db.add(db_lcd)
        self._commit()
        self.db.refresh(db_lcd)
        return db_lcd

    def update(
        self,
        lcd_id: UUID,
        lcd_in: LCDUpdate
    ) -> Optional[LCD]:
        """Apply only explicitly-set fields; return None when not found."""
        db_lcd = self.get_by_id(lcd_id)
        if not db_lcd:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = lcd_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_lcd, field, value)
        self._commit()
        self.db.refresh(db_lcd)
        return db_lcd

    def delete(self, lcd_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_lcd = self.get_by_id(lcd_id)
        if not db_lcd:
            return False
        self.db.delete(db_lcd)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.ncci_edit_model import NCCIEdit
from src.validation.ncci_edit_schemas import NCCIEditCreate, NCCIEditUpdate
class NCCIEditCRUD:
    """CRUD operations for NCCIEdit, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[NCCIEdit], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(NCCIEdit)
        total = query.count()
        items = query.order_by(NCCIEdit.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, ncci_edit_id: UUID) -> Optional[NCCIEdit]:
        """Return the NCCIEdit with the given primary key, or None."""
        return self.db.query(NCCIEdit).filter(NCCIEdit.id == ncci_edit_id).first()

    def create(self, ncci_edit_in: NCCIEditCreate) -> NCCIEdit:
        """Insert a new NCCIEdit and return the refreshed, persisted row."""
        db_ncci_edit = NCCIEdit(**ncci_edit_in.model_dump())
        self.db.add(db_ncci_edit)
        self._commit()
        self.db.refresh(db_ncci_edit)
        return db_ncci_edit

    def update(
        self,
        ncci_edit_id: UUID,
        ncci_edit_in: NCCIEditUpdate
    ) -> Optional[NCCIEdit]:
        """Apply only explicitly-set fields; return None when not found."""
        db_ncci_edit = self.get_by_id(ncci_edit_id)
        if not db_ncci_edit:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = ncci_edit_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_ncci_edit, field, value)
        self._commit()
        self.db.refresh(db_ncci_edit)
        return db_ncci_edit

    def delete(self, ncci_edit_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_ncci_edit = self.get_by_id(ncci_edit_id)
        if not db_ncci_edit:
            return False
        self.db.delete(db_ncci_edit)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.ncd_model import NCD
from src.validation.ncd_schemas import NCDCreate, NCDUpdate
class NCDCRUD:
    """CRUD operations for NCD, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[NCD], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(NCD)
        total = query.count()
        items = query.order_by(NCD.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, ncd_id: UUID) -> Optional[NCD]:
        """Return the NCD with the given primary key, or None."""
        return self.db.query(NCD).filter(NCD.id == ncd_id).first()

    def create(self, ncd_in: NCDCreate) -> NCD:
        """Insert a new NCD and return the refreshed, persisted row."""
        db_ncd = NCD(**ncd_in.model_dump())
        self.db.add(db_ncd)
        self._commit()
        self.db.refresh(db_ncd)
        return db_ncd

    def update(
        self,
        ncd_id: UUID,
        ncd_in: NCDUpdate
    ) -> Optional[NCD]:
        """Apply only explicitly-set fields; return None when not found."""
        db_ncd = self.get_by_id(ncd_id)
        if not db_ncd:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = ncd_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_ncd, field, value)
        self._commit()
        self.db.refresh(db_ncd)
        return db_ncd

    def delete(self, ncd_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_ncd = self.get_by_id(ncd_id)
        if not db_ncd:
            return False
        self.db.delete(db_ncd)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.patient_model import Patient
from src.validation.patient_schemas import PatientCreate, PatientUpdate
class PatientCRUD:
    """CRUD operations for Patient, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[Patient], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(Patient)
        total = query.count()
        items = query.order_by(Patient.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, patient_id: UUID) -> Optional[Patient]:
        """Return the Patient with the given primary key, or None."""
        return self.db.query(Patient).filter(Patient.id == patient_id).first()

    def create(self, patient_in: PatientCreate) -> Patient:
        """Insert a new Patient and return the refreshed, persisted row."""
        db_patient = Patient(**patient_in.model_dump())
        self.db.add(db_patient)
        self._commit()
        self.db.refresh(db_patient)
        return db_patient

    def update(
        self,
        patient_id: UUID,
        patient_in: PatientUpdate
    ) -> Optional[Patient]:
        """Apply only explicitly-set fields; return None when not found."""
        db_patient = self.get_by_id(patient_id)
        if not db_patient:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = patient_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_patient, field, value)
        self._commit()
        self.db.refresh(db_patient)
        return db_patient

    def delete(self, patient_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_patient = self.get_by_id(patient_id)
        if not db_patient:
            return False
        self.db.delete(db_patient)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.payer_model import Payer
from src.validation.payer_schemas import PayerCreate, PayerUpdate
class PayerCRUD:
    """CRUD operations for Payer, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[Payer], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(Payer)
        total = query.count()
        items = query.order_by(Payer.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, payer_id: UUID) -> Optional[Payer]:
        """Return the Payer with the given primary key, or None."""
        return self.db.query(Payer).filter(Payer.id == payer_id).first()

    def create(self, payer_in: PayerCreate) -> Payer:
        """Insert a new Payer and return the refreshed, persisted row."""
        db_payer = Payer(**payer_in.model_dump())
        self.db.add(db_payer)
        self._commit()
        self.db.refresh(db_payer)
        return db_payer

    def update(
        self,
        payer_id: UUID,
        payer_in: PayerUpdate
    ) -> Optional[Payer]:
        """Apply only explicitly-set fields; return None when not found."""
        db_payer = self.get_by_id(payer_id)
        if not db_payer:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = payer_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_payer, field, value)
        self._commit()
        self.db.refresh(db_payer)
        return db_payer

    def delete(self, payer_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_payer = self.get_by_id(payer_id)
        if not db_payer:
            return False
        self.db.delete(db_payer)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.payer_rule_model import PayerRule
from src.validation.payer_rule_schemas import PayerRuleCreate, PayerRuleUpdate
class PayerRuleCRUD:
    """CRUD operations for PayerRule, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[PayerRule], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(PayerRule)
        total = query.count()
        items = query.order_by(PayerRule.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, payer_rule_id: UUID) -> Optional[PayerRule]:
        """Return the PayerRule with the given primary key, or None."""
        return self.db.query(PayerRule).filter(PayerRule.id == payer_rule_id).first()

    def create(self, payer_rule_in: PayerRuleCreate) -> PayerRule:
        """Insert a new PayerRule and return the refreshed, persisted row."""
        db_payer_rule = PayerRule(**payer_rule_in.model_dump())
        self.db.add(db_payer_rule)
        self._commit()
        self.db.refresh(db_payer_rule)
        return db_payer_rule

    def update(
        self,
        payer_rule_id: UUID,
        payer_rule_in: PayerRuleUpdate
    ) -> Optional[PayerRule]:
        """Apply only explicitly-set fields; return None when not found."""
        db_payer_rule = self.get_by_id(payer_rule_id)
        if not db_payer_rule:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = payer_rule_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_payer_rule, field, value)
        self._commit()
        self.db.refresh(db_payer_rule)
        return db_payer_rule

    def delete(self, payer_rule_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_payer_rule = self.get_by_id(payer_rule_id)
        if not db_payer_rule:
            return False
        self.db.delete(db_payer_rule)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.procedure_template_model import ProcedureTemplate
from src.validation.procedure_template_schemas import ProcedureTemplateCreate, ProcedureTemplateUpdate
class ProcedureTemplateCRUD:
    """CRUD operations for ProcedureTemplate, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[ProcedureTemplate], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(ProcedureTemplate)
        total = query.count()
        items = query.order_by(ProcedureTemplate.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, procedure_template_id: UUID) -> Optional[ProcedureTemplate]:
        """Return the ProcedureTemplate with the given primary key, or None."""
        return self.db.query(ProcedureTemplate).filter(ProcedureTemplate.id == procedure_template_id).first()

    def create(self, procedure_template_in: ProcedureTemplateCreate) -> ProcedureTemplate:
        """Insert a new ProcedureTemplate and return the refreshed row."""
        db_procedure_template = ProcedureTemplate(**procedure_template_in.model_dump())
        self.db.add(db_procedure_template)
        self._commit()
        self.db.refresh(db_procedure_template)
        return db_procedure_template

    def update(
        self,
        procedure_template_id: UUID,
        procedure_template_in: ProcedureTemplateUpdate
    ) -> Optional[ProcedureTemplate]:
        """Apply only explicitly-set fields; return None when not found."""
        db_procedure_template = self.get_by_id(procedure_template_id)
        if not db_procedure_template:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = procedure_template_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_procedure_template, field, value)
        self._commit()
        self.db.refresh(db_procedure_template)
        return db_procedure_template

    def delete(self, procedure_template_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_procedure_template = self.get_by_id(procedure_template_id)
        if not db_procedure_template:
            return False
        self.db.delete(db_procedure_template)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.rag_document_model import RAGDocument
from src.validation.rag_document_schemas import RAGDocumentCreate, RAGDocumentUpdate
class RAGDocumentCRUD:
    """CRUD operations for RAGDocument, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[RAGDocument], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(RAGDocument)
        total = query.count()
        items = query.order_by(RAGDocument.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, rag_document_id: UUID) -> Optional[RAGDocument]:
        """Return the RAGDocument with the given primary key, or None."""
        return self.db.query(RAGDocument).filter(RAGDocument.id == rag_document_id).first()

    def create(self, rag_document_in: RAGDocumentCreate) -> RAGDocument:
        """Insert a new RAGDocument and return the refreshed, persisted row."""
        db_rag_document = RAGDocument(**rag_document_in.model_dump())
        self.db.add(db_rag_document)
        self._commit()
        self.db.refresh(db_rag_document)
        return db_rag_document

    def update(
        self,
        rag_document_id: UUID,
        rag_document_in: RAGDocumentUpdate
    ) -> Optional[RAGDocument]:
        """Apply only explicitly-set fields; return None when not found."""
        db_rag_document = self.get_by_id(rag_document_id)
        if not db_rag_document:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = rag_document_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_rag_document, field, value)
        self._commit()
        self.db.refresh(db_rag_document)
        return db_rag_document

    def delete(self, rag_document_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_rag_document = self.get_by_id(rag_document_id)
        if not db_rag_document:
            return False
        self.db.delete(db_rag_document)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.transcript_model import Transcript
from src.validation.transcript_schemas import TranscriptCreate, TranscriptUpdate
class TranscriptCRUD:
    """CRUD operations for Transcript, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[Transcript], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(Transcript)
        total = query.count()
        items = query.order_by(Transcript.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, transcript_id: UUID) -> Optional[Transcript]:
        """Return the Transcript with the given primary key, or None."""
        return self.db.query(Transcript).filter(Transcript.id == transcript_id).first()

    def create(self, transcript_in: TranscriptCreate) -> Transcript:
        """Insert a new Transcript and return the refreshed, persisted row."""
        db_transcript = Transcript(**transcript_in.model_dump())
        self.db.add(db_transcript)
        self._commit()
        self.db.refresh(db_transcript)
        return db_transcript

    def update(
        self,
        transcript_id: UUID,
        transcript_in: TranscriptUpdate
    ) -> Optional[Transcript]:
        """Apply only explicitly-set fields; return None when not found."""
        db_transcript = self.get_by_id(transcript_id)
        if not db_transcript:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = transcript_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_transcript, field, value)
        self._commit()
        self.db.refresh(db_transcript)
        return db_transcript

    def delete(self, transcript_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_transcript = self.get_by_id(transcript_id)
        if not db_transcript:
            return False
        self.db.delete(db_transcript)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,62 @@
from sqlalchemy.orm import Session
from typing import Optional, List, Tuple
from uuid import UUID
from src.models.user_model import User
from src.validation.user_schemas import UserCreate, UserUpdate
class UserCRUD:
    """CRUD operations for User, backed by a caller-owned Session.

    Write methods commit immediately; if the commit fails they roll the
    session back (so it stays usable) and re-raise the original error.
    """

    def __init__(self, db: Session):
        self.db = db

    def get_all(
        self,
        skip: int = 0,
        limit: int = 100
    ) -> Tuple[List[User], int]:
        """Return (page ordered by created_at desc, total row count)."""
        query = self.db.query(User)
        total = query.count()
        items = query.order_by(User.created_at.desc()).offset(skip).limit(limit).all()
        return items, total

    def get_by_id(self, user_id: UUID) -> Optional[User]:
        """Return the User with the given primary key, or None."""
        return self.db.query(User).filter(User.id == user_id).first()

    def create(self, user_in: UserCreate) -> User:
        """Insert a new User and return the refreshed, persisted row."""
        db_user = User(**user_in.model_dump())
        self.db.add(db_user)
        self._commit()
        self.db.refresh(db_user)
        return db_user

    def update(
        self,
        user_id: UUID,
        user_in: UserUpdate
    ) -> Optional[User]:
        """Apply only explicitly-set fields; return None when not found."""
        db_user = self.get_by_id(user_id)
        if not db_user:
            return None
        # exclude_unset: fields the caller did not provide stay untouched.
        update_data = user_in.model_dump(exclude_unset=True)
        for field, value in update_data.items():
            setattr(db_user, field, value)
        self._commit()
        self.db.refresh(db_user)
        return db_user

    def delete(self, user_id: UUID) -> bool:
        """Delete the row with the given id; return True if it existed."""
        db_user = self.get_by_id(user_id)
        if not db_user:
            return False
        self.db.delete(db_user)
        self._commit()
        return True

    def _commit(self) -> None:
        """Commit, rolling back and re-raising on failure."""
        try:
            self.db.commit()
        except Exception:
            # A failed commit leaves the session unusable until rolled back.
            self.db.rollback()
            raise

View File

@ -0,0 +1,183 @@
from confluent_kafka import Consumer, KafkaException, KafkaError
from src.infrastructure.kafka.kafka.config import kafka_config
from src.infrastructure.observability.logger import logger
import json
import os
import asyncio
import signal
from typing import Callable, Dict, List
"""
Kafka Consumer Service
Production-ready event consumer with error handling, retries, and graceful shutdown
"""
class KafkaConsumerService:
    """Manages confluent-kafka consumers keyed by consumer-group id.

    Each subscribed group runs a background asyncio task that polls the
    blocking confluent-kafka Consumer through the default executor (so
    the event loop is not stalled) and dispatches decoded JSON messages
    to an async handler callable.
    """

    def __init__(self):
        self.consumers: Dict[str, Consumer] = {}  # group_id -> Consumer
        self.is_connected = False
        self.max_retries = int(os.getenv('KAFKA_MAX_RETRIES', '3'))
        self.running = False
        # group_id -> background consume task. References are held so the
        # tasks are not garbage-collected while still running (a bare
        # asyncio.create_task result may be collected mid-flight).
        self._consume_tasks: Dict[str, asyncio.Task] = {}

    async def connect(self, group_id: str, options: dict = None):
        """
        Create and register a Consumer for a group.

        Args:
            group_id: Consumer group ID
            options: Optional overrides (sessionTimeout, heartbeatInterval,
                maxBytesPerPartition, minBytes, maxBytes, maxWaitTimeInMs)

        Raises:
            Exception: propagated from Consumer construction/config.
        """
        try:
            if group_id in self.consumers:
                logger.warning(f'Kafka Consumer: Group already exists: {group_id}')
                return
            opts = options or {}
            config = kafka_config.get_config()
            config.update({
                'group.id': group_id,
                'session.timeout.ms': opts.get('sessionTimeout', 30000),
                'heartbeat.interval.ms': opts.get('heartbeatInterval', 3000),
                'max.partition.fetch.bytes': opts.get('maxBytesPerPartition', 1048576),
                'fetch.min.bytes': opts.get('minBytes', 1),
                'fetch.max.bytes': opts.get('maxBytes', 10485760),
                'fetch.wait.max.ms': opts.get('maxWaitTimeInMs', 5000),
                'auto.offset.reset': 'latest',  # or 'earliest' for fromBeginning
                'enable.auto.commit': True,
            })
            self.consumers[group_id] = Consumer(config)
            self.is_connected = True
            logger.info(f'Kafka Consumer: Connected', extra={'groupId': group_id})
        except Exception as e:
            logger.error(f'Kafka Consumer: Connection failed: {e}', extra={'groupId': group_id})
            raise

    async def subscribe(self, group_id: str, topics: List[str], message_handler: Callable, options: dict = None):
        """
        Subscribe to topics and start a background consume task.

        Args:
            group_id: Consumer group ID
            topics: Topics to subscribe to
            message_handler: Async handler invoked per decoded message
            options: Consumer options forwarded to connect()
        """
        try:
            consumer = self.consumers.get(group_id)
            if not consumer:
                await self.connect(group_id, options)
                consumer = self.consumers.get(group_id)
            consumer.subscribe(topics)
            self.running = True
            logger.info(f'Kafka Consumer: Subscribed and consuming', extra={'groupId': group_id, 'topics': topics})
            # Keep a reference so the task survives garbage collection and
            # can be cancelled on disconnect.
            self._consume_tasks[group_id] = asyncio.create_task(
                self._consume_loop(group_id, consumer, message_handler)
            )
        except Exception as e:
            logger.error(f'Kafka Consumer: Subscribe error: {e}', extra={'groupId': group_id, 'topics': topics})
            raise

    async def _consume_loop(self, group_id: str, consumer: Consumer, message_handler: Callable):
        """Poll messages until stopped/cancelled and dispatch each to the handler.

        Consumer.poll is a blocking call, so it runs in the default
        executor to avoid stalling the event loop.
        """
        loop = asyncio.get_running_loop()
        try:
            while self.running:
                msg = await loop.run_in_executor(None, consumer.poll, 1.0)
                if msg is None:
                    continue
                if msg.error():
                    # End-of-partition is informational, not an error.
                    if msg.error().code() == KafkaError._PARTITION_EOF:
                        continue
                    logger.error(f'Kafka Consumer: Error: {msg.error()}')
                    continue
                try:
                    key = msg.key().decode() if msg.key() else None
                    value = json.loads(msg.value().decode())
                    headers = {k: v.decode() if isinstance(v, bytes) else v for k, v in (msg.headers() or [])}
                    logger.debug('Kafka Consumer: Message received', extra={
                        'groupId': group_id,
                        'topic': msg.topic(),
                        'partition': msg.partition(),
                        'offset': msg.offset(),
                        'key': key
                    })
                    message = {
                        'topic': msg.topic(),
                        'partition': msg.partition(),
                        'offset': msg.offset(),
                        'key': key,
                        'value': value,
                        'headers': headers,
                        'timestamp': msg.timestamp()[1] if msg.timestamp() else None
                    }
                    await message_handler(message)
                    logger.debug('Kafka Consumer: Message processed', extra={
                        'groupId': group_id,
                        'topic': msg.topic(),
                        'partition': msg.partition(),
                        'offset': msg.offset()
                    })
                except Exception as error:
                    # Log and keep consuming. With enable.auto.commit the
                    # offset is committed regardless, and re-raising here
                    # (the original behavior) killed the whole consume loop,
                    # leaving a subscribed-but-idle consumer. Add custom
                    # retry/DLQ handling here if per-message redelivery is
                    # required.
                    logger.error(f'Kafka Consumer: Message processing error: {error}', extra={
                        'groupId': group_id,
                        'topic': msg.topic(),
                        'partition': msg.partition(),
                        'offset': msg.offset()
                    })
        except asyncio.CancelledError:
            # Task cancelled by disconnect(); propagate for clean shutdown.
            raise
        except Exception as e:
            logger.error(f'Kafka Consumer: Consume loop error: {e}', extra={'groupId': group_id})

    async def disconnect(self, group_id: str):
        """Cancel the group's consume task and close its consumer."""
        try:
            task = self._consume_tasks.pop(group_id, None)
            if task:
                # Stop the polling task before closing the consumer it uses.
                task.cancel()
            consumer = self.consumers.get(group_id)
            if consumer:
                consumer.close()
                del self.consumers[group_id]
                logger.info(f'Kafka Consumer: Disconnected', extra={'groupId': group_id})
            if len(self.consumers) == 0:
                self.is_connected = False
        except Exception as e:
            logger.error(f'Kafka Consumer: Disconnect error: {e}', extra={'groupId': group_id})

    async def disconnect_all(self):
        """Disconnect every registered consumer group."""
        disconnect_tasks = [self.disconnect(group_id) for group_id in list(self.consumers.keys())]
        await asyncio.gather(*disconnect_tasks, return_exceptions=True)
        logger.info('Kafka Consumer: All consumers disconnected')

    def get_consumer(self, group_id: str):
        """Return the raw Consumer for a group, or None if not connected."""
        return self.consumers.get(group_id)

    def is_ready(self, group_id: str):
        """True when a consumer exists for the given group id."""
        return group_id in self.consumers
# Singleton instance
kafka_consumer = KafkaConsumerService()

# Graceful shutdown. Python-level signal handlers run synchronously in the
# main thread; scheduling the async disconnect needs a running event loop,
# which may not exist when the signal arrives, so guard the RuntimeError
# that asyncio raises instead of crashing inside the handler.
def signal_handler(sig, frame):
    logger.info('Kafka Consumer: Signal received, disconnecting...')
    # Flip the flag first so consume loops begin winding down immediately.
    kafka_consumer.running = False
    try:
        asyncio.get_running_loop().create_task(kafka_consumer.disconnect_all())
    except RuntimeError:
        # No running loop in this thread; consumers are closed on exit.
        logger.warning('Kafka Consumer: No running event loop; skipping async disconnect')

signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: audio.uploaded
This handler is scaffolded - implement your business logic in the handle method
"""
class AudioUploadedHandlerHandler:
    """Scaffolded consumer for the 'audio.uploaded' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'audio.uploaded'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-audio.uploaded-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
audioUploadedHandler_handler = AudioUploadedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(audioUploadedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': audioUploadedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: claim.approved
This handler is scaffolded - implement your business logic in the handle method
"""
class ClaimApprovedHandlerHandler:
    """Scaffolded consumer for the 'claim.approved' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'claim.approved'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-claim.approved-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
claimApprovedHandler_handler = ClaimApprovedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(claimApprovedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': claimApprovedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: claim.created
This handler is scaffolded - implement your business logic in the handle method
"""
class ClaimCreatedHandlerHandler:
    """Scaffolded consumer for the 'claim.created' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'claim.created'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-claim.created-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
claimCreatedHandler_handler = ClaimCreatedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(claimCreatedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': claimCreatedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: claim.rejected
This handler is scaffolded - implement your business logic in the handle method
"""
class ClaimRejectedHandlerHandler:
    """Scaffolded consumer for the 'claim.rejected' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'claim.rejected'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-claim.rejected-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
claimRejectedHandler_handler = ClaimRejectedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(claimRejectedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': claimRejectedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: claim.scrubbed
This handler is scaffolded - implement your business logic in the handle method
"""
class ClaimScrubbedHandlerHandler:
    """Scaffolded consumer for the 'claim.scrubbed' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'claim.scrubbed'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-claim.scrubbed-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
claimScrubbedHandler_handler = ClaimScrubbedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(claimScrubbedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': claimScrubbedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: claim.submitted
This handler is scaffolded - implement your business logic in the handle method
"""
class ClaimSubmittedHandlerHandler:
    """Scaffolded consumer for the 'claim.submitted' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'claim.submitted'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-claim.submitted-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
claimSubmittedHandler_handler = ClaimSubmittedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(claimSubmittedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': claimSubmittedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: code.mapped
This handler is scaffolded - implement your business logic in the handle method
"""
class CodeMappedHandlerHandler:
    """Scaffolded consumer for the 'code.mapped' topic.

    Subscribes a dedicated consumer group and forwards each decoded
    message to handle(), where the business logic belongs.
    """

    def __init__(self):
        self.topic = 'code.mapped'
        # Original assigned `'x' or 'x'` (a no-op); collapsed to the literal.
        self.group_id = 'test_project-code.mapped-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming (idempotent)."""
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Disconnect the consumer group and mark the handler stopped."""
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """
        Process one consumed event.

        Args:
            message: Kafka message dict with keys: topic, partition,
                offset, key, value (parsed JSON), headers, timestamp.

        Raises:
            Exception: re-raised so the consumer layer can apply its
                retry/DLQ policy.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here, e.g. update related
            # entities, send notifications, trigger workflows, update cache,
            # or write to the database (see process_event for examples).
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger the Kafka retry mechanism; implement
            # custom retry/DLQ logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """
        Template hook for event-specific business logic (currently a no-op).

        Args:
            event_id: Event ID
            event_data: Event payload

        Examples: send a notification, update a cache entry keyed by
        event_id, or append an audit-log record.
        """
        pass
# Singleton instance
codeMappedHandler_handler = CodeMappedHandlerHandler()

# Auto-start if enabled. asyncio.create_task requires a running event loop;
# at plain import time there is none and the original call raised
# RuntimeError, so guard it and fall back to requiring a manual start().
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop().create_task(codeMappedHandler_handler.start())
    except RuntimeError:
        logger.warning(
            'Event Handler: No running event loop at import; call start() manually',
            extra={'topic': codeMappedHandler_handler.topic}
        )

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: denial.pattern.detected
This handler is scaffolded - implement your business logic in the handle method
"""
class DenialPatternDetectedHandlerHandler:
    """Kafka consumer handler for topic 'denial.pattern.detected'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'denial.pattern.detected'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-denial.pattern.detected-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
denialPatternDetectedHandler_handler = DenialPatternDetectedHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await denialPatternDetectedHandler_handler.start() manually',
            extra={'topic': denialPatternDetectedHandler_handler.topic}
        )
    else:
        asyncio.create_task(denialPatternDetectedHandler_handler.start())

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: entity.extracted
This handler is scaffolded - implement your business logic in the handle method
"""
class EntityExtractedHandlerHandler:
    """Kafka consumer handler for topic 'entity.extracted'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'entity.extracted'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-entity.extracted-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
entityExtractedHandler_handler = EntityExtractedHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await entityExtractedHandler_handler.start() manually',
            extra={'topic': entityExtractedHandler_handler.topic}
        )
    else:
        asyncio.create_task(entityExtractedHandler_handler.start())

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: phi.accessed
This handler is scaffolded - implement your business logic in the handle method
"""
class PhiAccessedHandlerHandler:
    """Kafka consumer handler for topic 'phi.accessed'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'phi.accessed'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-phi.accessed-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
phiAccessedHandler_handler = PhiAccessedHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await phiAccessedHandler_handler.start() manually',
            extra={'topic': phiAccessedHandler_handler.topic}
        )
    else:
        asyncio.create_task(phiAccessedHandler_handler.start())

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: review.completed
This handler is scaffolded - implement your business logic in the handle method
"""
class ReviewCompletedHandlerHandler:
    """Kafka consumer handler for topic 'review.completed'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'review.completed'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-review.completed-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
reviewCompletedHandler_handler = ReviewCompletedHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await reviewCompletedHandler_handler.start() manually',
            extra={'topic': reviewCompletedHandler_handler.topic}
        )
    else:
        asyncio.create_task(reviewCompletedHandler_handler.start())

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: review.required
This handler is scaffolded - implement your business logic in the handle method
"""
class ReviewRequiredHandlerHandler:
    """Kafka consumer handler for topic 'review.required'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'review.required'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-review.required-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
reviewRequiredHandler_handler = ReviewRequiredHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await reviewRequiredHandler_handler.start() manually',
            extra={'topic': reviewRequiredHandler_handler.topic}
        )
    else:
        asyncio.create_task(reviewRequiredHandler_handler.start())

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: template.used
This handler is scaffolded - implement your business logic in the handle method
"""
class TemplateUsedHandlerHandler:
    """Kafka consumer handler for topic 'template.used'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'template.used'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-template.used-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
templateUsedHandler_handler = TemplateUsedHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await templateUsedHandler_handler.start() manually',
            extra={'topic': templateUsedHandler_handler.topic}
        )
    else:
        asyncio.create_task(templateUsedHandler_handler.start())

View File

@ -0,0 +1,154 @@
from src.infrastructure.kafka.consumer.service import kafka_consumer
from src.infrastructure.observability.logger import logger
import os
"""
Event Handler Template
Auto-generated handler for topic: transcript.completed
This handler is scaffolded - implement your business logic in the handle method
"""
class TranscriptCompletedHandlerHandler:
    """Kafka consumer handler for topic 'transcript.completed'.

    Auto-generated scaffold: wires the shared ``kafka_consumer`` service to
    :meth:`handle` under a dedicated consumer group. Business logic is TODO.
    """

    def __init__(self):
        self.topic = 'transcript.completed'
        # Fixed: original was "'x' or 'x'" — a no-op 'or' between identical
        # literals (likely a generator bug for an env-var fallback).
        self.group_id = 'test_project-transcript.completed-handler'
        self.is_running = False

    async def start(self):
        """Subscribe to the topic and begin consuming; idempotent.

        Raises:
            Exception: re-raised if the subscribe call fails.
        """
        try:
            if self.is_running:
                logger.warning('Event Handler: Already running', extra={'topic': self.topic})
                return
            await kafka_consumer.subscribe(
                self.group_id,
                [self.topic],
                self.handle,
                {
                    'fromBeginning': False,  # Start from latest offset
                    'sessionTimeout': 30000,
                    'heartbeatInterval': 3000
                }
            )
            self.is_running = True
            logger.info('Event Handler: Started', extra={'topic': self.topic, 'groupId': self.group_id})
        except Exception as e:
            logger.error('Event Handler: Start failed', extra={
                'topic': self.topic,
                'error': str(e)
            })
            raise

    async def stop(self):
        """Stop consuming and disconnect the consumer group; idempotent.

        Errors are logged but deliberately not re-raised so shutdown paths
        never fail here.
        """
        try:
            if not self.is_running:
                return
            await kafka_consumer.disconnect(self.group_id)
            self.is_running = False
            logger.info('Event Handler: Stopped', extra={'topic': self.topic})
        except Exception as e:
            logger.error('Event Handler: Stop failed', extra={
                'topic': self.topic,
                'error': str(e)
            })

    async def handle(self, message: dict):
        """Process one Kafka message.

        Args:
            message: dict with keys 'topic', 'partition', 'offset', 'key',
                'value' (parsed payload), 'headers', 'timestamp'.

        Raises:
            Exception: re-raised on processing failure to trigger the Kafka
                retry mechanism.
        """
        topic = message['topic']
        key = message.get('key')
        value = message.get('value', {})
        timestamp = message.get('timestamp')
        logger.info('Event Handler: Processing event', extra={
            'topic': topic,
            'key': key,
            'eventId': value.get('id') or value.get('eventId'),
            'timestamp': timestamp
        })
        try:
            # TODO: Implement your business logic here (update entities,
            # send notifications, trigger workflows, update cache, ...).
            # Example:
            # await self.process_event(value.get('id'), value)
            logger.info('Event Handler: Event processed successfully', extra={
                'topic': topic,
                'key': key,
                'eventId': value.get('id') or value.get('eventId')
            })
        except Exception as e:
            logger.error('Event Handler: Processing failed', extra={
                'topic': topic,
                'key': key,
                'error': str(e)
            })
            # Re-raise to trigger Kafka retry; implement custom retry/DLQ
            # logic here if needed.
            raise

    async def process_event(self, event_id: str, event_data: dict):
        """Scaffolded business-logic hook; currently a no-op.

        Args:
            event_id: Event ID.
            event_data: Event payload.
        """
        # Examples: send notification, update cache, write audit log.
        pass
# Singleton instance
transcriptCompletedHandler_handler = TranscriptCompletedHandlerHandler()
# Auto-start if enabled. Fixed: asyncio.create_task() requires a *running*
# event loop; at plain module-import time there is none and it raises
# RuntimeError, so guard the call instead of crashing the import.
if os.getenv('KAFKA_AUTO_START_HANDLERS', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            'Event Handler: no running event loop at import; await transcriptCompletedHandler_handler.start() manually',
            extra={'topic': transcriptCompletedHandler_handler.topic}
        )
    else:
        asyncio.create_task(transcriptCompletedHandler_handler.start())

View File

@ -0,0 +1,77 @@
from confluent_kafka import KafkaException
from confluent_kafka.admin import AdminClient
import os
import logging
logger = logging.getLogger(__name__)
"""
Kafka Configuration
Production-ready Kafka client configuration with error handling
"""
class KafkaConfig:
    """Builds the confluent_kafka client configuration from environment vars.

    Supports optional SSL and SASL sections; exposes accessors plus an async
    connection probe that uses the AdminClient.
    """

    def __init__(self):
        self.client_id = os.getenv('KAFKA_CLIENT_ID', 'test_project')
        # Fixed: strip whitespace and drop empty entries so values like
        # "host1:9092, host2:9092," do not yield broken broker strings.
        raw_brokers = os.getenv('KAFKA_BOOTSTRAP_SERVERS', 'localhost:9092')
        self.brokers = [b.strip() for b in raw_brokers.split(',') if b.strip()]
        self.config = {
            'bootstrap.servers': ','.join(self.brokers),
            'client.id': self.client_id,
            'acks': 'all',  # Wait for all replicas
            'retries': 8,
            'retry.backoff.ms': 100,
            # Single in-flight request preserves ordering under retries.
            'max.in.flight.requests.per.connection': 1,
            'enable.idempotence': True,
            'compression.type': 'snappy',
            'request.timeout.ms': 30000,
            'metadata.max.age.ms': 300000,
        }
        # SSL/TLS configuration (if needed)
        if os.getenv('KAFKA_SSL', 'false').lower() == 'true':
            self.config.update({
                'security.protocol': 'SSL',
                'ssl.ca.location': os.getenv('KAFKA_SSL_CA_LOCATION', ''),
                'ssl.certificate.location': os.getenv('KAFKA_SSL_CERT_LOCATION', ''),
                'ssl.key.location': os.getenv('KAFKA_SSL_KEY_LOCATION', ''),
            })
        # SASL authentication (if needed). NOTE(review): this forces
        # SASL_SSL and overrides a plain-SSL security.protocol set above —
        # confirm that is the intended precedence.
        sasl_mechanism = os.getenv('KAFKA_SASL_MECHANISM')
        if sasl_mechanism:
            self.config.update({
                'security.protocol': 'SASL_SSL',
                'sasl.mechanism': sasl_mechanism,  # 'PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'
                'sasl.username': os.getenv('KAFKA_SASL_USERNAME', ''),
                'sasl.password': os.getenv('KAFKA_SASL_PASSWORD', ''),
            })

    def get_config(self):
        """Return a shallow copy of the config dict (safe for callers to mutate)."""
        return self.config.copy()

    def get_client_id(self):
        """Return the configured client id."""
        return self.client_id

    def get_brokers(self):
        """Return the parsed broker list."""
        return self.brokers

    async def test_connection(self):
        """Probe the cluster via AdminClient.list_topics; True on success.

        NOTE(review): list_topics is a blocking call inside an async method;
        consider run_in_executor if this runs on a hot event loop.
        """
        try:
            admin_client = AdminClient(self.config)
            metadata = admin_client.list_topics(timeout=10)
            logger.info(f'Kafka: Connection test successful, found {len(metadata.topics)} topics')
            return True
        except KafkaException as e:
            logger.error(f'Kafka: Connection test failed: {e}')
            return False
        except Exception as e:
            logger.error(f'Kafka: Connection test failed: {e}')
            return False
# Singleton instance
kafka_config = KafkaConfig()

View File

@ -0,0 +1,167 @@
from confluent_kafka import Producer, KafkaException
from confluent_kafka.admin import AdminClient
from src.infrastructure.kafka.kafka.config import kafka_config
from src.infrastructure.observability.logger import logger
import json
import os
from datetime import datetime
"""
Kafka Producer Service
Production-ready event producer with error handling, retries, and DLQ support
"""
class KafkaProducerService:
    """Kafka event producer with delivery callbacks and DLQ mirroring.

    Wraps a confluent_kafka Producer built from the shared kafka_config.
    Failed publishes are mirrored to '<topic>.dlq' when KAFKA_DLQ_ENABLED.
    """

    def __init__(self):
        self.producer = None
        self.is_connected = False
        # DLQ mirroring is on unless explicitly disabled via env.
        self.dlq_enabled = os.getenv('KAFKA_DLQ_ENABLED', 'true').lower() == 'true'
        self.max_retries = int(os.getenv('KAFKA_MAX_RETRIES', '3'))

    async def connect(self):
        """Create the underlying Producer; idempotent. Raises on failure."""
        try:
            if self.is_connected:
                logger.warning('Kafka Producer: Already connected')
                return
            config = kafka_config.get_config()
            self.producer = Producer(config)
            self.is_connected = True
            logger.info('Kafka Producer: Connected successfully')
        except Exception as e:
            logger.error(f'Kafka Producer: Connection failed: {e}')
            self.is_connected = False
            raise

    async def disconnect(self):
        """Flush pending messages and drop the producer; never raises."""
        try:
            if self.producer and self.is_connected:
                self.producer.flush(timeout=10)
                self.producer = None
                self.is_connected = False
                logger.info('Kafka Producer: Disconnected')
        except Exception as e:
            logger.error(f'Kafka Producer: Disconnect error: {e}')

    async def publish(self, topic: str, data: dict, key: str = None, headers: dict = None):
        """
        Publish event to Kafka topic
        Args:
            topic: Topic name
            data: Event data
            key: Optional message key (for partitioning)
            headers: Optional message headers
        Returns:
            bool: True if published successfully
        """
        try:
            if not self.is_connected:
                logger.warning('Kafka Producer: Not connected, attempting to connect...')
                await self.connect()
            # Envelope: payload plus producer-side timestamp and source id.
            message_value = {
                **data,
                'timestamp': datetime.utcnow().isoformat(),
                'source': kafka_config.get_client_id()
            }
            message_headers = {
                'content-type': 'application/json',
                'event-type': data.get('eventType', topic),
                **(headers or {})
            }
            message_key = key or data.get('id') or data.get('key')
            # Fixed: keys taken from data (e.g. integer ids) may not be str;
            # coerce before encoding instead of crashing on .encode().
            if message_key is not None and not isinstance(message_key, (str, bytes)):
                message_key = str(message_key)

            def delivery_callback(err, msg):
                if err:
                    logger.error(f'Kafka Producer: Message delivery failed: {err}')
                else:
                    logger.debug(f'Kafka Producer: Event published to {msg.topic()} partition {msg.partition()} offset {msg.offset()}')

            self.producer.produce(
                topic=topic,
                key=message_key.encode() if isinstance(message_key, str) else message_key,
                value=json.dumps(message_value).encode(),
                headers=message_headers,
                callback=delivery_callback
            )
            # Trigger delivery callbacks
            self.producer.poll(0)
            return True
        except Exception as e:
            logger.error(f'Kafka Producer: Publish error: {e}', extra={'topic': topic})
            # Send to DLQ if enabled
            if self.dlq_enabled:
                await self._send_to_dlq(topic, data, e)
            return False

    async def publish_batch(self, events: list):
        """
        Publish multiple events in batch
        Args:
            events: Array of {topic, data, key, headers}
        Returns:
            bool: True if all published successfully
        """
        try:
            if not self.is_connected:
                await self.connect()
            # Fixed: track per-event results — the original returned True
            # even when individual publishes failed.
            all_ok = True
            for event in events:
                ok = await self.publish(
                    topic=event['topic'],
                    data=event['data'],
                    key=event.get('key'),
                    headers=event.get('headers')
                )
                all_ok = all_ok and ok
            # Flush all messages
            self.producer.flush(timeout=10)
            logger.debug(f'Kafka Producer: Batch published {len(events)} events')
            return all_ok
        except Exception as e:
            logger.error(f'Kafka Producer: Batch publish error: {e}')
            return False

    async def _send_to_dlq(self, topic: str, data: dict, error: Exception):
        """Mirror a failed message to '<topic>.dlq'.

        Fixed: never DLQ a DLQ topic — the original recursed
        publish -> _send_to_dlq -> publish on repeated failures,
        appending '.dlq' without bound.
        """
        if topic.endswith('.dlq'):
            logger.error('Kafka Producer: Dropping failed DLQ message', extra={'topic': topic})
            return
        try:
            dlq_topic = f'{topic}.dlq'
            dlq_data = {
                'originalTopic': topic,
                'originalData': data,
                'error': {
                    'message': str(error),
                    'type': type(error).__name__,
                    'timestamp': datetime.utcnow().isoformat()
                },
                'retryCount': 0
            }
            await self.publish(dlq_topic, dlq_data)
            logger.warning(f'Kafka Producer: Sent to DLQ', extra={'originalTopic': topic, 'dlqTopic': dlq_topic})
        except Exception as dlq_error:
            logger.error(f'Kafka Producer: DLQ send failed: {dlq_error}', extra={'topic': topic})

    def is_ready(self):
        """Check if producer is connected"""
        return self.is_connected and self.producer is not None
# Singleton instance
kafka_producer = KafkaProducerService()
# Auto-connect on module load (optional). Fixed: asyncio.create_task()
# requires a *running* event loop; at plain module-import time there is none
# and it raises RuntimeError, so guard the call instead of crashing the
# import.
if os.getenv('KAFKA_AUTO_CONNECT', 'true').lower() != 'false':
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        logger.warning('Kafka Producer: no running event loop at import; await kafka_producer.connect() manually')
    else:
        asyncio.create_task(kafka_producer.connect())

View File

@ -0,0 +1,94 @@
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from pythonjsonlogger import jsonlogger
"""
Structured Logger Service
Production-ready logger with JSON formatting, file rotation, and log levels
"""
class Logger:
    """
    Thin wrapper around the stdlib logging module producing structured
    (JSON or text) console logs, with optional rotating file logs.

    Configuration (environment variables):
        LOG_LEVEL   -- minimum level name (default 'INFO')
        LOG_FORMAT  -- 'json' or 'text' (default 'json')
        LOG_DIR     -- directory for file logs (default './logs')
        LOG_TO_FILE -- 'true' enables file handlers outside production
        ENVIRONMENT -- 'production' also enables file handlers
    """

    def __init__(self):
        self.log_level = os.getenv('LOG_LEVEL', 'INFO').upper()
        self.log_format = os.getenv('LOG_FORMAT', 'json')  # 'json' or 'text'
        self.log_dir = os.getenv('LOG_DIR', './logs')
        # Ensure log directory exists (created even if file logging is off).
        os.makedirs(self.log_dir, exist_ok=True)
        # Create (or fetch) the process-wide named logger.
        self.logger = logging.getLogger('test_project')
        # Unrecognized LOG_LEVEL names silently fall back to INFO.
        self.logger.setLevel(getattr(logging, self.log_level, logging.INFO))
        # Remove existing handlers so re-instantiation/reload does not
        # produce duplicate log lines on the shared named logger.
        self.logger.handlers = []
        # Add handlers
        self._setup_handlers()

    def _setup_handlers(self):
        """Attach console handler (always) and rotating file handlers (conditionally)."""
        # Console handler (always enabled), honoring the configured level.
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(getattr(logging, self.log_level, logging.INFO))
        if self.log_format == 'json':
            # pythonjsonlogger renders each record as a JSON object;
            # timestamp=True injects an ISO timestamp field.
            formatter = jsonlogger.JsonFormatter(
                '%(timestamp)s %(level)s %(name)s %(message)s',
                timestamp=True
            )
        else:
            formatter = logging.Formatter(
                '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S'
            )
        console_handler.setFormatter(formatter)
        self.logger.addHandler(console_handler)
        # File handlers (only in production or if LOG_TO_FILE is enabled).
        # Both reuse the console formatter chosen above.
        if os.getenv('ENVIRONMENT') == 'production' or os.getenv('LOG_TO_FILE', 'false').lower() == 'true':
            # Combined log file: handler accepts DEBUG, but records below the
            # logger's own level never reach handlers — the logger level is
            # the effective floor.
            combined_handler = RotatingFileHandler(
                filename=os.path.join(self.log_dir, 'combined.log'),
                maxBytes=10 * 1024 * 1024,  # 10MB
                backupCount=5
            )
            combined_handler.setLevel(logging.DEBUG)
            combined_handler.setFormatter(formatter)
            self.logger.addHandler(combined_handler)
            # Error log file: ERROR and above only.
            error_handler = RotatingFileHandler(
                filename=os.path.join(self.log_dir, 'error.log'),
                maxBytes=10 * 1024 * 1024,  # 10MB
                backupCount=5
            )
            error_handler.setLevel(logging.ERROR)
            error_handler.setFormatter(formatter)
            self.logger.addHandler(error_handler)

    # NOTE(review): keys passed via `extra` must not collide with reserved
    # LogRecord attribute names (e.g. 'message', 'name', 'args') or the
    # stdlib logging machinery raises KeyError — confirm callers' keys.

    def info(self, message: str, extra: dict = None):
        """Log an info-level message with optional structured fields."""
        self.logger.info(message, extra=extra or {})

    def error(self, message: str, extra: dict = None):
        """Log an error-level message with optional structured fields."""
        self.logger.error(message, extra=extra or {})

    def warning(self, message: str, extra: dict = None):
        """Log a warning-level message with optional structured fields."""
        self.logger.warning(message, extra=extra or {})

    def debug(self, message: str, extra: dict = None):
        """Log a debug-level message with optional structured fields."""
        self.logger.debug(message, extra=extra or {})

    def critical(self, message: str, extra: dict = None):
        """Log a critical-level message with optional structured fields."""
        self.logger.critical(message, extra=extra or {})

# Singleton instance
logger = Logger()

View File

@ -0,0 +1,175 @@
from prometheus_client import Counter, Histogram, Gauge, generate_latest, REGISTRY
from prometheus_client import CollectorRegistry, multiprocess, start_http_server
from src.infrastructure.observability.logger import logger
import os
"""
Prometheus Metrics Service
Production-ready metrics collection with Prometheus format
"""
class MetricsService:
    """
    Prometheus metrics facade: registers HTTP, business, database, cache and
    Kafka metrics into the global default REGISTRY and (optionally) starts a
    metrics HTTP server on construction.

    NOTE(review): instantiating this class twice in one process would attempt
    to re-register the same metric names in the shared REGISTRY, which
    prometheus_client rejects — rely on the module-level singleton below.
    """

    def __init__(self):
        # Uses the process-global default registry.
        self.registry = REGISTRY
        # Default labels for all metrics.
        # NOTE(review): this dict is never attached to the metrics defined
        # below — either wire it in or remove it; confirm intended use.
        self.default_labels = {
            'service': os.getenv('APP_NAME', 'test_project'),
            'environment': os.getenv('ENVIRONMENT', 'development')
        }
        # Custom metrics
        self._initialize_custom_metrics()
        # Start metrics server if enabled (side effect at construction time).
        if os.getenv('METRICS_ENABLED', 'true').lower() == 'true':
            self._start_metrics_server()

    def _initialize_custom_metrics(self):
        """Create and register all application metrics (names are process-global)."""
        # HTTP request metrics
        self.http_request_duration = Histogram(
            'http_request_duration_seconds',
            'Duration of HTTP requests in seconds',
            ['method', 'route', 'status_code'],
            buckets=[0.1, 0.5, 1, 2, 5, 10, 30]
        )
        self.http_request_total = Counter(
            'http_requests_total',
            'Total number of HTTP requests',
            ['method', 'route', 'status_code']
        )
        # Business metrics
        self.business_events_total = Counter(
            'business_events_total',
            'Total number of business events',
            ['event_type', 'status']
        )
        # Database metrics (finer buckets than HTTP — queries are faster).
        self.database_query_duration = Histogram(
            'database_query_duration_seconds',
            'Duration of database queries in seconds',
            ['operation', 'table'],
            buckets=[0.01, 0.05, 0.1, 0.5, 1, 2, 5]
        )
        # Cache metrics
        self.cache_hits = Counter(
            'cache_hits_total',
            'Total number of cache hits',
            ['cache_type']
        )
        self.cache_misses = Counter(
            'cache_misses_total',
            'Total number of cache misses',
            ['cache_type']
        )
        # Kafka metrics
        self.kafka_messages_published = Counter(
            'kafka_messages_published_total',
            'Total number of Kafka messages published',
            ['topic', 'status']
        )
        self.kafka_messages_consumed = Counter(
            'kafka_messages_consumed_total',
            'Total number of Kafka messages consumed',
            ['topic', 'status']
        )

    def _start_metrics_server(self):
        """Start the Prometheus scrape endpoint in a background thread.

        NOTE(review): the default port 9090 is also the conventional port of
        the Prometheus server itself — confirm it does not clash locally.
        """
        try:
            port = int(os.getenv('PROMETHEUS_PORT', '9090'))
            start_http_server(port)
            logger.info(f'Metrics: Prometheus metrics server started on port {port}')
        except Exception as e:
            # Metrics are best-effort: a failed server start is logged, not fatal.
            logger.error(f'Metrics: Failed to start metrics server: {e}')

    def record_http_request(self, method: str, route: str, status_code: int, duration: float):
        """
        Record one HTTP request observation (duration histogram + counter).

        Args:
            method: HTTP method.
            route: Normalized route path (IDs collapsed by the caller).
            status_code: HTTP status code (stringified by the client library).
            duration: Duration in seconds.
        """
        self.http_request_duration.labels(method=method, route=route, status_code=status_code).observe(duration)
        self.http_request_total.labels(method=method, route=route, status_code=status_code).inc()

    def record_business_event(self, event_type: str, status: str = 'success'):
        """
        Count one business event.

        Args:
            event_type: Event type.
            status: Event status ('success' or 'error').
        """
        self.business_events_total.labels(event_type=event_type, status=status).inc()

    def record_database_query(self, operation: str, table: str, duration: float):
        """
        Record one database query duration observation.

        Args:
            operation: Operation type (select, insert, update, delete).
            table: Table name.
            duration: Duration in seconds.
        """
        self.database_query_duration.labels(operation=operation, table=table).observe(duration)

    def record_cache_hit(self, cache_type: str = 'redis'):
        """
        Count one cache hit.

        Args:
            cache_type: Cache type (e.g. 'redis', 'memory').
        """
        self.cache_hits.labels(cache_type=cache_type).inc()

    def record_cache_miss(self, cache_type: str = 'redis'):
        """
        Count one cache miss.

        Args:
            cache_type: Cache type.
        """
        self.cache_misses.labels(cache_type=cache_type).inc()

    def record_kafka_published(self, topic: str, status: str = 'success'):
        """
        Count one published Kafka message.

        Args:
            topic: Topic name.
            status: Status ('success' or 'error').
        """
        self.kafka_messages_published.labels(topic=topic, status=status).inc()

    def record_kafka_consumed(self, topic: str, status: str = 'success'):
        """
        Count one consumed Kafka message.

        Args:
            topic: Topic name.
            status: Status ('success' or 'error').
        """
        self.kafka_messages_consumed.labels(topic=topic, status=status).inc()

    def get_metrics(self):
        """Render all registered metrics in the Prometheus text exposition format."""
        return generate_latest(self.registry)

    def get_registry(self):
        """Return the underlying (global) metrics registry."""
        return self.registry

# Singleton instance
metrics_service = MetricsService()

View File

@ -0,0 +1,88 @@
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.exporter.jaeger.thrift import JaegerExporter
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.sampling import TraceIdRatioBased
from src.infrastructure.observability.logger import logger
import os
"""
OpenTelemetry Tracer Service
Production-ready distributed tracing with OpenTelemetry
Note: This is optional and only enabled if OBSERVABILITY_TRACING_ENABLED=true
"""
class TracerService:
    """
    OpenTelemetry tracing bootstrap.

    Disabled unless OBSERVABILITY_TRACING_ENABLED=true. All initialization
    failures are swallowed because tracing is optional infrastructure.
    """

    def __init__(self):
        self.tracer_provider = None
        self.is_enabled = os.getenv('OBSERVABILITY_TRACING_ENABLED', 'false').lower() == 'true'
        # Fraction of traces to sample (0.0 - 1.0).
        self.sample_rate = float(os.getenv('TRACING_SAMPLE_RATE', '0.1'))

    def initialize(self):
        """Initialize the OpenTelemetry SDK and install the global tracer provider."""
        if not self.is_enabled:
            logger.info('Tracer: Tracing disabled, skipping initialization')
            return
        try:
            # Resource attributes identify this service in the tracing backend.
            resource = Resource.create({
                'service.name': os.getenv('APP_NAME', 'test_project'),
                'service.version': os.getenv('APP_VERSION', '1.0.0'),
                'deployment.environment': os.getenv('ENVIRONMENT', 'development')
            })
            self.tracer_provider = TracerProvider(resource=resource, sampler=self._get_sampler())
            # Export spans in batches, off the request hot path.
            span_processor = BatchSpanProcessor(self._get_trace_exporter())
            self.tracer_provider.add_span_processor(span_processor)
            # Set global tracer provider
            trace.set_tracer_provider(self.tracer_provider)
            logger.info('Tracer: OpenTelemetry SDK initialized')
        except Exception as e:
            logger.error(f'Tracer: Initialization failed: {e}')
            # Don't raise - tracing is optional

    def _get_trace_exporter(self):
        """Build the span exporter selected by TRACING_EXPORTER (console|jaeger|otlp)."""
        exporter_type = os.getenv('TRACING_EXPORTER', 'console')
        if exporter_type == 'jaeger':
            # Fixed: the collector endpoint (JAEGER_ENDPOINT) was read into an
            # unused local while the agent host/port were hard-coded. Make the
            # agent address configurable, keeping the previous defaults.
            return JaegerExporter(
                agent_host_name=os.getenv('JAEGER_AGENT_HOST', 'localhost'),
                agent_port=int(os.getenv('JAEGER_AGENT_PORT', '6831'))
            )
        elif exporter_type == 'otlp':
            otlp_endpoint = os.getenv('OTLP_ENDPOINT', 'http://localhost:4317')
            return OTLPSpanExporter(endpoint=otlp_endpoint)
        else:
            # Console exporter for development
            return ConsoleSpanExporter()

    def _get_sampler(self):
        """Probability sampler keyed off the trace ID (deterministic per trace)."""
        return TraceIdRatioBased(self.sample_rate)

    def shutdown(self):
        """Flush and shut down the tracer provider, if one was installed."""
        if self.tracer_provider:
            self.tracer_provider.shutdown()
            logger.info('Tracer: Shutdown complete')

    def get_tracer(self, name: str):
        """Return a tracer for `name`; a no-op tracer when tracing is disabled."""
        if not self.is_enabled:
            return trace.NoOpTracer()
        return trace.get_tracer(name)
# Singleton instance
tracer_service = TracerService()
# Auto-initialize if enabled
# (runs at import time; initialize() is a no-op unless
# OBSERVABILITY_TRACING_ENABLED=true, so importing stays cheap by default)
if tracer_service.is_enabled:
    tracer_service.initialize()

View File

@ -0,0 +1,225 @@
from src.infrastructure.redis.redis.client import redis_client
from src.infrastructure.observability.logger import logger
import json
import os
"""
Cache Service
Production-ready caching service with TTL, namespacing, and error handling
"""
class CacheService:
    """
    Redis-backed cache with TTLs, key namespacing, and fail-soft semantics:
    every operation degrades gracefully (None/False/0) when Redis is
    unavailable or errors, so the cache can never take the application down.
    """

    def __init__(self):
        # Default TTL in seconds (1 hour) and key namespace prefix.
        self.default_ttl = int(os.getenv('CACHE_DEFAULT_TTL', '3600'))
        self.key_prefix = os.getenv('CACHE_KEY_PREFIX', 'test_project:cache:')

    async def get(self, key: str):
        """
        Get a value from the cache.

        Args:
            key: Cache key (prefix added automatically).

        Returns:
            JSON-decoded value, the raw string if not JSON, or None on miss/error.
        """
        try:
            if not redis_client.is_ready():
                logger.warning('Cache: Redis not ready, skipping cache get', extra={'key': key})
                return None
            full_key = self._build_key(key)
            client = redis_client.get_client()
            value = await client.get(full_key)
            if value is None:
                return None
            try:
                return json.loads(value)
            except (json.JSONDecodeError, TypeError):
                # Not JSON (e.g. a raw string stored by set()); return as-is.
                # NOTE(review): a raw string that *looks* like JSON will be
                # decoded on read — asymmetric with set(); confirm acceptable.
                return value
        except Exception as e:
            logger.error(f'Cache: Get error: {e}', extra={'key': key})
            return None  # Fail gracefully

    async def set(self, key: str, value, ttl: int = None):
        """
        Set a value in the cache.

        Args:
            key: Cache key.
            value: Stored as-is if a str, otherwise JSON-encoded.
            ttl: TTL in seconds; None means default_ttl. Fixed: `ttl or
                 default` previously coerced an explicit ttl=0 to the default
                 silently — now 0 is passed through (Redis rejects it, which
                 surfaces the caller error as a logged failure).

        Returns:
            bool: True if stored successfully.
        """
        try:
            if not redis_client.is_ready():
                logger.warning('Cache: Redis not ready, skipping cache set', extra={'key': key})
                return False
            full_key = self._build_key(key)
            client = redis_client.get_client()
            cache_value = value if isinstance(value, str) else json.dumps(value)
            expiration = self.default_ttl if ttl is None else ttl
            await client.setex(full_key, expiration, cache_value)
            return True
        except Exception as e:
            logger.error(f'Cache: Set error: {e}', extra={'key': key})
            return False  # Fail gracefully

    async def delete(self, key: str):
        """
        Delete a value from the cache.

        Returns:
            bool: True if a key was actually removed.
        """
        try:
            if not redis_client.is_ready():
                return False
            full_key = self._build_key(key)
            client = redis_client.get_client()
            result = await client.delete(full_key)
            return result > 0
        except Exception as e:
            logger.error(f'Cache: Delete error: {e}', extra={'key': key})
            return False

    async def delete_pattern(self, pattern: str):
        """
        Delete all keys matching a Redis glob pattern (e.g. 'user:*').

        Returns:
            int: Number of keys deleted.
        """
        try:
            if not redis_client.is_ready():
                return 0
            full_pattern = self._build_key(pattern)
            client = redis_client.get_client()
            # SCAN (not KEYS) to avoid blocking Redis on large keyspaces.
            keys = [key async for key in client.scan_iter(match=full_pattern)]
            if not keys:
                return 0
            return await client.delete(*keys)
        except Exception as e:
            logger.error(f'Cache: Delete pattern error: {e}', extra={'pattern': pattern})
            return 0

    async def exists(self, key: str):
        """Return True if the key exists in the cache."""
        try:
            if not redis_client.is_ready():
                return False
            full_key = self._build_key(key)
            client = redis_client.get_client()
            result = await client.exists(full_key)
            return result > 0
        except Exception as e:
            logger.error(f'Cache: Exists error: {e}', extra={'key': key})
            return False

    async def get_or_set(self, key: str, fetch_fn, ttl: int = None):
        """
        Cache-aside helper: return the cached value, or fetch, cache and return it.

        Args:
            key: Cache key.
            fetch_fn: Async zero-arg function producing the value on a miss.
            ttl: TTL in seconds (forwarded to set()).

        Returns:
            Cached or freshly fetched value. On cache failure the fetch still runs.
        """
        try:
            cached = await self.get(key)
            if cached is not None:
                return cached
            # Miss: fetch and populate. A fetched value of None is stored as
            # JSON null and will read back as a miss next time.
            value = await fetch_fn()
            await self.set(key, value, ttl)
            return value
        except Exception as e:
            logger.error(f'Cache: GetOrSet error: {e}', extra={'key': key})
            # If cache fails, still try to fetch
            return await fetch_fn()

    async def increment(self, key: str, amount: int = 1):
        """
        Atomically increment a numeric value.

        Args:
            key: Cache key.
            amount: Increment step (default 1).

        Returns:
            int: New value after increment, or None on failure.
        """
        try:
            if not redis_client.is_ready():
                return None
            full_key = self._build_key(key)
            client = redis_client.get_client()
            return await client.incrby(full_key, amount)
        except Exception as e:
            logger.error(f'Cache: Increment error: {e}', extra={'key': key})
            return None

    async def expire(self, key: str, ttl: int):
        """
        Set an expiration on an existing key.

        Returns:
            bool: True if the key exists and the TTL was set.
        """
        try:
            if not redis_client.is_ready():
                return False
            full_key = self._build_key(key)
            client = redis_client.get_client()
            result = await client.expire(full_key, ttl)
            return result
        except Exception as e:
            logger.error(f'Cache: Expire error: {e}', extra={'key': key})
            return False

    def _build_key(self, key: str):
        """Prefix the key with the configured cache namespace."""
        return f'{self.key_prefix}{key}'

# Singleton instance
cache_service = CacheService()

View File

@ -0,0 +1,72 @@
import redis.asyncio as redis
from src.infrastructure.observability.logger import logger
import os
"""
Redis Client Service
Production-ready Redis connection with reconnection logic and error handling
"""
class RedisClient:
    """
    Async Redis connection holder with explicit connect/disconnect lifecycle.

    Configuration is read from REDIS_HOST / REDIS_PORT / REDIS_PASSWORD /
    REDIS_DB; the underlying client also retries on timeout and performs
    periodic health checks (see `config` below).
    """

    def __init__(self):
        # The redis.asyncio.Redis instance; None until connect() succeeds.
        self.client = None
        self.is_connected = False
        # NOTE(review): these counters are reset on connect but never drive
        # any retry loop in this class — dead state unless used elsewhere.
        self.reconnect_attempts = 0
        self.max_reconnect_attempts = 10
        self.config = {
            'host': os.getenv('REDIS_HOST', 'localhost'),
            'port': int(os.getenv('REDIS_PORT', '6379')),
            # Empty REDIS_PASSWORD env value is normalized to None (no auth).
            'password': os.getenv('REDIS_PASSWORD') or None,
            'db': int(os.getenv('REDIS_DB', '0')),
            # Return str instead of bytes from read commands.
            'decode_responses': True,
            'socket_connect_timeout': 5,
            'socket_timeout': 5,
            'retry_on_timeout': True,
            'health_check_interval': 30,
        }

    async def connect(self):
        """Connect to Redis and verify the connection with PING.

        Raises:
            Exception: whatever the underlying client raises on failure.
        """
        try:
            if self.is_connected:
                logger.info('Redis: Already connected')
                return
            self.client = redis.Redis(**self.config)
            # Test connection eagerly — redis.Redis() itself is lazy.
            await self.client.ping()
            self.is_connected = True
            self.reconnect_attempts = 0
            logger.info('Redis: Connected successfully')
        except Exception as e:
            self.is_connected = False
            logger.error(f'Redis: Connection failed: {e}')
            raise

    async def disconnect(self):
        """Close the connection and mark the client as disconnected."""
        if self.client:
            # NOTE(review): redis-py >= 5 deprecates close() in favor of
            # aclose() on the asyncio client — confirm the pinned version.
            await self.client.close()
            self.is_connected = False
            logger.info('Redis: Disconnected')

    def get_client(self):
        """Return the live client.

        Raises:
            ConnectionError: if connect() has not succeeded yet.
        """
        if not self.client or not self.is_connected:
            raise ConnectionError('Redis client not connected. Call connect() first.')
        return self.client

    def is_ready(self):
        """Return True if the client exists and is marked connected."""
        return self.is_connected and self.client is not None

    async def ping(self):
        """PING the server; raises ConnectionError if not ready."""
        if not self.is_ready():
            raise ConnectionError('Redis client not ready')
        return await self.client.ping()

# Singleton instance
redis_client = RedisClient()

View File

@ -0,0 +1,221 @@
from src.infrastructure.redis.redis.client import redis_client
from src.infrastructure.observability.logger import logger
import json
import os
from datetime import datetime
"""
Session Store Service
Redis-based session storage for JWT refresh tokens and user sessions
"""
class SessionStore:
    """
    Redis-backed session storage for user sessions and JWT refresh tokens.

    All operations fail soft (False/None/0) when Redis is unavailable, so
    session handling degrades instead of crashing the request path.
    """

    def __init__(self):
        self.key_prefix = os.getenv('SESSION_KEY_PREFIX', 'test_project:session:')
        # Default session lifetime: 7 days.
        self.default_ttl = int(os.getenv('SESSION_TTL', str(7 * 24 * 60 * 60)))

    async def set(self, session_id: str, session_data: dict, ttl: int = None):
        """
        Store session data as JSON with an expiration.

        Args:
            session_id: Session identifier (e.g. user ID or token hash).
            session_data: JSON-serializable session payload.
            ttl: TTL in seconds; None uses default_ttl. Fixed: `ttl or
                 default` previously coerced an explicit ttl=0 to the
                 default silently — 0 now passes through and surfaces as a
                 logged Redis error instead.

        Returns:
            bool: True if stored.
        """
        try:
            if not redis_client.is_ready():
                logger.warning('SessionStore: Redis not ready, skipping session set', extra={'sessionId': session_id})
                return False
            key = self._build_key(session_id)
            client = redis_client.get_client()
            value = json.dumps(session_data)
            expiration = self.default_ttl if ttl is None else ttl
            await client.setex(key, expiration, value)
            logger.debug('SessionStore: Session stored', extra={'sessionId': session_id, 'ttl': expiration})
            return True
        except Exception as e:
            logger.error(f'SessionStore: Set error: {e}', extra={'sessionId': session_id})
            return False

    async def get(self, session_id: str):
        """
        Get session data.

        Returns:
            dict: Stored session payload, or None if absent/unavailable.
        """
        try:
            if not redis_client.is_ready():
                return None
            key = self._build_key(session_id)
            client = redis_client.get_client()
            value = await client.get(key)
            if value is None:
                return None
            return json.loads(value)
        except Exception as e:
            logger.error(f'SessionStore: Get error: {e}', extra={'sessionId': session_id})
            return None

    async def delete(self, session_id: str):
        """
        Delete a session.

        Returns:
            bool: True if a key was removed.
        """
        try:
            if not redis_client.is_ready():
                return False
            key = self._build_key(session_id)
            client = redis_client.get_client()
            result = await client.delete(key)
            logger.debug('SessionStore: Session deleted', extra={'sessionId': session_id})
            return result > 0
        except Exception as e:
            logger.error(f'SessionStore: Delete error: {e}', extra={'sessionId': session_id})
            return False

    async def exists(self, session_id: str):
        """Return True if the session key exists."""
        try:
            if not redis_client.is_ready():
                return False
            key = self._build_key(session_id)
            client = redis_client.get_client()
            result = await client.exists(key)
            return result > 0
        except Exception as e:
            logger.error(f'SessionStore: Exists error: {e}', extra={'sessionId': session_id})
            return False

    async def refresh(self, session_id: str, ttl: int = None):
        """
        Extend a session's expiration.

        Args:
            session_id: Session identifier.
            ttl: New TTL in seconds; None uses default_ttl (ttl=0 is no
                 longer silently coerced to the default — same fix as set()).

        Returns:
            bool: True if the key existed and its TTL was updated.
        """
        try:
            if not redis_client.is_ready():
                return False
            key = self._build_key(session_id)
            client = redis_client.get_client()
            expiration = self.default_ttl if ttl is None else ttl
            result = await client.expire(key, expiration)
            return result
        except Exception as e:
            logger.error(f'SessionStore: Refresh error: {e}', extra={'sessionId': session_id})
            return False

    async def store_refresh_token(self, user_id: str, refresh_token: str, ttl: int = None):
        """
        Store a user's refresh token under 'refresh:<user_id>'.

        Returns:
            bool: True if stored.
        """
        session_data = {
            'refreshToken': refresh_token,
            'userId': user_id,
            # NOTE(review): datetime.utcnow() is naive and deprecated in
            # Python 3.12; kept for stored-format compatibility — consider
            # datetime.now(timezone.utc) after checking consumers.
            'createdAt': datetime.utcnow().isoformat()
        }
        return await self.set(f'refresh:{user_id}', session_data, ttl)

    async def get_refresh_token(self, user_id: str):
        """
        Get the stored refresh token for a user.

        Returns:
            str: Refresh token, or None if absent.
        """
        session_data = await self.get(f'refresh:{user_id}')
        return session_data.get('refreshToken') if session_data else None

    async def delete_refresh_token(self, user_id: str):
        """Delete the user's refresh token entry. Returns True if removed."""
        return await self.delete(f'refresh:{user_id}')

    async def delete_user_sessions(self, user_id: str):
        """
        Delete all session keys whose name contains ':<user_id>'.

        Returns:
            int: Number of keys deleted.
        """
        try:
            if not redis_client.is_ready():
                return 0
            pattern = self._build_key(f'*:{user_id}*')
            client = redis_client.get_client()
            # SCAN (not KEYS) to avoid blocking Redis on large keyspaces.
            keys = [key async for key in client.scan_iter(match=pattern)]
            if not keys:
                return 0
            deleted = await client.delete(*keys)
            logger.info('SessionStore: Deleted user sessions', extra={'userId': user_id, 'count': deleted})
            return deleted
        except Exception as e:
            logger.error(f'SessionStore: Delete user sessions error: {e}', extra={'userId': user_id})
            return 0

    def _build_key(self, session_id: str):
        """Prefix the session id with the configured namespace."""
        return f'{self.key_prefix}{session_id}'

# Singleton instance
session_store = SessionStore()

View File

@ -0,0 +1,71 @@
"""
Correlation ID Middleware for FastAPI
Adds unique request ID for distributed tracing
"""
from fastapi import Request
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import Response
import uuid
import time
from typing import Callable
class CorrelationIdMiddleware(BaseHTTPMiddleware):
    """Attach a correlation ID to every request and response for tracing."""

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        # Reuse an inbound ID when the caller supplied one; otherwise mint one.
        # NOTE(review): 'traceparent' carries W3C trace-context format, not a
        # plain ID — confirm downstream consumers tolerate that shape.
        incoming = (
            request.headers.get('x-correlation-id')
            or request.headers.get('x-request-id')
            or request.headers.get('traceparent')
        )
        correlation_id = incoming if incoming else self._generate_correlation_id()

        # Expose the ID (and request start time) to downstream handlers.
        request.state.correlation_id = correlation_id
        request.state.request_start_time = time.time()

        response = await call_next(request)

        # Echo the ID back so clients can correlate their logs with ours.
        response.headers['X-Correlation-ID'] = correlation_id
        response.headers['X-Request-ID'] = correlation_id
        return response

    @staticmethod
    def _generate_correlation_id() -> str:
        """Return '<epoch-millis>-<8 hex chars>', e.g. '1703423456789-a1b2c3d4'."""
        millis = int(time.time() * 1000)
        suffix = uuid.uuid4().hex[:8]
        return f"{millis}-{suffix}"
def get_correlation_id(request: Request) -> str:
    """
    Read the correlation ID that CorrelationIdMiddleware stored on the request.

    Args:
        request: FastAPI request object.

    Returns:
        The correlation ID, or 'unknown' if the middleware did not run.
    """
    try:
        return request.state.correlation_id
    except AttributeError:
        return 'unknown'
def generate_correlation_id() -> str:
    """
    Mint a fresh correlation ID (module-level convenience wrapper).

    Returns:
        A '<epoch-millis>-<hex>' correlation ID string.
    """
    # Delegate to the middleware's generator so the format stays in one place.
    return CorrelationIdMiddleware._generate_correlation_id()

View File

@ -0,0 +1,51 @@
from fastapi import Request
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import Response
from src.infrastructure.observability.metrics import metrics_service
import time
import re
"""
Metrics Middleware
Records HTTP request metrics for Prometheus
"""
class MetricsMiddleware(BaseHTTPMiddleware):
    """Records per-request Prometheus metrics (duration + count)."""

    async def dispatch(self, request: Request, call_next):
        started = time.time()
        response = await call_next(request)
        elapsed = time.time() - started

        # Collapse path parameters so metric label cardinality stays bounded.
        normalized = self._normalize_route(request.url.path)
        try:
            metrics_service.record_http_request(
                method=request.method,
                route=normalized,
                status_code=response.status_code,
                duration=elapsed
            )
        except Exception:
            # Metrics recording must never break request handling.
            pass
        return response

    def _normalize_route(self, path: str) -> str:
        """Replace UUID and numeric path segments with ':id'."""
        if not path:
            return 'unknown'
        # Replace UUIDs
        path = re.sub(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}', ':id', path, flags=re.IGNORECASE)
        # Replace numeric IDs
        path = re.sub(r'/\d+', '/:id', path)
        # Collapse any runs of colons produced by the substitutions above.
        return re.sub(r':+', ':', path)

View File

@ -0,0 +1,137 @@
"""
Redis-based Rate Limiter Middleware for FastAPI
Production-ready rate limiting with Redis storage
"""
from fastapi import Request, HTTPException, status
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
from typing import Callable
import time
import os
from src.infrastructure.redis.redis.client import redis_client
from src.infrastructure.observability.logger import logger
class RateLimiterMiddleware(BaseHTTPMiddleware):
    """
    Fixed-window, per-client-IP rate limiter using Redis for distributed state.

    Fails open: if Redis is unavailable or errors, requests pass through.
    """

    def __init__(self, app, max_requests: int = 100, window_seconds: int = 900):
        """
        Initialize rate limiter.

        Args:
            app: FastAPI/Starlette application.
            max_requests: Maximum number of requests per window.
            window_seconds: Window length in seconds (default: 15 minutes).
        """
        super().__init__(app)
        self.max_requests = max_requests
        self.window_seconds = window_seconds
        self.redis_prefix = f"{os.getenv('PROJECT_NAME', 'test_project')}:ratelimit:"

    async def dispatch(self, request: Request, call_next: Callable):
        # Skip rate limiting for health checks
        if request.url.path in ["/health", "/health/live", "/health/ready"]:
            return await call_next(request)

        # Window is keyed on the client IP.
        client_ip = request.client.host if request.client else "unknown"
        redis_key = f"{self.redis_prefix}{client_ip}"

        try:
            if redis_client and hasattr(redis_client, 'get_client'):
                redis = redis_client.get_client()
                if redis:
                    current_count = await redis.get(redis_key)
                    if current_count is None:
                        # First request in this window: create counter with TTL.
                        await redis.setex(redis_key, self.window_seconds, 1)
                        return await call_next(request)
                    current_count = int(current_count)
                    if current_count >= self.max_requests:
                        # Rate limit exceeded: reject with Retry-After info.
                        ttl = await redis.ttl(redis_key)
                        logger.warning('Rate limit exceeded', extra={
                            'ip': client_ip,
                            'path': request.url.path,
                            'method': request.method,
                            'count': current_count
                        })
                        return JSONResponse(
                            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
                            content={
                                "success": False,
                                "error": "Too many requests from this IP, please try again later.",
                                "code": "RATE_LIMIT_EXCEEDED",
                                "retryAfter": ttl
                            },
                            headers={
                                "Retry-After": str(ttl),
                                "X-RateLimit-Limit": str(self.max_requests),
                                "X-RateLimit-Remaining": "0"
                            }
                        )
                    # NOTE(review): GET-then-INCR is not atomic; concurrent
                    # bursts can slightly exceed the limit. An INCR-first or
                    # Lua-script scheme would be exact — confirm if required.
                    await redis.incr(redis_key)
                    remaining = self.max_requests - current_count - 1
                    response = await call_next(request)
                    # Surface limit state to well-behaved clients.
                    response.headers["X-RateLimit-Limit"] = str(self.max_requests)
                    response.headers["X-RateLimit-Remaining"] = str(remaining)
                    return response
            # Fixed: previously, when no usable Redis client was available
            # (missing get_client or a falsy client), control fell off the end
            # of dispatch and returned None — an ASGI protocol error. Fail
            # open and serve the request instead.
            return await call_next(request)
        except Exception as e:
            # If Redis fails, log and continue (fail open).
            logger.error('Rate limiter error', extra={'error': str(e)})
            return await call_next(request)
def create_auth_rate_limiter(max_requests: int = 5, window_seconds: int = 900):
    """
    Create a strict rate limiter for authentication endpoints.
    Args:
        max_requests: Maximum requests per window (default: 5)
        window_seconds: Time window in seconds (default: 15 minutes)
    Returns:
        A RateLimiterMiddleware *subclass* (a class, not an instance),
        suitable for app.add_middleware(...)
    """
    class AuthRateLimiterMiddleware(RateLimiterMiddleware):
        def __init__(self, app):
            super().__init__(app, max_requests=max_requests, window_seconds=window_seconds)
            # Separate key namespace so auth counters do not share the
            # general per-IP window used by the base limiter.
            self.redis_prefix = f"{os.getenv('PROJECT_NAME', 'test_project')}:ratelimit:auth:"
    return AuthRateLimiterMiddleware
def create_api_rate_limiter(max_requests: int = 100, window_seconds: int = 900):
    """
    Create a configurable API rate limiter.
    Args:
        max_requests: Maximum requests per window (default: 100)
        window_seconds: Time window in seconds (default: 15 minutes)
    Returns:
        A RateLimiterMiddleware subclass (a class, not an instance),
        suitable for app.add_middleware(...)
    """
    class ApiRateLimiterMiddleware(RateLimiterMiddleware):
        def __init__(self, app):
            super().__init__(app, max_requests=max_requests, window_seconds=window_seconds)
            # Dedicated key namespace for general API traffic counters.
            self.redis_prefix = f"{os.getenv('PROJECT_NAME', 'test_project')}:ratelimit:api:"
    return ApiRateLimiterMiddleware

View File

@ -0,0 +1,60 @@
from fastapi import Request
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import Response
from src.infrastructure.observability.logger import logger
import uuid
import time
"""
Request Logging Middleware
Structured logging for all HTTP requests with correlation IDs
"""
class RequestLoggerMiddleware(BaseHTTPMiddleware):
    """Structured request/response logging with a correlation ID per request."""

    async def dispatch(self, request: Request, call_next):
        # Honor an inbound correlation ID; otherwise mint one.
        correlation_id = request.headers.get('x-correlation-id') or str(uuid.uuid4())
        started = time.time()

        logger.info('HTTP Request', extra={
            'correlationId': correlation_id,
            'method': request.method,
            'path': request.url.path,
            'query': str(request.query_params),
            'ip': request.client.host if request.client else None,
            'userAgent': request.headers.get('user-agent'),
            'userId': getattr(request.state, 'user_id', None),
            'tenantId': request.headers.get('x-tenant-id') or getattr(request.state, 'tenant_id', None)
        })

        response = await call_next(request)

        # user_id/tenant_id are re-read here on purpose: downstream auth
        # middleware may have populated request.state during call_next.
        elapsed_ms = (time.time() - started) * 1000
        response_log = {
            'correlationId': correlation_id,
            'method': request.method,
            'path': request.url.path,
            'statusCode': response.status_code,
            'duration': f'{elapsed_ms:.2f}ms',
            'ip': request.client.host if request.client else None,
            'userId': getattr(request.state, 'user_id', None),
            'tenantId': request.headers.get('x-tenant-id') or getattr(request.state, 'tenant_id', None)
        }

        # Echo the correlation ID back to the client.
        response.headers['x-correlation-id'] = correlation_id

        # Severity tracks the response class: 5xx -> error, 4xx -> warning.
        emit = (
            logger.error if response.status_code >= 500
            else logger.warning if response.status_code >= 400
            else logger.info
        )
        emit('HTTP Response', extra=response_log)
        return response

View File

@ -0,0 +1,87 @@
"""
Validation Middleware for Pydantic Schemas
Generic validation middleware factory that works with any Pydantic schema
"""
from fastapi import Request, HTTPException, status
from starlette.middleware.base import BaseHTTPMiddleware
from typing import Callable, Type, Any
from pydantic import BaseModel, ValidationError
import json
class ValidationMiddleware(BaseHTTPMiddleware):
    """
    Pass-through middleware placeholder: request validation is normally
    handled by FastAPI's dependency injection, so this layer does no work.
    """

    async def dispatch(self, request: Request, call_next: Callable):
        # Delegate straight to the next handler; nothing to validate here.
        return await call_next(request)
def validate_request(schema: Type[BaseModel], source: str = "body") -> Callable:
    """
    Build a FastAPI dependency that validates request data against a Pydantic schema.

    Args:
        schema: Pydantic model class to validate against.
        source: Where to read data from: 'body' (default), 'query', or 'path'.

    Returns:
        An async dependency that returns the validated model instance, raising
        HTTPException 422 (validation), 400 (bad JSON), or 500 (unexpected).
    """
    async def validate_dependency(request: Request) -> BaseModel:
        try:
            # Pull raw data from the requested source.
            if source == "query":
                payload = dict(request.query_params)
            elif source == "path":
                payload = dict(request.path_params)
            else:  # body
                raw = await request.body()
                payload = {} if not raw else await request.json()
            # Let the schema coerce and validate the payload.
            return schema(**payload)
        except ValidationError as e:
            raise HTTPException(
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                detail={
                    "success": False,
                    "error": "Validation failed",
                    "details": [
                        {
                            "path": ".".join(str(loc) for loc in error["loc"]),
                            "message": error["msg"],
                            "type": error["type"]
                        }
                        for error in e.errors()
                    ]
                }
            )
        except json.JSONDecodeError:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail={
                    "success": False,
                    "error": "Invalid JSON",
                    "message": "Request body must be valid JSON"
                }
            )
        except Exception as error:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail={
                    "success": False,
                    "error": "Validation middleware error",
                    "message": str(error)
                }
            )
    return validate_dependency

View File

@ -0,0 +1,66 @@
"""Migration for users
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``users`` table and its indexes.

    Fix over the generated version: ``idx_users_role_is_active``
    referenced ``role`` and ``is_active`` columns that were never
    defined, so index creation would fail at migration time; both
    columns are now created.
    """
    op.create_table(
        'users',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('username', sa.String(100), nullable=False, unique=True),
        sa.Column('email', sa.String(255), nullable=False, unique=True),
        sa.Column('password_hash', sa.String(255), nullable=False),
        sa.Column('first_name', sa.String(100), nullable=False),
        sa.Column('last_name', sa.String(100), nullable=False),
        sa.Column('specialty', sa.String(100), nullable=True),
        sa.Column('npi', sa.String(10), nullable=True),
        # Added: required by idx_users_role_is_active below. Type and
        # defaults are assumptions -- TODO confirm against the app model.
        sa.Column('role', sa.String(50), nullable=False, server_default='user'),
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('last_login_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_users_username', 'users', ['username'], unique=True)
    op.create_index('idx_users_email', 'users', ['email'], unique=True)
    op.create_index('idx_users_npi', 'users', ['npi'])
    op.create_index('idx_users_role_is_active', 'users', ['role', 'is_active'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_users_username', table_name='users')
    op.drop_index('idx_users_email', table_name='users')
    op.drop_index('idx_users_npi', table_name='users')
    op.drop_index('idx_users_role_is_active', table_name='users')
    op.drop_table('users')

View File

@ -0,0 +1,85 @@
"""Migration for patients
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``patients`` table and its indexes.

    Fix over the generated version: ``primary_payer_id`` and
    ``secondary_payer_id`` were each declared twice -- the second copy
    with a malformed ForeignKey target ``'.id'`` and a conflicting
    NOT NULL constraint. The duplicates are merged into single nullable
    columns with proper foreign keys to ``payers.id``.
    """
    op.create_table(
        'patients',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('mrn', sa.String(50), nullable=False, unique=True),
        sa.Column('first_name', sa.String(100), nullable=False),
        sa.Column('last_name', sa.String(100), nullable=False),
        sa.Column('date_of_birth', sa.Date(), nullable=False),
        sa.Column('gender', sa.String(255), nullable=False),
        sa.Column('ssn', sa.String(11), nullable=True),
        sa.Column('address_line1', sa.String(255), nullable=True),
        sa.Column('address_line2', sa.String(255), nullable=True),
        sa.Column('city', sa.String(100), nullable=True),
        sa.Column('state', sa.String(2), nullable=True),
        sa.Column('zip_code', sa.String(10), nullable=True),
        sa.Column('phone', sa.String(20), nullable=True),
        sa.Column('email', sa.String(255), nullable=True),
        # Insurance links: nullable because coverage may be unknown or
        # absent (secondary coverage especially).
        sa.Column('primary_payer_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('payers.id'), nullable=True),
        sa.Column('primary_insurance_member_id', sa.String(100), nullable=True),
        sa.Column('secondary_payer_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('payers.id'), nullable=True),
        sa.Column('secondary_insurance_member_id', sa.String(100), nullable=True),
        sa.Column('emr_patient_id', sa.String(100), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_patients_mrn', 'patients', ['mrn'], unique=True)
    op.create_index('idx_patients_last_name_first_name', 'patients', ['last_name', 'first_name'])
    op.create_index('idx_patients_date_of_birth', 'patients', ['date_of_birth'])
    op.create_index('idx_patients_primary_payer_id', 'patients', ['primary_payer_id'])
    op.create_index('idx_patients_emr_patient_id', 'patients', ['emr_patient_id'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_patients_mrn', table_name='patients')
    op.drop_index('idx_patients_last_name_first_name', table_name='patients')
    op.drop_index('idx_patients_date_of_birth', table_name='patients')
    op.drop_index('idx_patients_primary_payer_id', table_name='patients')
    op.drop_index('idx_patients_emr_patient_id', table_name='patients')
    op.drop_table('patients')

View File

@ -0,0 +1,89 @@
"""Migration for audio_recordings
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``audio_recordings`` table and its indexes.

    Fixes over the generated version: ``user_id``, ``patient_id`` and
    ``template_id`` were each declared twice (second copy with a
    malformed ``'.id'`` FK target and conflicting nullability) -- the
    duplicates are merged with proper FK targets; and the ``status``
    column required by ``idx_audio_recordings_status`` was missing.
    """
    op.create_table(
        'audio_recordings',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id'), nullable=False),
        sa.Column('patient_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('patients.id'), nullable=False),
        sa.Column('encounter_id', sa.String(100), nullable=True),
        sa.Column('file_path', sa.String(500), nullable=False),
        sa.Column('file_name', sa.String(255), nullable=False),
        sa.Column('file_format', sa.String(255), nullable=False),
        sa.Column('file_size_bytes', sa.BigInteger(), nullable=False),
        sa.Column('duration_seconds', sa.Integer(), nullable=False),
        sa.Column('recording_date', sa.DateTime(timezone=True), nullable=False),
        # Added: required by idx_audio_recordings_status below. Type and
        # default are assumptions -- TODO confirm against the app model.
        sa.Column('status', sa.String(50), nullable=False, server_default='pending'),
        sa.Column('encryption_key_id', sa.String(100), nullable=True),
        sa.Column('device_info', postgresql.JSONB(), nullable=True),
        sa.Column('noise_level', sa.String(255), nullable=True),
        # Nullable: a recording may be free-form (not template-based).
        sa.Column('template_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('procedure_templates.id'), nullable=True),
        sa.Column('is_template_based', sa.Boolean(), nullable=False),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_audio_recordings_user_id', 'audio_recordings', ['user_id'])
    op.create_index('idx_audio_recordings_patient_id', 'audio_recordings', ['patient_id'])
    op.create_index('idx_audio_recordings_status', 'audio_recordings', ['status'])
    op.create_index('idx_audio_recordings_recording_date', 'audio_recordings', ['recording_date'])
    op.create_index('idx_audio_recordings_encounter_id', 'audio_recordings', ['encounter_id'])
    op.create_index('idx_audio_recordings_template_id', 'audio_recordings', ['template_id'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_audio_recordings_user_id', table_name='audio_recordings')
    op.drop_index('idx_audio_recordings_patient_id', table_name='audio_recordings')
    op.drop_index('idx_audio_recordings_status', table_name='audio_recordings')
    op.drop_index('idx_audio_recordings_recording_date', table_name='audio_recordings')
    op.drop_index('idx_audio_recordings_encounter_id', table_name='audio_recordings')
    op.drop_index('idx_audio_recordings_template_id', table_name='audio_recordings')
    op.drop_table('audio_recordings')

View File

@ -0,0 +1,72 @@
"""Migration for transcripts
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``transcripts`` table and its indexes.

    Fixes over the generated version: ``audio_recording_id`` and
    ``corrected_by_user_id`` were each declared twice (second copy with
    a malformed ``'.id'`` FK target) -- the duplicates are merged; and
    the ``status`` column required by ``idx_transcripts_status`` was
    missing.
    """
    op.create_table(
        'transcripts',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        # One transcript per recording (unique FK).
        sa.Column('audio_recording_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('audio_recordings.id'), nullable=False, unique=True),
        sa.Column('raw_text', sa.Text(), nullable=False),
        sa.Column('corrected_text', sa.Text(), nullable=True),
        sa.Column('word_error_rate', sa.Numeric(10, 2), nullable=True),
        sa.Column('confidence_score', sa.Numeric(10, 2), nullable=False),
        sa.Column('timestamps', postgresql.JSONB(), nullable=True),
        sa.Column('low_confidence_segments', postgresql.JSONB(), nullable=True),
        sa.Column('processing_time_seconds', sa.Integer(), nullable=True),
        sa.Column('model_version', sa.String(50), nullable=False),
        # Added: required by idx_transcripts_status below. Type and
        # default are assumptions -- TODO confirm against the app model.
        sa.Column('status', sa.String(50), nullable=False, server_default='pending'),
        sa.Column('is_manually_corrected', sa.Boolean(), nullable=False),
        # Nullable: only populated after a manual correction.
        sa.Column('corrected_by_user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id'), nullable=True),
        sa.Column('corrected_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_transcripts_audio_recording_id', 'transcripts', ['audio_recording_id'], unique=True)
    op.create_index('idx_transcripts_status', 'transcripts', ['status'])
    op.create_index('idx_transcripts_confidence_score', 'transcripts', ['confidence_score'])
    op.create_index('idx_transcripts_created_at', 'transcripts', ['created_at'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_transcripts_audio_recording_id', table_name='transcripts')
    op.drop_index('idx_transcripts_status', table_name='transcripts')
    op.drop_index('idx_transcripts_confidence_score', table_name='transcripts')
    op.drop_index('idx_transcripts_created_at', table_name='transcripts')
    op.drop_table('transcripts')

View File

@ -0,0 +1,74 @@
"""Migration for clinical_entities
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``clinical_entities`` table and its indexes.

    Fix over the generated version: ``transcript_id`` and
    ``verified_by_user_id`` were each declared twice (second copy with
    a malformed ``'.id'`` FK target and conflicting nullability) -- the
    duplicates are merged into single columns with proper FK targets.
    """
    op.create_table(
        'clinical_entities',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('transcript_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('transcripts.id'), nullable=False),
        sa.Column('entity_type', sa.String(255), nullable=False),
        sa.Column('entity_text', sa.String(500), nullable=False),
        sa.Column('normalized_text', sa.String(500), nullable=True),
        sa.Column('confidence_score', sa.Numeric(10, 2), nullable=False),
        sa.Column('start_position', sa.Integer(), nullable=True),
        sa.Column('end_position', sa.Integer(), nullable=True),
        sa.Column('context', sa.Text(), nullable=True),
        # NOTE(review): 'metadata' is a reserved attribute name on
        # SQLAlchemy declarative models; fine for Core DDL here, but the
        # ORM model must map it under a different attribute name.
        sa.Column('metadata', postgresql.JSONB(), nullable=True),
        sa.Column('is_negated', sa.Boolean(), nullable=False),
        sa.Column('is_historical', sa.Boolean(), nullable=False),
        sa.Column('is_verified', sa.Boolean(), nullable=False),
        # Nullable: only populated once a user verifies the entity.
        sa.Column('verified_by_user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id'), nullable=True),
        sa.Column('verified_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_clinical_entities_transcript_id', 'clinical_entities', ['transcript_id'])
    op.create_index('idx_clinical_entities_entity_type', 'clinical_entities', ['entity_type'])
    op.create_index('idx_clinical_entities_confidence_score', 'clinical_entities', ['confidence_score'])
    op.create_index('idx_clinical_entities_is_verified', 'clinical_entities', ['is_verified'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_clinical_entities_transcript_id', table_name='clinical_entities')
    op.drop_index('idx_clinical_entities_entity_type', table_name='clinical_entities')
    op.drop_index('idx_clinical_entities_confidence_score', table_name='clinical_entities')
    op.drop_index('idx_clinical_entities_is_verified', table_name='clinical_entities')
    op.drop_table('clinical_entities')

View File

@ -0,0 +1,66 @@
"""Migration for icd10_codes
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``icd10_codes`` reference table and its indexes.

    Fix over the generated version: ``idx_icd10_codes_is_billable_is_active``
    referenced ``is_billable`` and ``is_active`` columns that were never
    defined; both are now created.
    """
    op.create_table(
        'icd10_codes',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('code', sa.String(10), nullable=False, unique=True),
        sa.Column('description', sa.String(500), nullable=False),
        sa.Column('short_description', sa.String(100), nullable=True),
        sa.Column('category', sa.String(100), nullable=True),
        # Added: required by idx_icd10_codes_is_billable_is_active below.
        # Defaults are assumptions -- TODO confirm against the app model.
        sa.Column('is_billable', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=True),
        sa.Column('termination_date', sa.Date(), nullable=True),
        sa.Column('version', sa.String(20), nullable=False),
        sa.Column('synonyms', postgresql.JSONB(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_icd10_codes_code', 'icd10_codes', ['code'], unique=True)
    op.create_index('idx_icd10_codes_is_billable_is_active', 'icd10_codes', ['is_billable', 'is_active'])
    op.create_index('idx_icd10_codes_category', 'icd10_codes', ['category'])
    op.create_index('idx_icd10_codes_description', 'icd10_codes', ['description'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_icd10_codes_code', table_name='icd10_codes')
    op.drop_index('idx_icd10_codes_is_billable_is_active', table_name='icd10_codes')
    op.drop_index('idx_icd10_codes_category', table_name='icd10_codes')
    op.drop_index('idx_icd10_codes_description', table_name='icd10_codes')
    op.drop_table('icd10_codes')

View File

@ -0,0 +1,78 @@
"""Migration for cpt_codes
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``cpt_codes`` reference table and its indexes.

    Fix over the generated version: ``idx_cpt_codes_is_active``
    referenced an ``is_active`` column that was never defined; it is
    now created.
    """
    op.create_table(
        'cpt_codes',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('code', sa.String(5), nullable=False, unique=True),
        sa.Column('description', sa.String(1000), nullable=False),
        sa.Column('short_description', sa.String(100), nullable=True),
        sa.Column('category', sa.String(100), nullable=True),
        sa.Column('specialty', sa.String(100), nullable=True),
        # Added: required by idx_cpt_codes_is_active below. Default is
        # an assumption -- TODO confirm against the app model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=True),
        sa.Column('termination_date', sa.Date(), nullable=True),
        sa.Column('version', sa.String(20), nullable=False),
        sa.Column('rvu_work', sa.Numeric(10, 2), nullable=True),
        sa.Column('rvu_facility', sa.Numeric(10, 2), nullable=True),
        sa.Column('rvu_non_facility', sa.Numeric(10, 2), nullable=True),
        sa.Column('global_period', sa.String(10), nullable=True),
        sa.Column('synonyms', postgresql.JSONB(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_cpt_codes_code', 'cpt_codes', ['code'], unique=True)
    op.create_index('idx_cpt_codes_is_active', 'cpt_codes', ['is_active'])
    op.create_index('idx_cpt_codes_category', 'cpt_codes', ['category'])
    op.create_index('idx_cpt_codes_specialty', 'cpt_codes', ['specialty'])
    op.create_index('idx_cpt_codes_description', 'cpt_codes', ['description'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_cpt_codes_code', table_name='cpt_codes')
    op.drop_index('idx_cpt_codes_is_active', table_name='cpt_codes')
    op.drop_index('idx_cpt_codes_category', table_name='cpt_codes')
    op.drop_index('idx_cpt_codes_specialty', table_name='cpt_codes')
    op.drop_index('idx_cpt_codes_description', table_name='cpt_codes')
    op.drop_table('cpt_codes')

View File

@ -0,0 +1,59 @@
"""Migration for cpt_modifiers
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``cpt_modifiers`` reference table and its indexes.

    Fix over the generated version: ``idx_cpt_modifiers_is_active``
    referenced an ``is_active`` column that was never defined; it is
    now created.
    """
    op.create_table(
        'cpt_modifiers',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('modifier', sa.String(2), nullable=False, unique=True),
        sa.Column('description', sa.String(500), nullable=False),
        sa.Column('short_description', sa.String(100), nullable=True),
        sa.Column('category', sa.String(100), nullable=True),
        # Added: required by idx_cpt_modifiers_is_active below. Default
        # is an assumption -- TODO confirm against the app model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=True),
        sa.Column('termination_date', sa.Date(), nullable=True),
        sa.Column('reimbursement_impact', sa.Numeric(10, 2), nullable=True),
        sa.Column('usage_rules', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_cpt_modifiers_modifier', 'cpt_modifiers', ['modifier'], unique=True)
    op.create_index('idx_cpt_modifiers_is_active', 'cpt_modifiers', ['is_active'])
    op.create_index('idx_cpt_modifiers_category', 'cpt_modifiers', ['category'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_cpt_modifiers_modifier', table_name='cpt_modifiers')
    op.drop_index('idx_cpt_modifiers_is_active', table_name='cpt_modifiers')
    op.drop_index('idx_cpt_modifiers_category', table_name='cpt_modifiers')
    op.drop_table('cpt_modifiers')

View File

@ -0,0 +1,72 @@
"""Migration for payers
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``payers`` table and its indexes.

    Fix over the generated version:
    ``idx_payers_is_active_priority_rank`` referenced an ``is_active``
    column that was never defined; it is now created.
    """
    op.create_table(
        'payers',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('payer_name', sa.String(255), nullable=False),
        sa.Column('payer_id', sa.String(50), nullable=False, unique=True),
        sa.Column('payer_type', sa.String(255), nullable=False),
        sa.Column('address_line1', sa.String(255), nullable=True),
        sa.Column('address_line2', sa.String(255), nullable=True),
        sa.Column('city', sa.String(100), nullable=True),
        sa.Column('state', sa.String(2), nullable=True),
        sa.Column('zip_code', sa.String(10), nullable=True),
        sa.Column('phone', sa.String(20), nullable=True),
        sa.Column('fax', sa.String(20), nullable=True),
        sa.Column('email', sa.String(255), nullable=True),
        sa.Column('website', sa.String(255), nullable=True),
        # Added: required by idx_payers_is_active_priority_rank below.
        # Default is an assumption -- TODO confirm against the app model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('priority_rank', sa.Integer(), nullable=True),
        sa.Column('notes', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_payers_payer_id', 'payers', ['payer_id'], unique=True)
    op.create_index('idx_payers_payer_name', 'payers', ['payer_name'])
    op.create_index('idx_payers_payer_type', 'payers', ['payer_type'])
    op.create_index('idx_payers_is_active_priority_rank', 'payers', ['is_active', 'priority_rank'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_payers_payer_id', table_name='payers')
    op.drop_index('idx_payers_payer_name', table_name='payers')
    op.drop_index('idx_payers_payer_type', table_name='payers')
    op.drop_index('idx_payers_is_active_priority_rank', table_name='payers')
    op.drop_table('payers')

View File

@ -0,0 +1,74 @@
"""Migration for payer_rules
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``payer_rules`` table and its indexes.

    Fixes over the generated version: ``payer_id``,
    ``created_by_user_id`` and ``updated_by_user_id`` were each declared
    twice (second copy with a malformed ``'.id'`` FK target and
    conflicting nullability) -- the duplicates are merged; and the
    ``is_active`` / ``severity`` columns required by the indexes below
    were missing.
    """
    op.create_table(
        'payer_rules',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('payer_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('payers.id'), nullable=False),
        sa.Column('rule_name', sa.String(255), nullable=False),
        sa.Column('rule_type', sa.String(255), nullable=False),
        sa.Column('rule_description', sa.Text(), nullable=False),
        sa.Column('rule_logic', postgresql.JSONB(), nullable=False),
        sa.Column('affected_cpt_codes', postgresql.JSONB(), nullable=True),
        sa.Column('affected_icd10_codes', postgresql.JSONB(), nullable=True),
        # Added: required by the indexes below. Types and defaults are
        # assumptions -- TODO confirm against the app model.
        sa.Column('severity', sa.String(50), nullable=False, server_default='warning'),
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=False),
        sa.Column('termination_date', sa.Date(), nullable=True),
        sa.Column('created_by_user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id'), nullable=True),
        sa.Column('updated_by_user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id'), nullable=True),
        sa.Column('denial_count', sa.Integer(), nullable=False),
        sa.Column('last_denial_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_payer_rules_payer_id', 'payer_rules', ['payer_id'])
    op.create_index('idx_payer_rules_rule_type', 'payer_rules', ['rule_type'])
    op.create_index('idx_payer_rules_is_active_effective_date', 'payer_rules', ['is_active', 'effective_date'])
    op.create_index('idx_payer_rules_severity', 'payer_rules', ['severity'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_payer_rules_payer_id', table_name='payer_rules')
    op.drop_index('idx_payer_rules_rule_type', table_name='payer_rules')
    op.drop_index('idx_payer_rules_is_active_effective_date', table_name='payer_rules')
    op.drop_index('idx_payer_rules_severity', table_name='payer_rules')
    op.drop_table('payer_rules')

View File

@ -0,0 +1,58 @@
"""Migration for ncci_edits
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``ncci_edits`` table and its indexes.

    Fix over the generated version:
    ``idx_ncci_edits_is_active_effective_date`` referenced an
    ``is_active`` column that was never defined; it is now created.
    """
    op.create_table(
        'ncci_edits',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('column1_code', sa.String(5), nullable=False),
        sa.Column('column2_code', sa.String(5), nullable=False),
        sa.Column('edit_type', sa.String(255), nullable=False),
        sa.Column('modifier_indicator', sa.String(1), nullable=False),
        # Added: required by idx_ncci_edits_is_active_effective_date
        # below. Default is an assumption -- TODO confirm.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=False),
        sa.Column('deletion_date', sa.Date(), nullable=True),
        sa.Column('edit_rationale', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_ncci_edits_column1_code_column2_code', 'ncci_edits', ['column1_code', 'column2_code'], unique=True)
    op.create_index('idx_ncci_edits_edit_type', 'ncci_edits', ['edit_type'])
    op.create_index('idx_ncci_edits_is_active_effective_date', 'ncci_edits', ['is_active', 'effective_date'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_ncci_edits_column1_code_column2_code', table_name='ncci_edits')
    op.drop_index('idx_ncci_edits_edit_type', table_name='ncci_edits')
    op.drop_index('idx_ncci_edits_is_active_effective_date', table_name='ncci_edits')
    op.drop_table('ncci_edits')

View File

@ -0,0 +1,71 @@
"""Migration for lcds
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``lcds`` (Local Coverage Determinations) table.

    Fix over the generated version:
    ``idx_lcds_is_active_effective_date`` referenced an ``is_active``
    column that was never defined; it is now created.
    """
    op.create_table(
        'lcds',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('lcd_id', sa.String(20), nullable=False, unique=True),
        sa.Column('title', sa.String(500), nullable=False),
        sa.Column('contractor_name', sa.String(255), nullable=False),
        sa.Column('contractor_number', sa.String(20), nullable=False),
        sa.Column('jurisdiction', sa.String(10), nullable=False),
        sa.Column('coverage_description', sa.Text(), nullable=False),
        sa.Column('indications_and_limitations', sa.Text(), nullable=True),
        sa.Column('covered_cpt_codes', postgresql.JSONB(), nullable=True),
        sa.Column('covered_icd10_codes', postgresql.JSONB(), nullable=True),
        # Added: required by idx_lcds_is_active_effective_date below.
        # Default is an assumption -- TODO confirm against the app model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=False),
        sa.Column('termination_date', sa.Date(), nullable=True),
        sa.Column('last_review_date', sa.Date(), nullable=True),
        sa.Column('document_url', sa.String(500), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_lcds_lcd_id', 'lcds', ['lcd_id'], unique=True)
    op.create_index('idx_lcds_contractor_number', 'lcds', ['contractor_number'])
    op.create_index('idx_lcds_jurisdiction', 'lcds', ['jurisdiction'])
    op.create_index('idx_lcds_is_active_effective_date', 'lcds', ['is_active', 'effective_date'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_lcds_lcd_id', table_name='lcds')
    op.drop_index('idx_lcds_contractor_number', table_name='lcds')
    op.drop_index('idx_lcds_jurisdiction', table_name='lcds')
    op.drop_index('idx_lcds_is_active_effective_date', table_name='lcds')
    op.drop_table('lcds')

View File

@ -0,0 +1,54 @@
"""Migration for ncds
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``ncds`` (National Coverage Determinations) table.

    Fix over the generated version:
    ``idx_ncds_is_active_effective_date`` referenced an ``is_active``
    column that was never defined; it is now created.
    """
    op.create_table(
        'ncds',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('ncd_id', sa.String(20), nullable=False, unique=True),
        sa.Column('title', sa.String(500), nullable=False),
        sa.Column('coverage_description', sa.Text(), nullable=False),
        sa.Column('indications_and_limitations', sa.Text(), nullable=True),
        sa.Column('covered_cpt_codes', postgresql.JSONB(), nullable=True),
        sa.Column('covered_icd10_codes', postgresql.JSONB(), nullable=True),
        # Added: required by idx_ncds_is_active_effective_date below.
        # Default is an assumption -- TODO confirm against the app model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('effective_date', sa.Date(), nullable=False),
        sa.Column('termination_date', sa.Date(), nullable=True),
        sa.Column('last_review_date', sa.Date(), nullable=True),
        sa.Column('document_url', sa.String(500), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_ncds_ncd_id', 'ncds', ['ncd_id'], unique=True)
    op.create_index('idx_ncds_is_active_effective_date', 'ncds', ['is_active', 'effective_date'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_ncds_ncd_id', table_name='ncds')
    op.drop_index('idx_ncds_is_active_effective_date', table_name='ncds')
    op.drop_table('ncds')

View File

@ -0,0 +1,71 @@
"""Migration for procedure_templates
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``procedure_templates`` table and its indexes.

    Fixes over the generated version: ``created_by_user_id`` was
    declared twice (second copy with a malformed ``'.id'`` FK target
    and conflicting nullability) -- the duplicates are merged; and the
    ``is_active`` column required by
    ``idx_procedure_templates_is_active_usage_count`` was missing.
    """
    op.create_table(
        'procedure_templates',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('template_name', sa.String(255), nullable=False),
        sa.Column('specialty', sa.String(100), nullable=False),
        sa.Column('procedure_type', sa.String(100), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('default_cpt_codes', postgresql.JSONB(), nullable=False),
        sa.Column('default_icd10_codes', postgresql.JSONB(), nullable=False),
        sa.Column('default_modifiers', postgresql.JSONB(), nullable=True),
        sa.Column('medical_necessity_template', sa.Text(), nullable=True),
        sa.Column('documentation_requirements', sa.Text(), nullable=True),
        sa.Column('mdm_level', sa.String(255), nullable=True),
        # Added: required by idx_procedure_templates_is_active_usage_count
        # below. Default is an assumption -- TODO confirm.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('usage_count', sa.Integer(), nullable=False),
        sa.Column('created_by_user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id'), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_procedure_templates_template_name', 'procedure_templates', ['template_name'])
    op.create_index('idx_procedure_templates_specialty', 'procedure_templates', ['specialty'])
    op.create_index('idx_procedure_templates_procedure_type', 'procedure_templates', ['procedure_type'])
    op.create_index('idx_procedure_templates_is_active_usage_count', 'procedure_templates', ['is_active', 'usage_count'])


def downgrade() -> None:
    """Reverse of upgrade: drop the indexes, then the table."""
    op.drop_index('idx_procedure_templates_template_name', table_name='procedure_templates')
    op.drop_index('idx_procedure_templates_specialty', table_name='procedure_templates')
    op.drop_index('idx_procedure_templates_procedure_type', table_name='procedure_templates')
    op.drop_index('idx_procedure_templates_is_active_usage_count', table_name='procedure_templates')
    op.drop_table('procedure_templates')

View File

@ -0,0 +1,129 @@
"""Migration for claims
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'claims' table and its indexes.

    Fixes over the generated version:
    - duplicate sa.Column entries for patient_id, audio_recording_id,
      transcript_id, payer_id, created_by_user_id, reviewed_by_user_id and
      template_id are removed (PostgreSQL rejects a CREATE TABLE naming a
      column twice, and their ForeignKey('.id') target was malformed);
    - 'status' and 'scrubbing_status' are added, since the indexes below
      reference them.
    Foreign-key constraints are installed by the 023_add_foreign_keys
    migration; nullability here matches its ON DELETE actions.
    """
    op.create_table(
        'claims',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('claim_number', sa.String(50), nullable=False, unique=True),
        sa.Column('patient_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('audio_recording_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('transcript_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('payer_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('encounter_id', sa.String(100), nullable=True),
        sa.Column('service_date', sa.Date(), nullable=False),
        sa.Column('created_by_user_id', postgresql.UUID(as_uuid=True), nullable=False),
        # Claim lifecycle state; required by idx_claims_status below.
        # TODO(review): confirm the value set / default with the application model.
        sa.Column('status', sa.String(50), nullable=False),
        # Scrubbing pipeline state; required by idx_claims_scrubbing_status below.
        sa.Column('scrubbing_status', sa.String(50), nullable=True),
        sa.Column('diagnosis_codes', postgresql.JSONB(), nullable=False),
        sa.Column('procedure_codes', postgresql.JSONB(), nullable=False),
        sa.Column('modifiers', postgresql.JSONB(), nullable=True),
        sa.Column('mdm_level', sa.String(255), nullable=True),
        sa.Column('medical_necessity_justification', sa.Text(), nullable=True),
        sa.Column('total_charge_amount', sa.Numeric(10, 2), nullable=False),
        sa.Column('expected_reimbursement', sa.Numeric(10, 2), nullable=True),
        sa.Column('actual_reimbursement', sa.Numeric(10, 2), nullable=True),
        sa.Column('scrubbing_results', postgresql.JSONB(), nullable=True),
        sa.Column('scrubbing_failures', postgresql.JSONB(), nullable=True),
        sa.Column('corrective_actions', postgresql.JSONB(), nullable=True),
        sa.Column('confidence_score', sa.Numeric(10, 2), nullable=True),
        sa.Column('is_template_based', sa.Boolean(), nullable=False),
        sa.Column('template_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('reviewed_by_user_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('reviewed_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('submitted_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('paid_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('denial_reason', sa.Text(), nullable=True),
        sa.Column('denial_code', sa.String(50), nullable=True),
        sa.Column('notes', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_claims_claim_number', 'claims', ['claim_number'], unique=True)
    op.create_index('idx_claims_patient_id', 'claims', ['patient_id'])
    op.create_index('idx_claims_payer_id', 'claims', ['payer_id'])
    op.create_index('idx_claims_status', 'claims', ['status'])
    op.create_index('idx_claims_service_date', 'claims', ['service_date'])
    op.create_index('idx_claims_created_by_user_id', 'claims', ['created_by_user_id'])
    op.create_index('idx_claims_scrubbing_status', 'claims', ['scrubbing_status'])
    op.create_index('idx_claims_encounter_id', 'claims', ['encounter_id'])
    op.create_index('idx_claims_created_at', 'claims', ['created_at'])
def downgrade() -> None:
    """Drop the claims indexes in creation order, then the table."""
    index_names = (
        'idx_claims_claim_number',
        'idx_claims_patient_id',
        'idx_claims_payer_id',
        'idx_claims_status',
        'idx_claims_service_date',
        'idx_claims_created_by_user_id',
        'idx_claims_scrubbing_status',
        'idx_claims_encounter_id',
        'idx_claims_created_at',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='claims')
    op.drop_table('claims')

View File

@ -0,0 +1,92 @@
"""Migration for claim_reviews
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'claim_reviews' table and its indexes.

    Fix over the generated version: duplicate sa.Column entries for claim_id,
    reviewer_id and escalated_to_id are removed (PostgreSQL rejects duplicate
    column names, and their ForeignKey('.id') target was malformed).
    Foreign keys are installed by the 023_add_foreign_keys migration;
    escalated_to_id stays nullable to match its ON DELETE SET NULL.
    """
    op.create_table(
        'claim_reviews',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('claim_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('reviewer_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('review_status', sa.String(50), nullable=False),
        sa.Column('review_type', sa.String(50), nullable=False),
        sa.Column('confidence_threshold_triggered', sa.Boolean(), nullable=True),
        sa.Column('original_icd10_codes', postgresql.JSONB(), nullable=True),
        sa.Column('original_cpt_codes', postgresql.JSONB(), nullable=True),
        sa.Column('revised_icd10_codes', postgresql.JSONB(), nullable=True),
        sa.Column('revised_cpt_codes', postgresql.JSONB(), nullable=True),
        sa.Column('reviewer_notes', sa.Text(), nullable=True),
        sa.Column('flagged_issues', postgresql.JSONB(), nullable=True),
        sa.Column('corrective_actions', postgresql.JSONB(), nullable=True),
        sa.Column('review_duration_seconds', sa.Integer(), nullable=True),
        sa.Column('escalation_reason', sa.Text(), nullable=True),
        sa.Column('escalated_to_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('escalated_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('reviewed_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_claim_reviews_claim_id', 'claim_reviews', ['claim_id'])
    op.create_index('idx_claim_reviews_reviewer_id', 'claim_reviews', ['reviewer_id'])
    op.create_index('idx_claim_reviews_review_status', 'claim_reviews', ['review_status'])
    op.create_index('idx_claim_reviews_created_at', 'claim_reviews', ['created_at'])
    op.create_index('idx_claim_reviews_review_type', 'claim_reviews', ['review_type'])
    op.create_index('idx_claim_reviews_escalated_to_id', 'claim_reviews', ['escalated_to_id'])
def downgrade() -> None:
    """Drop the claim_reviews indexes in creation order, then the table."""
    index_names = (
        'idx_claim_reviews_claim_id',
        'idx_claim_reviews_reviewer_id',
        'idx_claim_reviews_review_status',
        'idx_claim_reviews_created_at',
        'idx_claim_reviews_review_type',
        'idx_claim_reviews_escalated_to_id',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='claim_reviews')
    op.drop_table('claim_reviews')

View File

@ -0,0 +1,97 @@
"""Migration for audit_logs
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'audit_logs' table and its indexes.

    Fix over the generated version: the duplicate user_id sa.Column (with a
    malformed ForeignKey('.id') and a contradictory NOT NULL) is removed.
    user_id stays nullable, matching the ON DELETE SET NULL foreign key added
    by the 023_add_foreign_keys migration.
    """
    op.create_table(
        'audit_logs',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('entity_type', sa.String(100), nullable=False),
        sa.Column('entity_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('action', sa.String(50), nullable=False),
        sa.Column('action_category', sa.String(50), nullable=True),
        sa.Column('old_values', postgresql.JSONB(), nullable=True),
        sa.Column('new_values', postgresql.JSONB(), nullable=True),
        sa.Column('changes_summary', sa.Text(), nullable=True),
        # 45 chars accommodates the longest textual IPv6 form.
        sa.Column('ip_address', sa.String(45), nullable=True),
        sa.Column('user_agent', sa.Text(), nullable=True),
        sa.Column('session_id', sa.String(255), nullable=True),
        sa.Column('request_id', sa.String(255), nullable=True),
        sa.Column('status', sa.String(20), nullable=False),
        sa.Column('error_message', sa.Text(), nullable=True),
        sa.Column('metadata', postgresql.JSONB(), nullable=True),
        sa.Column('phi_accessed', sa.Boolean(), nullable=True),
        sa.Column('compliance_flag', sa.Boolean(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_audit_logs_user_id', 'audit_logs', ['user_id'])
    op.create_index('idx_audit_logs_entity_type_entity_id', 'audit_logs', ['entity_type', 'entity_id'])
    op.create_index('idx_audit_logs_action', 'audit_logs', ['action'])
    op.create_index('idx_audit_logs_created_at', 'audit_logs', ['created_at'])
    op.create_index('idx_audit_logs_action_category', 'audit_logs', ['action_category'])
    op.create_index('idx_audit_logs_phi_accessed', 'audit_logs', ['phi_accessed'])
    op.create_index('idx_audit_logs_compliance_flag', 'audit_logs', ['compliance_flag'])
def downgrade() -> None:
    """Drop the audit_logs indexes in creation order, then the table."""
    index_names = (
        'idx_audit_logs_user_id',
        'idx_audit_logs_entity_type_entity_id',
        'idx_audit_logs_action',
        'idx_audit_logs_created_at',
        'idx_audit_logs_action_category',
        'idx_audit_logs_phi_accessed',
        'idx_audit_logs_compliance_flag',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='audit_logs')
    op.drop_table('audit_logs')

View File

@ -0,0 +1,105 @@
"""Migration for denial_patterns
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'denial_patterns' table and its indexes.

    Fixes over the generated version:
    - the duplicate payer_id sa.Column (with a malformed ForeignKey('.id'))
      is removed; the real foreign key is added by 023_add_foreign_keys;
    - 'is_active' is added, since idx_denial_patterns_is_active below
      references it.
    """
    op.create_table(
        'denial_patterns',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('payer_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('payer_name', sa.String(255), nullable=False),
        sa.Column('denial_code', sa.String(50), nullable=False),
        sa.Column('denial_reason', sa.Text(), nullable=False),
        sa.Column('denial_category', sa.String(100), nullable=True),
        sa.Column('icd10_code', sa.String(20), nullable=True),
        sa.Column('cpt_code', sa.String(20), nullable=True),
        sa.Column('modifier', sa.String(10), nullable=True),
        sa.Column('procedure_type', sa.String(100), nullable=True),
        sa.Column('specialty', sa.String(100), nullable=True),
        sa.Column('total_denied_amount', sa.Numeric(10, 2), nullable=True),
        sa.Column('first_occurrence_date', sa.Date(), nullable=False),
        sa.Column('last_occurrence_date', sa.Date(), nullable=False),
        sa.Column('risk_score', sa.Numeric(10, 2), nullable=True),
        sa.Column('resolution_strategy', sa.Text(), nullable=True),
        sa.Column('preventive_actions', postgresql.JSONB(), nullable=True),
        sa.Column('related_lcd_ncd', postgresql.JSONB(), nullable=True),
        sa.Column('notes', sa.Text(), nullable=True),
        # Required by idx_denial_patterns_is_active below.
        # TODO(review): confirm default with the application model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_denial_patterns_payer_id', 'denial_patterns', ['payer_id'])
    op.create_index('idx_denial_patterns_denial_code', 'denial_patterns', ['denial_code'])
    op.create_index('idx_denial_patterns_cpt_code', 'denial_patterns', ['cpt_code'])
    op.create_index('idx_denial_patterns_icd10_code', 'denial_patterns', ['icd10_code'])
    op.create_index('idx_denial_patterns_specialty', 'denial_patterns', ['specialty'])
    op.create_index('idx_denial_patterns_risk_score', 'denial_patterns', ['risk_score'])
    op.create_index('idx_denial_patterns_is_active', 'denial_patterns', ['is_active'])
    op.create_index('idx_denial_patterns_last_occurrence_date', 'denial_patterns', ['last_occurrence_date'])
def downgrade() -> None:
    """Drop the denial_patterns indexes in creation order, then the table."""
    index_names = (
        'idx_denial_patterns_payer_id',
        'idx_denial_patterns_denial_code',
        'idx_denial_patterns_cpt_code',
        'idx_denial_patterns_icd10_code',
        'idx_denial_patterns_specialty',
        'idx_denial_patterns_risk_score',
        'idx_denial_patterns_is_active',
        'idx_denial_patterns_last_occurrence_date',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='denial_patterns')
    op.drop_table('denial_patterns')

View File

@ -0,0 +1,92 @@
"""Migration for emr_integrations
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'emr_integrations' table and its indexes.

    Fixes over the generated version:
    - duplicate sa.Column entries for organization_id and created_by_id
      (with malformed ForeignKey('.id') targets) are removed; created_by_id
      stays nullable to match the ON DELETE SET NULL FK added by
      023_add_foreign_keys;
    - 'connection_status' is added, since
      idx_emr_integrations_connection_status below references it.
    """
    op.create_table(
        'emr_integrations',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('organization_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('emr_system', sa.String(100), nullable=False),
        sa.Column('emr_version', sa.String(50), nullable=True),
        sa.Column('integration_type', sa.String(50), nullable=False),
        sa.Column('fhir_base_url', sa.String(500), nullable=True),
        sa.Column('api_endpoint', sa.String(500), nullable=True),
        sa.Column('auth_type', sa.String(50), nullable=False),
        sa.Column('client_id', sa.String(255), nullable=True),
        sa.Column('client_secret_encrypted', sa.Text(), nullable=True),
        sa.Column('api_key_encrypted', sa.Text(), nullable=True),
        sa.Column('token_url', sa.String(500), nullable=True),
        sa.Column('scopes', postgresql.JSONB(), nullable=True),
        sa.Column('approval_status', sa.String(50), nullable=True),
        sa.Column('approval_date', sa.Date(), nullable=True),
        sa.Column('epic_approval_months_estimate', sa.Integer(), nullable=True),
        sa.Column('data_mappings', postgresql.JSONB(), nullable=True),
        sa.Column('supported_resources', postgresql.JSONB(), nullable=True),
        # Required by idx_emr_integrations_connection_status below.
        sa.Column('connection_status', sa.String(50), nullable=True),
        sa.Column('last_sync_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('last_sync_status', sa.String(50), nullable=True),
        sa.Column('last_error_message', sa.Text(), nullable=True),
        sa.Column('retry_count', sa.Integer(), nullable=True),
        sa.Column('rate_limit_per_minute', sa.Integer(), nullable=True),
        sa.Column('use_mock_data', sa.Boolean(), nullable=True),
        sa.Column('configuration_notes', sa.Text(), nullable=True),
        sa.Column('created_by_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_emr_integrations_organization_id', 'emr_integrations', ['organization_id'])
    op.create_index('idx_emr_integrations_emr_system', 'emr_integrations', ['emr_system'])
    op.create_index('idx_emr_integrations_connection_status', 'emr_integrations', ['connection_status'])
    op.create_index('idx_emr_integrations_approval_status', 'emr_integrations', ['approval_status'])
    op.create_index('idx_emr_integrations_last_sync_at', 'emr_integrations', ['last_sync_at'])
def downgrade() -> None:
    """Drop the emr_integrations indexes in creation order, then the table."""
    index_names = (
        'idx_emr_integrations_organization_id',
        'idx_emr_integrations_emr_system',
        'idx_emr_integrations_connection_status',
        'idx_emr_integrations_approval_status',
        'idx_emr_integrations_last_sync_at',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='emr_integrations')
    op.drop_table('emr_integrations')

View File

@ -0,0 +1,118 @@
"""Migration for rag_documents
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'rag_documents' table and its indexes.

    Fixes over the generated version:
    - duplicate sa.Column entries for payer_id, parent_document_id and
      uploaded_by_id (with malformed ForeignKey('.id') targets) are removed;
      all three stay nullable as first declared — the real FKs come from
      023_add_foreign_keys;
    - 'is_active' is added, since idx_rag_documents_is_active below
      references it.
    """
    op.create_table(
        'rag_documents',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('document_type', sa.String(100), nullable=False),
        sa.Column('title', sa.String(500), nullable=False),
        sa.Column('payer_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('payer_name', sa.String(255), nullable=True),
        sa.Column('specialty', sa.String(100), nullable=True),
        sa.Column('content', sa.Text(), nullable=False),
        sa.Column('content_hash', sa.String(64), nullable=True),
        sa.Column('embedding_vector', sa.String(255), nullable=True),
        sa.Column('chunk_index', sa.Integer(), nullable=True),
        sa.Column('parent_document_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('source_url', sa.String(1000), nullable=True),
        sa.Column('source_file_path', sa.String(1000), nullable=True),
        sa.Column('effective_date', sa.Date(), nullable=True),
        sa.Column('expiration_date', sa.Date(), nullable=True),
        sa.Column('version', sa.String(50), nullable=True),
        # Required by idx_rag_documents_is_active below.
        # TODO(review): confirm default with the application model.
        sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.text('true')),
        sa.Column('is_stale', sa.Boolean(), nullable=True),
        sa.Column('relevance_score', sa.Numeric(10, 2), nullable=True),
        sa.Column('usage_count', sa.Integer(), nullable=True),
        sa.Column('last_used_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('metadata', postgresql.JSONB(), nullable=True),
        sa.Column('tags', postgresql.JSONB(), nullable=True),
        sa.Column('uploaded_by_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_rag_documents_document_type', 'rag_documents', ['document_type'])
    op.create_index('idx_rag_documents_payer_id', 'rag_documents', ['payer_id'])
    op.create_index('idx_rag_documents_specialty', 'rag_documents', ['specialty'])
    op.create_index('idx_rag_documents_is_active', 'rag_documents', ['is_active'])
    op.create_index('idx_rag_documents_is_stale', 'rag_documents', ['is_stale'])
    op.create_index('idx_rag_documents_effective_date', 'rag_documents', ['effective_date'])
    op.create_index('idx_rag_documents_expiration_date', 'rag_documents', ['expiration_date'])
    op.create_index('idx_rag_documents_parent_document_id', 'rag_documents', ['parent_document_id'])
    op.create_index('idx_rag_documents_content_hash', 'rag_documents', ['content_hash'])
def downgrade() -> None:
    """Drop the rag_documents indexes in creation order, then the table."""
    index_names = (
        'idx_rag_documents_document_type',
        'idx_rag_documents_payer_id',
        'idx_rag_documents_specialty',
        'idx_rag_documents_is_active',
        'idx_rag_documents_is_stale',
        'idx_rag_documents_effective_date',
        'idx_rag_documents_expiration_date',
        'idx_rag_documents_parent_document_id',
        'idx_rag_documents_content_hash',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='rag_documents')
    op.drop_table('rag_documents')

View File

@ -0,0 +1,104 @@
"""Migration for confidence_scores
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'confidence_scores' table and its indexes.

    Fix over the generated version: the duplicate claim_id sa.Column (with a
    malformed ForeignKey('.id') and a contradictory NOT NULL) is removed.
    claim_id stays nullable as first declared; the real foreign key is added
    by the 023_add_foreign_keys migration.
    """
    op.create_table(
        'confidence_scores',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('entity_type', sa.String(100), nullable=False),
        sa.Column('entity_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('claim_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('score', sa.Numeric(10, 2), nullable=False),
        sa.Column('threshold_category', sa.String(50), nullable=False),
        sa.Column('model_name', sa.String(100), nullable=False),
        sa.Column('model_version', sa.String(50), nullable=True),
        sa.Column('prediction_value', sa.Text(), nullable=True),
        sa.Column('alternative_predictions', postgresql.JSONB(), nullable=True),
        sa.Column('features_used', postgresql.JSONB(), nullable=True),
        sa.Column('context_data', postgresql.JSONB(), nullable=True),
        sa.Column('requires_review', sa.Boolean(), nullable=True),
        sa.Column('review_reason', sa.Text(), nullable=True),
        sa.Column('human_feedback', sa.String(50), nullable=True),
        sa.Column('corrected_value', sa.Text(), nullable=True),
        sa.Column('feedback_notes', sa.Text(), nullable=True),
        sa.Column('processing_time_ms', sa.Integer(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_confidence_scores_entity_type_entity_id', 'confidence_scores', ['entity_type', 'entity_id'])
    op.create_index('idx_confidence_scores_claim_id', 'confidence_scores', ['claim_id'])
    op.create_index('idx_confidence_scores_score', 'confidence_scores', ['score'])
    op.create_index('idx_confidence_scores_threshold_category', 'confidence_scores', ['threshold_category'])
    op.create_index('idx_confidence_scores_requires_review', 'confidence_scores', ['requires_review'])
    op.create_index('idx_confidence_scores_human_feedback', 'confidence_scores', ['human_feedback'])
    op.create_index('idx_confidence_scores_model_name', 'confidence_scores', ['model_name'])
    op.create_index('idx_confidence_scores_created_at', 'confidence_scores', ['created_at'])
def downgrade() -> None:
    """Drop the confidence_scores indexes in creation order, then the table."""
    index_names = (
        'idx_confidence_scores_entity_type_entity_id',
        'idx_confidence_scores_claim_id',
        'idx_confidence_scores_score',
        'idx_confidence_scores_threshold_category',
        'idx_confidence_scores_requires_review',
        'idx_confidence_scores_human_feedback',
        'idx_confidence_scores_model_name',
        'idx_confidence_scores_created_at',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='confidence_scores')
    op.drop_table('confidence_scores')

View File

@ -0,0 +1,105 @@
"""Migration for claim_scrub_results
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the 'claim_scrub_results' table and its indexes.

    Fixes over the generated version:
    - the duplicate claim_id sa.Column (with a malformed ForeignKey('.id'))
      is removed; the real foreign key comes from 023_add_foreign_keys;
    - 'scrubbed_at' is added, since idx_claim_scrub_results_scrubbed_at
      below references it.
    """
    op.create_table(
        'claim_scrub_results',
        sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True, nullable=False),
        sa.Column('claim_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('scrub_status', sa.String(50), nullable=False),
        sa.Column('overall_risk_level', sa.String(50), nullable=True),
        sa.Column('total_checks', sa.Integer(), nullable=False),
        sa.Column('passed_checks', sa.Integer(), nullable=False),
        sa.Column('failed_checks', sa.Integer(), nullable=False),
        sa.Column('warning_checks', sa.Integer(), nullable=True),
        sa.Column('ncci_violations', postgresql.JSONB(), nullable=True),
        sa.Column('lcd_violations', postgresql.JSONB(), nullable=True),
        sa.Column('ncd_violations', postgresql.JSONB(), nullable=True),
        sa.Column('payer_rule_violations', postgresql.JSONB(), nullable=True),
        sa.Column('coding_errors', postgresql.JSONB(), nullable=True),
        sa.Column('medical_necessity_issues', postgresql.JSONB(), nullable=True),
        sa.Column('modifier_issues', postgresql.JSONB(), nullable=True),
        sa.Column('bundling_issues', postgresql.JSONB(), nullable=True),
        sa.Column('denial_risk_patterns', postgresql.JSONB(), nullable=True),
        sa.Column('corrective_actions', postgresql.JSONB(), nullable=True),
        sa.Column('suggested_codes', postgresql.JSONB(), nullable=True),
        sa.Column('rag_documents_used', postgresql.JSONB(), nullable=True),
        sa.Column('scrub_engine_version', sa.String(50), nullable=True),
        sa.Column('processing_time_ms', sa.Integer(), nullable=True),
        sa.Column('auto_fix_applied', sa.Boolean(), nullable=True),
        sa.Column('auto_fix_details', postgresql.JSONB(), nullable=True),
        sa.Column('requires_manual_review', sa.Boolean(), nullable=True),
        sa.Column('review_priority', sa.String(20), nullable=True),
        # Required by idx_claim_scrub_results_scrubbed_at below.
        sa.Column('scrubbed_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), onupdate=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    )
    op.create_index('idx_claim_scrub_results_claim_id', 'claim_scrub_results', ['claim_id'])
    op.create_index('idx_claim_scrub_results_scrub_status', 'claim_scrub_results', ['scrub_status'])
    op.create_index('idx_claim_scrub_results_overall_risk_level', 'claim_scrub_results', ['overall_risk_level'])
    op.create_index('idx_claim_scrub_results_requires_manual_review', 'claim_scrub_results', ['requires_manual_review'])
    op.create_index('idx_claim_scrub_results_review_priority', 'claim_scrub_results', ['review_priority'])
    op.create_index('idx_claim_scrub_results_scrubbed_at', 'claim_scrub_results', ['scrubbed_at'])
    op.create_index('idx_claim_scrub_results_created_at', 'claim_scrub_results', ['created_at'])
def downgrade() -> None:
    """Drop the claim_scrub_results indexes in creation order, then the table."""
    index_names = (
        'idx_claim_scrub_results_claim_id',
        'idx_claim_scrub_results_scrub_status',
        'idx_claim_scrub_results_overall_risk_level',
        'idx_claim_scrub_results_requires_manual_review',
        'idx_claim_scrub_results_review_priority',
        'idx_claim_scrub_results_scrubbed_at',
        'idx_claim_scrub_results_created_at',
    )
    for index_name in index_names:
        op.drop_index(index_name, table_name='claim_scrub_results')
    op.drop_table('claim_scrub_results')

View File

@ -0,0 +1,331 @@
"""Migration for 023_add_foreign_keyses
Revision ID: auto
Revises: None
Create Date: auto
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'auto'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Install every cross-table foreign-key constraint.

    Each entry is (constraint_name, source_table, referenced_table,
    local_columns, on_delete_action). Every constraint references the target
    table's 'id' column and uses ON UPDATE CASCADE; entries are applied in
    the original declaration order.
    """
    constraints = (
        ('fk_audio_recordings_user_id', 'audio_recordings', 'users', ['user_id'], 'CASCADE'),
        ('fk_claims_created_by_user_id', 'claims', 'users', ['created_by_user_id'], 'RESTRICT'),
        ('fk_patients_primary_payer_id', 'patients', 'payers', ['primary_payer_id'], 'SET NULL'),
        ('fk_patients_secondary_payer_id', 'patients', 'payers', ['secondary_payer_id'], 'SET NULL'),
        ('fk_audio_recordings_patient_id', 'audio_recordings', 'patients', ['patient_id'], 'CASCADE'),
        ('fk_claims_patient_id', 'claims', 'patients', ['patient_id'], 'CASCADE'),
        ('fk_audio_recordings_template_id', 'audio_recordings', 'procedure_templates', ['template_id'], 'SET NULL'),
        ('fk_transcripts_audio_recording_id', 'transcripts', 'audio_recordings', ['audio_recording_id'], 'CASCADE'),
        ('fk_transcripts_corrected_by_user_id', 'transcripts', 'users', ['corrected_by_user_id'], 'SET NULL'),
        ('fk_clinical_entities_transcript_id', 'clinical_entities', 'transcripts', ['transcript_id'], 'CASCADE'),
        ('fk_clinical_entities_verified_by_user_id', 'clinical_entities', 'users', ['verified_by_user_id'], 'SET NULL'),
        ('fk_payer_rules_payer_id', 'payer_rules', 'payers', ['payer_id'], 'CASCADE'),
        ('fk_payer_rules_created_by_user_id', 'payer_rules', 'users', ['created_by_user_id'], 'SET NULL'),
        ('fk_payer_rules_updated_by_user_id', 'payer_rules', 'users', ['updated_by_user_id'], 'SET NULL'),
        ('fk_procedure_templates_created_by_user_id', 'procedure_templates', 'users', ['created_by_user_id'], 'SET NULL'),
        ('fk_claims_audio_recording_id', 'claims', 'audio_recordings', ['audio_recording_id'], 'SET NULL'),
        ('fk_claims_transcript_id', 'claims', 'transcripts', ['transcript_id'], 'SET NULL'),
        ('fk_claims_payer_id', 'claims', 'payers', ['payer_id'], 'RESTRICT'),
        ('fk_claims_reviewed_by_user_id', 'claims', 'users', ['reviewed_by_user_id'], 'SET NULL'),
        ('fk_claims_template_id', 'claims', 'procedure_templates', ['template_id'], 'SET NULL'),
        ('fk_claim_reviews_claim_id', 'claim_reviews', 'claims', ['claim_id'], 'CASCADE'),
        ('fk_claim_reviews_reviewer_id', 'claim_reviews', 'users', ['reviewer_id'], 'RESTRICT'),
        ('fk_claim_reviews_escalated_to_id', 'claim_reviews', 'users', ['escalated_to_id'], 'SET NULL'),
        ('fk_audit_logs_user_id', 'audit_logs', 'users', ['user_id'], 'SET NULL'),
        ('fk_denial_patterns_payer_id', 'denial_patterns', 'payers', ['payer_id'], 'CASCADE'),
        ('fk_emr_integrations_created_by_id', 'emr_integrations', 'users', ['created_by_id'], 'SET NULL'),
        ('fk_rag_documents_payer_id', 'rag_documents', 'payers', ['payer_id'], 'CASCADE'),
        ('fk_rag_documents_parent_document_id', 'rag_documents', 'rag_documents', ['parent_document_id'], 'CASCADE'),
        ('fk_rag_documents_uploaded_by_id', 'rag_documents', 'users', ['uploaded_by_id'], 'SET NULL'),
        ('fk_confidence_scores_claim_id', 'confidence_scores', 'claims', ['claim_id'], 'CASCADE'),
        ('fk_claim_scrub_results_claim_id', 'claim_scrub_results', 'claims', ['claim_id'], 'CASCADE'),
    )
    for fk_name, source_table, referent_table, local_cols, on_delete in constraints:
        op.create_foreign_key(
            fk_name,
            source_table,
            referent_table,
            local_cols,
            ['id'],
            ondelete=on_delete,
            onupdate='CASCADE',
        )
def downgrade() -> None:
    """Drop every foreign-key constraint created by this migration's upgrade().

    The constraints are independent of one another, so they are dropped in
    the same order the original statements listed them.
    """
    # (constraint_name, table_name) pairs, one per FK created in upgrade().
    fk_constraints = [
        ('fk_audio_recordings_user_id', 'audio_recordings'),
        ('fk_claims_created_by_user_id', 'claims'),
        ('fk_patients_primary_payer_id', 'patients'),
        ('fk_patients_secondary_payer_id', 'patients'),
        ('fk_audio_recordings_patient_id', 'audio_recordings'),
        ('fk_claims_patient_id', 'claims'),
        ('fk_audio_recordings_template_id', 'audio_recordings'),
        ('fk_transcripts_audio_recording_id', 'transcripts'),
        ('fk_transcripts_corrected_by_user_id', 'transcripts'),
        ('fk_clinical_entities_transcript_id', 'clinical_entities'),
        ('fk_clinical_entities_verified_by_user_id', 'clinical_entities'),
        ('fk_payer_rules_payer_id', 'payer_rules'),
        ('fk_payer_rules_created_by_user_id', 'payer_rules'),
        ('fk_payer_rules_updated_by_user_id', 'payer_rules'),
        ('fk_procedure_templates_created_by_user_id', 'procedure_templates'),
        ('fk_claims_audio_recording_id', 'claims'),
        ('fk_claims_transcript_id', 'claims'),
        ('fk_claims_payer_id', 'claims'),
        ('fk_claims_reviewed_by_user_id', 'claims'),
        ('fk_claims_template_id', 'claims'),
        ('fk_claim_reviews_claim_id', 'claim_reviews'),
        ('fk_claim_reviews_reviewer_id', 'claim_reviews'),
        ('fk_claim_reviews_escalated_to_id', 'claim_reviews'),
        ('fk_audit_logs_user_id', 'audit_logs'),
        ('fk_denial_patterns_payer_id', 'denial_patterns'),
        ('fk_emr_integrations_created_by_id', 'emr_integrations'),
        ('fk_rag_documents_payer_id', 'rag_documents'),
        ('fk_rag_documents_parent_document_id', 'rag_documents'),
        ('fk_rag_documents_uploaded_by_id', 'rag_documents'),
        ('fk_confidence_scores_claim_id', 'confidence_scores'),
        ('fk_claim_scrub_results_claim_id', 'claim_scrub_results'),
    ]
    for constraint_name, table_name in fk_constraints:
        op.drop_constraint(constraint_name, table_name, type_='foreignkey')

View File

@ -0,0 +1,38 @@
from sqlalchemy import Column, String, Integer, BigInteger, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class AudioRecording(Base):
    """ORM mapping for the 'audio_recordings' table.

    Stores metadata about one uploaded clinical audio file (path, format,
    size, duration, device info) plus links to the recording user, the
    patient, and an optional procedure template.
    """

    __tablename__ = 'audio_recordings'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # FIX: each FK column is declared exactly once, with its ForeignKey.
    # The original declared user_id/patient_id/template_id twice (a plain
    # column first, then an FK column later); duplicate class attributes are
    # silently last-one-wins in Python.
    user_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=False)
    patient_id = Column(UUID(as_uuid=True), ForeignKey('patients.id'), nullable=False)
    encounter_id = Column(String(255), nullable=True)
    file_path = Column(String(255), nullable=False)
    file_name = Column(String(255), nullable=False)
    file_format = Column(String(255), nullable=False)
    # FIX: BigInteger was used but never imported (NameError at import time).
    file_size_bytes = Column(BigInteger, nullable=False)
    duration_seconds = Column(Integer, nullable=False)
    recording_date = Column(DateTime, nullable=False)
    encryption_key_id = Column(String(255), nullable=True)
    device_info = Column(JSON, nullable=True)
    noise_level = Column(String(255), nullable=True)
    template_id = Column(UUID(as_uuid=True), ForeignKey('procedure_templates.id'), nullable=True)
    is_template_based = Column(Boolean, nullable=False)

    # FIX: back_populates='' is invalid (no '' attribute exists on the target
    # class, so mapper configuration would fail); use plain one-directional
    # relationships instead.
    user = relationship('User', foreign_keys=[user_id])
    patient = relationship('Patient', foreign_keys=[patient_id])
    procedureTemplate = relationship('ProcedureTemplate', foreign_keys=[template_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<AudioRecording(id={self.id})>'

View File

@ -0,0 +1,37 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class AuditLog(Base):
    """ORM mapping for the 'audit_logs' table.

    Records one auditable action: the acting user, the entity touched,
    before/after values, request context (IP, user agent, session, request
    id), outcome status, and compliance flags.
    """

    __tablename__ = 'audit_logs'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # FIX: declared once with its ForeignKey (the original declared user_id
    # twice; the second definition silently replaced the first).
    user_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=True)
    entity_type = Column(String(255), nullable=False)
    entity_id = Column(UUID(as_uuid=True), nullable=True)
    action = Column(String(255), nullable=False)
    action_category = Column(String(255), nullable=True)
    old_values = Column(JSON, nullable=True)
    new_values = Column(JSON, nullable=True)
    changes_summary = Column(Text, nullable=True)
    ip_address = Column(String(255), nullable=True)
    user_agent = Column(Text, nullable=True)
    session_id = Column(String(255), nullable=True)
    request_id = Column(String(255), nullable=True)
    status = Column(String(255), nullable=False)
    error_message = Column(Text, nullable=True)
    # FIX: 'metadata' is a reserved attribute on SQLAlchemy Declarative
    # classes (it is the MetaData registry), so declaring it raises at class
    # creation. The Python attribute is renamed to metadata_ while the
    # database column keeps the name 'metadata'.
    metadata_ = Column('metadata', JSON, nullable=True)
    phi_accessed = Column(Boolean, nullable=True)
    compliance_flag = Column(Boolean, nullable=True)

    # FIX: back_populates='' is invalid; plain one-directional relationship.
    user = relationship('User', foreign_keys=[user_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<AuditLog(id={self.id})>'

61
src/models/claim_model.py Normal file
View File

@ -0,0 +1,61 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class Claim(Base):
    """ORM mapping for the 'claims' table.

    Represents one billing claim: codes and charge amounts, scrubbing
    output, review/submission/payment timestamps, and links to the patient,
    payer, source audio/transcript, template, and the users who created and
    reviewed it.
    """

    __tablename__ = 'claims'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    claim_number = Column(String(255), nullable=False, unique=True)
    # FIX: every FK column is declared exactly once, with its ForeignKey.
    # The original declared each of these twice (plain column, then FK
    # column); duplicate class attributes are silently last-one-wins.
    patient_id = Column(UUID(as_uuid=True), ForeignKey('patients.id'), nullable=False)
    audio_recording_id = Column(UUID(as_uuid=True), ForeignKey('audio_recordings.id'), nullable=True)
    transcript_id = Column(UUID(as_uuid=True), ForeignKey('transcripts.id'), nullable=True)
    payer_id = Column(UUID(as_uuid=True), ForeignKey('payers.id'), nullable=False)
    encounter_id = Column(String(255), nullable=True)
    service_date = Column(DateTime, nullable=False)
    created_by_user_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=False)
    diagnosis_codes = Column(JSON, nullable=False)
    procedure_codes = Column(JSON, nullable=False)
    modifiers = Column(JSON, nullable=True)
    mdm_level = Column(String(255), nullable=True)
    medical_necessity_justification = Column(Text, nullable=True)
    # NOTE(review): monetary amounts and confidence_score are stored as
    # String(255) here — presumably serialized decimals; confirm against the
    # migration/consumers before changing the type.
    total_charge_amount = Column(String(255), nullable=False)
    expected_reimbursement = Column(String(255), nullable=True)
    actual_reimbursement = Column(String(255), nullable=True)
    scrubbing_results = Column(JSON, nullable=True)
    scrubbing_failures = Column(JSON, nullable=True)
    corrective_actions = Column(JSON, nullable=True)
    confidence_score = Column(String(255), nullable=True)
    is_template_based = Column(Boolean, nullable=False)
    template_id = Column(UUID(as_uuid=True), ForeignKey('procedure_templates.id'), nullable=True)
    reviewed_by_user_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=True)
    reviewed_at = Column(DateTime, nullable=True)
    submitted_at = Column(DateTime, nullable=True)
    paid_at = Column(DateTime, nullable=True)
    denial_reason = Column(Text, nullable=True)
    denial_code = Column(Text, nullable=True) if False else Column(String(255), nullable=True)
    notes = Column(Text, nullable=True)

    # FIX: back_populates='' is invalid, and the original bound BOTH User
    # relationships to the same attribute name 'user' (the second silently
    # replaced the first). With two FKs to users, each relationship must also
    # name its foreign_keys explicitly or mapper configuration is ambiguous.
    patient = relationship('Patient', foreign_keys=[patient_id])
    audioRecording = relationship('AudioRecording', foreign_keys=[audio_recording_id])
    transcript = relationship('Transcript', foreign_keys=[transcript_id])
    payer = relationship('Payer', foreign_keys=[payer_id])
    created_by_user = relationship('User', foreign_keys=[created_by_user_id])
    reviewed_by_user = relationship('User', foreign_keys=[reviewed_by_user_id])
    procedureTemplate = relationship('ProcedureTemplate', foreign_keys=[template_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<Claim(id={self.id})>'

View File

@ -0,0 +1,41 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class ClaimReview(Base):
    """ORM mapping for the 'claim_reviews' table.

    Captures one human review pass over a claim: original vs. revised codes,
    reviewer notes, flagged issues, review timing, and optional escalation to
    another user.
    """

    __tablename__ = 'claim_reviews'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # FIX: FK columns declared once, with their ForeignKey (the original
    # declared claim_id/reviewer_id/escalated_to_id twice; duplicates are
    # silently last-one-wins).
    claim_id = Column(UUID(as_uuid=True), ForeignKey('claims.id'), nullable=False)
    reviewer_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=False)
    review_status = Column(String(255), nullable=False)
    review_type = Column(String(255), nullable=False)
    confidence_threshold_triggered = Column(Boolean, nullable=True)
    original_icd10_codes = Column(JSON, nullable=True)
    original_cpt_codes = Column(JSON, nullable=True)
    revised_icd10_codes = Column(JSON, nullable=True)
    revised_cpt_codes = Column(JSON, nullable=True)
    reviewer_notes = Column(Text, nullable=True)
    flagged_issues = Column(JSON, nullable=True)
    corrective_actions = Column(JSON, nullable=True)
    review_duration_seconds = Column(Integer, nullable=True)
    escalation_reason = Column(Text, nullable=True)
    escalated_to_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=True)
    escalated_at = Column(DateTime, nullable=True)
    reviewed_at = Column(DateTime, nullable=True)

    # FIX: back_populates='' is invalid, and the original bound BOTH User
    # relationships to the same attribute name 'user' (second silently
    # replaced the first). With two FKs to users each relationship must name
    # its foreign_keys explicitly or mapper configuration is ambiguous.
    claim = relationship('Claim', foreign_keys=[claim_id])
    reviewer = relationship('User', foreign_keys=[reviewer_id])
    escalated_to = relationship('User', foreign_keys=[escalated_to_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<ClaimReview(id={self.id})>'

View File

@ -0,0 +1,45 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class ClaimScrubResult(Base):
    """ORM mapping for the 'claim_scrub_results' table.

    Stores the outcome of one automated scrub of a claim: aggregate check
    counts, per-category violation payloads (NCCI/LCD/NCD/payer rules, coding,
    modifiers, bundling), suggested fixes, and review routing flags.
    """

    __tablename__ = 'claim_scrub_results'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # FIX: declared once with its ForeignKey (the original declared claim_id
    # twice; the second definition silently replaced the first).
    claim_id = Column(UUID(as_uuid=True), ForeignKey('claims.id'), nullable=False)
    scrub_status = Column(String(255), nullable=False)
    overall_risk_level = Column(String(255), nullable=True)
    total_checks = Column(Integer, nullable=False)
    passed_checks = Column(Integer, nullable=False)
    failed_checks = Column(Integer, nullable=False)
    warning_checks = Column(Integer, nullable=True)
    ncci_violations = Column(JSON, nullable=True)
    lcd_violations = Column(JSON, nullable=True)
    ncd_violations = Column(JSON, nullable=True)
    payer_rule_violations = Column(JSON, nullable=True)
    coding_errors = Column(JSON, nullable=True)
    medical_necessity_issues = Column(JSON, nullable=True)
    modifier_issues = Column(JSON, nullable=True)
    bundling_issues = Column(JSON, nullable=True)
    denial_risk_patterns = Column(JSON, nullable=True)
    corrective_actions = Column(JSON, nullable=True)
    suggested_codes = Column(JSON, nullable=True)
    rag_documents_used = Column(JSON, nullable=True)
    scrub_engine_version = Column(String(255), nullable=True)
    processing_time_ms = Column(Integer, nullable=True)
    auto_fix_applied = Column(Boolean, nullable=True)
    auto_fix_details = Column(JSON, nullable=True)
    requires_manual_review = Column(Boolean, nullable=True)
    review_priority = Column(String(255), nullable=True)

    # FIX: back_populates='' is invalid; plain one-directional relationship.
    claim = relationship('Claim', foreign_keys=[claim_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<ClaimScrubResult(id={self.id})>'

View File

@ -0,0 +1,36 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class ClinicalEntity(Base):
    """ORM mapping for the 'clinical_entities' table.

    One extracted entity from a transcript: its type, surface/normalized
    text, position, confidence, negation/historical flags, and an optional
    human verification record.
    """

    __tablename__ = 'clinical_entities'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # FIX: FK columns declared once, with their ForeignKey (the original
    # declared transcript_id/verified_by_user_id twice; duplicates are
    # silently last-one-wins).
    transcript_id = Column(UUID(as_uuid=True), ForeignKey('transcripts.id'), nullable=False)
    entity_type = Column(String(255), nullable=False)
    entity_text = Column(String(255), nullable=False)
    normalized_text = Column(String(255), nullable=True)
    confidence_score = Column(String(255), nullable=False)
    start_position = Column(Integer, nullable=True)
    end_position = Column(Integer, nullable=True)
    context = Column(Text, nullable=True)
    # FIX: 'metadata' is a reserved attribute on SQLAlchemy Declarative
    # classes (it is the MetaData registry), so declaring it raises at class
    # creation. The Python attribute is renamed to metadata_ while the
    # database column keeps the name 'metadata'.
    metadata_ = Column('metadata', JSON, nullable=True)
    is_negated = Column(Boolean, nullable=False)
    is_historical = Column(Boolean, nullable=False)
    is_verified = Column(Boolean, nullable=False)
    verified_by_user_id = Column(UUID(as_uuid=True), ForeignKey('users.id'), nullable=True)
    verified_at = Column(DateTime, nullable=True)

    # FIX: back_populates='' is invalid; plain one-directional relationships.
    transcript = relationship('Transcript', foreign_keys=[transcript_id])
    user = relationship('User', foreign_keys=[verified_by_user_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<ClinicalEntity(id={self.id})>'

View File

@ -0,0 +1,37 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
# FIX: relationship() is exported by sqlalchemy.orm, not the sqlalchemy
# top-level package; the original import raised ImportError.
from sqlalchemy.orm import relationship
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class ConfidenceScore(Base):
    """ORM mapping for the 'confidence_scores' table.

    Records one model prediction's confidence: the scored entity, model
    name/version, prediction and alternatives, review routing, and any human
    feedback or correction.
    """

    __tablename__ = 'confidence_scores'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    entity_type = Column(String(255), nullable=False)
    entity_id = Column(UUID(as_uuid=True), nullable=False)
    # FIX: declared once with its ForeignKey (the original declared claim_id
    # twice; the second definition silently replaced the first).
    claim_id = Column(UUID(as_uuid=True), ForeignKey('claims.id'), nullable=True)
    score = Column(String(255), nullable=False)
    threshold_category = Column(String(255), nullable=False)
    model_name = Column(String(255), nullable=False)
    model_version = Column(String(255), nullable=True)
    prediction_value = Column(Text, nullable=True)
    alternative_predictions = Column(JSON, nullable=True)
    features_used = Column(JSON, nullable=True)
    context_data = Column(JSON, nullable=True)
    requires_review = Column(Boolean, nullable=True)
    review_reason = Column(Text, nullable=True)
    human_feedback = Column(String(255), nullable=True)
    corrected_value = Column(Text, nullable=True)
    feedback_notes = Column(Text, nullable=True)
    processing_time_ms = Column(Integer, nullable=True)

    # FIX: back_populates='' is invalid; plain one-directional relationship.
    claim = relationship('Claim', foreign_keys=[claim_id])

    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<ConfidenceScore(id={self.id})>'

View File

@ -0,0 +1,30 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class CPTCode(Base):
    """ORM mapping for the 'cpt_codes' reference table.

    One row per CPT procedure code with its descriptions, validity window
    (effective/termination dates), RVU values, and search synonyms.
    """

    __tablename__ = 'cpt_codes'

    # Surrogate primary key; generated client-side via uuid4.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # The CPT code itself; unique across the table.
    code = Column(String(255), nullable=False, unique=True)
    description = Column(String(255), nullable=False)
    short_description = Column(String(255), nullable=True)
    category = Column(String(255), nullable=True)
    specialty = Column(String(255), nullable=True)
    # Validity window for the code; either end may be open (NULL).
    effective_date = Column(DateTime, nullable=True)
    termination_date = Column(DateTime, nullable=True)
    version = Column(String(255), nullable=False)
    # RVU values stored as strings — presumably serialized decimals;
    # NOTE(review): confirm against consumers before changing the type.
    rvu_work = Column(String(255), nullable=True)
    rvu_facility = Column(String(255), nullable=True)
    rvu_non_facility = Column(String(255), nullable=True)
    global_period = Column(String(255), nullable=True)
    # Alternate terms for the code, stored as a JSON payload.
    synonyms = Column(JSON, nullable=True)
    # Row bookkeeping timestamps, maintained server-side.
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<CPTCode(id={self.id})>'

View File

@ -0,0 +1,25 @@
from sqlalchemy import Column, String, Integer, Boolean, DateTime, ForeignKey, Text, JSON, ARRAY
from sqlalchemy.dialects.postgresql import UUID
from src.config.database import Base
from sqlalchemy.sql import func
import uuid


class CPTModifier(Base):
    """ORM mapping for the 'cpt_modifiers' reference table.

    One row per CPT modifier with its descriptions, validity window, and
    free-text usage rules.
    """

    __tablename__ = 'cpt_modifiers'

    # Surrogate primary key; generated client-side via uuid4.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, nullable=False)
    # The modifier itself; unique across the table.
    modifier = Column(String(255), nullable=False, unique=True)
    description = Column(String(255), nullable=False)
    short_description = Column(String(255), nullable=True)
    category = Column(String(255), nullable=True)
    # Validity window for the modifier; either end may be open (NULL).
    effective_date = Column(DateTime, nullable=True)
    termination_date = Column(DateTime, nullable=True)
    reimbursement_impact = Column(String(255), nullable=True)
    usage_rules = Column(Text, nullable=True)
    # Row bookkeeping timestamps, maintained server-side.
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    def __repr__(self):
        return f'<CPTModifier(id={self.id})>'

Some files were not shown because too many files have changed in this diff Show More