# FLEXIBLE REQUIREMENT-PROCESSOR - ACCEPTS ANY BODY STRUCTURE
|
|
# NO strict validation, accepts any JSON and extracts features dynamically
|
|
# Just extract features and let Claude decide everything
|
|
|
|
import json
import os
import sys
from datetime import datetime, timezone
from typing import Any, Dict, Optional, Union

from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from pydantic import BaseModel
|
|
|
|
# Configure logging: drop loguru's default stderr sink and replace it with a
# single stdout sink using a compact "time | level | message" format.
logger.remove()
logger.add(sys.stdout, level="INFO", format="{time} | {level} | {message}")
|
|
|
|
# ================================================================================================
|
|
# FLEXIBLE MODELS
|
|
# ================================================================================================
|
|
|
|
class FlexibleRequirementRequest(BaseModel):
    """Flexible request model that accepts any structure"""

    # NOTE(review): this model is not referenced by any endpoint in this file —
    # the routes read the raw Request body instead. Confirm it is used by a
    # caller elsewhere, or remove it.
    class Config:
        # Pydantic v1-style config: keep unknown fields instead of rejecting them.
        extra = "allow"  # Allow any additional fields
|
|
|
|
# ================================================================================================
|
|
# FLEXIBLE FASTAPI APPLICATION
|
|
# ================================================================================================
|
|
|
|
# FastAPI application. No request models are attached to the processing
# routes, so any JSON body is accepted without validation.
app = FastAPI(
    title="Flexible Requirements Processor",
    description="Flexible feature extraction - accepts any body structure, no strict validation",
    version="5.0.0"
)

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers per the CORS spec (credentialed requests may not use a
# wildcard origin). Pin explicit origins if cookies/auth headers are needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
|
|
@app.get("/health")
async def health_check():
    """Liveness probe: report service identity and the processing approach."""
    payload = {
        "status": "healthy",
        "service": "flexible-requirements-processor",
        "version": "5.0.0",
        "approach": "accepts_any_body_structure",
    }
    return payload
|
|
|
|
@app.post("/api/v1/process-requirements")
async def process_flexible_requirements(request: Request):
    """
    FLEXIBLE: Accepts ANY body structure and extracts features dynamically
    NO strict validation, NO required fields
    Works with any JSON structure from n8n

    Returns a 200 response in every case: {"success": True, "data": {...}} on
    success, {"success": False, "error": ...} on failure, so upstream flows
    keep running instead of failing the HTTP call.
    """
    try:
        # Get raw JSON body without any model validation.
        raw_body = await request.json()
        # NOTE(review): this logs the entire request body at INFO level —
        # confirm payloads cannot contain secrets/PII before production use.
        logger.info(f"Received raw body: {json.dumps(raw_body, indent=2)}")

        # Pull the human-readable identifiers from wherever the caller put them.
        project_name = extract_project_name(raw_body)
        description = extract_description(raw_body)

        # Walk the whole payload for features, scale hints, and everything else.
        all_features, scale_info, complete_requirements = extract_all_data(raw_body)

        logger.info(f"✅ Extracted {len(all_features)} features from flexible structure")

        # Single timestamp for both the project id and the metadata, so they
        # always agree. (datetime.utcnow() is deprecated since Python 3.12;
        # use an explicit timezone-aware UTC timestamp instead.)
        now = datetime.now(timezone.utc)

        # Build simple response with ALL data preserved.
        response = {
            "success": True,
            "data": {
                "project_id": f"flexible-{now.strftime('%Y%m%d-%H%M%S')}",
                "project_name": project_name,
                "project_description": description,

                # PURE DATA - NO ANALYSIS
                "all_features": all_features,
                "total_features": len(all_features),
                "scale_information": scale_info,
                "complete_requirements": complete_requirements,  # EVERYTHING PRESERVED

                "processing_metadata": {
                    "approach": "flexible_data_extraction",
                    "analysis_performed": "none_let_llm_decide",
                    "features_extracted": len(all_features),
                    "timestamp": now.isoformat(),
                    "input_structure": "flexible_any_body"
                }
            }
        }

        logger.info(f"✅ Successfully processed flexible requirements - {len(all_features)} features extracted")
        return response

    except Exception as e:
        # logger.exception records the full traceback, unlike logger.error.
        logger.exception(f"❌ Flexible requirements processing failed: {e}")
        # Deliberate: return an error payload with HTTP 200 so the caller's
        # workflow does not abort on a failed request.
        return {
            "success": False,
            "error": str(e),
            "message": "Flexible processor encountered an error but continues running"
        }
|
|
|
|
def extract_project_name(data: Dict[str, Any]) -> str:
    """Find a project name wherever the caller happened to put it.

    Search order: top-level (project_name, projectName, name, title), then a
    nested ``body`` dict, then a nested ``requirements`` dict. The first
    non-empty string wins (whitespace-stripped); falls back to
    "Unknown Project" when nothing usable is found.
    """
    candidates = [data.get(key) for key in ('project_name', 'projectName', 'name', 'title')]

    nested_body = data.get('body')
    if isinstance(nested_body, dict):
        candidates.extend(nested_body.get(key) for key in ('project_name', 'projectName', 'name'))

    nested_reqs = data.get('requirements')
    if isinstance(nested_reqs, dict):
        candidates.extend(nested_reqs.get(key) for key in ('project_name', 'name'))

    for candidate in candidates:
        if isinstance(candidate, str) and candidate.strip():
            return candidate.strip()

    return "Unknown Project"
|
|
|
|
def extract_description(data: Dict[str, Any]) -> str:
    """Pull a free-text project description out of any of the known spots.

    Checks the top level first (description, desc, project_description), then
    a nested ``body`` dict (description, desc). Returns the first non-empty
    string, stripped of surrounding whitespace, or "" when none is found.
    """
    candidates = [data.get(key) for key in ('description', 'desc', 'project_description')]

    nested_body = data.get('body')
    if isinstance(nested_body, dict):
        candidates.extend(nested_body.get(key) for key in ('description', 'desc'))

    for candidate in candidates:
        if isinstance(candidate, str) and candidate.strip():
            return candidate.strip()

    return ""
|
|
|
|
def extract_all_data(data: Dict[str, Any]) -> tuple[list, dict, dict]:
    """Extract ALL features, scale info, and complete requirements from ANY structure.

    Returns a tuple of:
        - all_features: keys whose value is literally ``True`` anywhere in the
          payload, deduplicated in first-seen order.
        - scale_info: values of the well-known sizing/compliance keys.
        - complete_requirements: flattened map of every True flag, scale key,
          non-empty string, and non-bool number found at any nesting level.
          Duplicate keys at different levels overwrite each other (last wins).
    """
    # Keys that describe project scale/context rather than individual features.
    scale_keys = frozenset({
        'team_size', 'timeline', 'budget', 'expected_users', 'industry',
        'scalability', 'concurrent_users', 'data_volume',
        'performance_requirements', 'compliance_requirements',
    })

    all_features: list = []
    scale_info: dict = {}
    complete_requirements: dict = {}

    def extract_from_object(obj: Any, path: str = "") -> None:
        """Depth-first walk collecting flags, scale values, and scalar fields."""
        if isinstance(obj, dict):
            for key, value in obj.items():
                current_path = f"{path}.{key}" if path else key

                # A literal True marks the key itself as a requested feature.
                # (Checked before scale_keys, so e.g. scalability=True is
                # recorded as a feature — preserving the original precedence.)
                if value is True:
                    all_features.append(key)
                    complete_requirements[key] = value
                # Well-known scale/context keys keep their raw value.
                elif key in scale_keys:
                    scale_info[key] = value
                    complete_requirements[key] = value
                # Non-empty strings and non-bool numbers are preserved as-is.
                elif isinstance(value, str) and value.strip():
                    complete_requirements[key] = value
                elif isinstance(value, (int, float)) and not isinstance(value, bool):
                    complete_requirements[key] = value
                # Recurse into nested containers.
                elif isinstance(value, (dict, list)):
                    extract_from_object(value, current_path)
        elif isinstance(obj, list):
            for i, item in enumerate(obj):
                if isinstance(item, (dict, list)):
                    extract_from_object(item, f"{path}[{i}]" if path else f"[{i}]")

    # One full walk covers every nesting level, including the common wrapper
    # keys (body/requirements/params/query/data): the recursion descends into
    # every dict/list value, so the previous explicit second pass over those
    # wrappers was pure redundant work and has been dropped.
    extract_from_object(data)

    # FIX: dedupe while preserving first-seen order. The old list(set(...))
    # reordered features nondeterministically across runs (hash randomization),
    # making responses non-reproducible.
    all_features = list(dict.fromkeys(all_features))

    logger.info(f"Extracted features: {all_features}")
    logger.info(f"Extracted scale info: {scale_info}")

    return all_features, scale_info, complete_requirements
|
|
|
|
# Script entry point: serve the API directly with uvicorn.
if __name__ == "__main__":
    import uvicorn

    logger.info("🚀 FLEXIBLE REQUIREMENTS PROCESSOR - Accepts Any Body Structure")
    logger.info("✅ NO strict validation, NO required fields")
    logger.info("✅ Accepts any JSON structure from n8n")
    logger.info("✅ Extracts features from anywhere in the data")

    # NOTE(review): "main:app" assumes this file is saved as main.py — confirm
    # the import string matches the actual module name.
    # NOTE(review): the /api/v1/analyze-feature route below is defined AFTER
    # this guard. It is still registered (module top level runs on import),
    # but conventionally the __main__ block should be the last thing in the file.
    uvicorn.run("main:app", host="0.0.0.0", port=5678, log_level="info")
|
|
@app.post("/api/v1/analyze-feature")
async def analyze_custom_feature(request: Request):
    """Real AI-powered feature analysis using Claude.

    Reads ``description`` and ``project_type`` from the JSON body, asks the
    Claude API for a structured technical analysis, and returns the parsed
    JSON under ``analysis``. Any failure is returned as HTTP 200 with
    success=False rather than raised.
    """
    try:
        data = await request.json()
        # Both fields are optional; missing keys fall back to empty strings.
        feature_description = data.get('description', '')
        project_type = data.get('project_type', '')

        # Prompt instructs the model to reply with a bare JSON object only.
        claude_prompt = f"""
        Analyze this custom feature requirement for a {project_type} project:

        Feature Description: {feature_description}

        Provide a detailed technical analysis in JSON format:
        {{
            "feature_name": "Suggested technical name",
            "complexity": "low|medium|high",
            "implementation_details": ["detail1", "detail2"],
            "technical_requirements": ["req1", "req2"],
            "estimated_effort": "1-2 weeks|3-4 weeks|etc",
            "dependencies": ["dependency1", "dependency2"],
            "api_endpoints": ["POST /api/endpoint1", "GET /api/endpoint2"],
            "database_tables": ["table1", "table2"],
            "confidence_score": 0.85
        }}

        Return ONLY the JSON object.
        """

        # NOTE(review): claude_client is never defined anywhere in this file.
        # As written this line raises NameError, which the except below turns
        # into a success=False response — so the endpoint always fails. Wire
        # up the Anthropic client (or inject it) before relying on this route.
        message = claude_client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=2000,
            temperature=0.1,
            messages=[{"role": "user", "content": claude_prompt}]
        )

        # Assumes the model returned bare JSON with no surrounding prose; a
        # wrapped reply raises json.JSONDecodeError into the except branch.
        analysis = json.loads(message.content[0].text)

        return {
            "success": True,
            "analysis": analysis
        }

    except Exception as e:
        logger.error(f"Feature analysis failed: {e}")
        return {
            "success": False,
            "error": str(e)
        }
|