# codenuk_backend_mine/services/ai-analysis-service/test_progressive_context.py
# 2025-11-07 08:54:52 +05:30
#
# 310 lines
# 11 KiB
# Python

#!/usr/bin/env python3
"""
Test script for progressive context implementation.
Tests the logic without requiring actual API calls or database connections.
"""
import sys
from pathlib import Path
from typing import Dict, List, Tuple
# Add current directory to path
sys.path.insert(0, str(Path(__file__).parent))
# Import the functions we need to test
from server import (
build_context_from_state,
update_state_with_findings,
create_intelligent_chunks,
build_intelligent_chunk_prompt
)
# Mock FileAnalysis class
class MockFileAnalysis:
    """Lightweight stand-in for the real FileAnalysis model from server.py.

    Carries just the attributes the progressive-context helpers read,
    with fixed metadata so every mock looks like a small JavaScript file.
    """

    def __init__(self, path, severity_score, issues_found=None, complexity_score=5.0):
        self.path = path
        self.severity_score = severity_score
        # Preserve the original truthiness semantics: any falsy value
        # (None, empty list, ...) is replaced with a fresh empty list.
        self.issues_found = issues_found or []
        self.complexity_score = complexity_score
        # Constant metadata shared by all mocks.
        self.language = "javascript"
        self.lines_of_code = 100
        self.recommendations = []
        self.detailed_analysis = "Mock analysis"
def test_build_context_from_state():
    """Verify build_context_from_state() renders every expected context section."""
    separator = "=" * 60
    print(separator)
    print("TEST 1: build_context_from_state()")
    print(separator)

    # A populated state simulating two chunks already analyzed.
    analysis_state = {
        'modules_analyzed': ['project_overview', 'authentication'],
        'project_overview': 'Node.js e-commerce platform with Express backend and React frontend',
        'module_summaries': {
            'project_overview': 'Modern e-commerce platform with microservices architecture',
            'authentication': 'JWT-based authentication with rate limiting missing',
        },
        'architecture_patterns': ['MVC', 'Service Layer'],
        'critical_issues': [
            {'module': 'authentication', 'issue': 'Missing rate limiting on auth endpoints'},
        ],
        'tech_stack': {
            'frontend': 'React',
            'backend': 'Node.js',
            'database': 'PostgreSQL',
        },
        'dependency_context': {
            'chunk_001': 'Project overview and setup',
            'chunk_002': 'Authentication module with JWT',
        },
    }

    # The third chunk depends on both previously analyzed chunks.
    current_chunk = {
        'name': 'products',
        'id': 'chunk_003',
        'chunk_type': 'module',
        'context_dependencies': ['chunk_001', 'chunk_002'],
    }

    context = build_context_from_state(analysis_state, current_chunk)
    print("\n✅ Generated context:")
    print(context)
    print()

    # Each major piece of the state must surface in the rendered context.
    expected_sections = {
        "PROJECT OVERVIEW": "Context should include project overview",
        "PREVIOUSLY ANALYZED MODULES": "Context should include module summaries",
        "ARCHITECTURE PATTERNS": "Context should include architecture patterns",
        "CRITICAL ISSUES": "Context should include critical issues",
        "TECH STACK": "Context should include tech stack",
        "DEPENDENCY CONTEXT": "Context should include dependency context",
    }
    for heading, failure_message in expected_sections.items():
        assert heading in context, failure_message

    print("✅ All context sections present!")
    return True
def test_update_state_with_findings():
    """Verify update_state_with_findings() merges chunk results into the state."""
    separator = "=" * 60
    print("\n" + separator)
    print("TEST 2: update_state_with_findings()")
    print(separator)

    # State after only the project-overview chunk has been analyzed.
    analysis_state = {
        'modules_analyzed': ['project_overview'],
        'module_summaries': {
            'project_overview': 'Node.js e-commerce platform',
        },
        'architecture_patterns': [],
        'critical_issues': [],
        'dependency_context': {},
    }

    # Incoming chunk plus its (simulated) AI analysis results.
    chunk = {
        'name': 'authentication',
        'id': 'chunk_002',
        'chunk_type': 'module',
    }
    chunk_analysis = {
        'module_overview': 'JWT-based authentication module with rate limiting missing',
        'module_architecture': 'Uses MVC pattern with Service Layer for business logic',
        'module_quality_score': 6.5,
    }

    # Include one low-scoring file so critical issues get recorded.
    file_analyses = [
        MockFileAnalysis('auth.controller.js', 7.0, ['No rate limiting']),
        MockFileAnalysis('auth.service.js', 8.0),
        MockFileAnalysis('auth.middleware.js', 4.0, ['Weak validation']),  # Low quality
    ]

    updated_state = update_state_with_findings(analysis_state.copy(), chunk, chunk_analysis, file_analyses)

    print("\n✅ Updated state:")
    print(f" Modules analyzed: {updated_state.get('modules_analyzed', [])}")
    print(f" Architecture patterns: {updated_state.get('architecture_patterns', [])}")
    print(f" Critical issues: {len(updated_state.get('critical_issues', []))}")
    print(f" Module summaries: {list(updated_state.get('module_summaries', {}).keys())}")
    print()

    # The new module, detected patterns, issues, and summary must all land in state.
    assert 'authentication' in updated_state['modules_analyzed'], "Authentication should be in modules_analyzed"
    assert 'MVC' in updated_state['architecture_patterns'], "MVC pattern should be detected"
    assert 'Service Layer' in updated_state['architecture_patterns'], "Service Layer pattern should be detected"
    assert len(updated_state['critical_issues']) > 0, "Critical issues should be added"
    assert 'authentication' in updated_state['module_summaries'], "Module summary should be stored"

    print("✅ State updated correctly!")
    return True
def test_progressive_context_flow():
    """Simulate the full pipeline: chunking, context building, state updates."""
    separator = "=" * 60
    print("\n" + separator)
    print("TEST 3: Progressive Context Flow (Simulated)")
    print(separator)

    # A tiny repository: docs, manifest, and two feature modules.
    test_files = [
        ("README.md", "# Project\n\nNode.js e-commerce platform"),
        ("package.json", '{"name": "ecommerce", "dependencies": {"express": "^4.0"}}'),
        ("src/auth/auth.controller.js", "export class AuthController {}"),
        ("src/auth/auth.service.js", "export class AuthService {}"),
        ("src/products/product.controller.js", "export class ProductController {}"),
    ]

    chunks = create_intelligent_chunks(test_files)
    print(f"\n✅ Created {len(chunks)} chunks:")
    for chunk in chunks:
        print(f" - {chunk['name']} ({chunk['chunk_type']}): {len(chunk['files'])} files")

    analysis_state = {}
    print("\n📊 Simulating progressive analysis:")
    for index, chunk in enumerate(chunks, 1):
        chunk_name = chunk['name']
        print(f"\n Chunk {index}: {chunk_name}")

        # Context that would be injected into this chunk's prompt.
        context = build_context_from_state(analysis_state, chunk)
        if context:
            print(f" 📚 Context available: {len(context)} chars")
        else:
            print(f" 📚 No context (first chunk)")

        # Fake AI results for this chunk; the overview chunk gets a
        # distinct architecture label so pattern detection has variety.
        chunk_analysis = {
            'module_overview': f"Analysis of {chunk_name} module",
            'module_architecture': 'MVC pattern' if chunk_name != 'project_overview' else 'Node.js setup',
            'module_quality_score': 7.5,
        }
        file_analyses = [
            MockFileAnalysis(f"{chunk_name}_file{file_index}.js", 7.0 + file_index * 0.1)
            for file_index in range(len(chunk['files']))
        ]

        analysis_state = update_state_with_findings(analysis_state.copy(), chunk, chunk_analysis, file_analyses)
        print(f" ✅ State updated: {len(analysis_state.get('modules_analyzed', []))} modules analyzed")
        if analysis_state.get('architecture_patterns'):
            print(f" 📐 Patterns: {', '.join(analysis_state.get('architecture_patterns', []))}")

    print("\n📊 Final Analysis State:")
    print(f" Modules analyzed: {', '.join(analysis_state.get('modules_analyzed', []))}")
    print(f" Architecture patterns: {', '.join(analysis_state.get('architecture_patterns', []))}")
    print(f" Critical issues: {len(analysis_state.get('critical_issues', []))}")
    print(f" Module summaries: {len(analysis_state.get('module_summaries', {}))}")

    # Every chunk must be folded into state and at least one pattern detected.
    assert len(analysis_state.get('modules_analyzed', [])) == len(chunks), "All chunks should be analyzed"
    assert len(analysis_state.get('architecture_patterns', [])) > 0, "Patterns should be detected"

    print("\n✅ Progressive context flow working correctly!")
    return True
def test_prompt_includes_context():
    """Verify build_intelligent_chunk_prompt() embeds (or omits) prior context."""
    separator = "=" * 60
    print("\n" + separator)
    print("TEST 4: Prompt Includes Progressive Context")
    print(separator)

    # State with enough material to populate every context section.
    analysis_state = {
        'modules_analyzed': ['project_overview', 'authentication'],
        'project_overview': 'Node.js platform',
        'module_summaries': {
            'authentication': 'JWT auth module',
        },
        'architecture_patterns': ['MVC'],
        'critical_issues': [
            {'module': 'authentication', 'issue': 'Missing rate limiting'},
        ],
        'tech_stack': {'backend': 'Node.js'},
    }

    chunk = {
        'name': 'products',
        'chunk_type': 'module',
        'files': [('product.controller.js', 'export class ProductController {}')],
    }

    prompt = build_intelligent_chunk_prompt(chunk, analysis_state)
    print("\n✅ Generated prompt (first 500 chars):")
    print(prompt[:500])
    print("...")
    print()

    # The prompt must carry the context header and each section built from state.
    required_fragments = {
        "CONTEXT FROM PREVIOUS ANALYSIS": "Prompt should include context section",
        "PROJECT OVERVIEW": "Prompt should include project overview",
        "PREVIOUSLY ANALYZED MODULES": "Prompt should include module summaries",
        "ARCHITECTURE PATTERNS": "Prompt should include architecture patterns",
        "CRITICAL ISSUES": "Prompt should include critical issues",
    }
    for fragment, failure_message in required_fragments.items():
        assert fragment in prompt, failure_message
    print("✅ Prompt includes all context sections!")

    # Without prior state (the very first chunk) no context header may appear.
    prompt_no_context = build_intelligent_chunk_prompt(chunk, None)
    assert "CONTEXT FROM PREVIOUS ANALYSIS" not in prompt_no_context, "First chunk should not have context"
    print("✅ Prompt correctly omits context for first chunk!")
    return True
def run_all_tests():
    """Run every test in order; return True on success, False on any failure."""
    separator = "=" * 60
    print("\n" + separator)
    print("PROGRESSIVE CONTEXT - COMPREHENSIVE TEST SUITE")
    print(separator)

    # Ordered suite: context building, state updates, complete flow, prompts.
    test_cases = (
        test_build_context_from_state,
        test_update_state_with_findings,
        test_progressive_context_flow,
        test_prompt_includes_context,
    )
    try:
        for test_case in test_cases:
            test_case()

        print("\n" + separator)
        print("✅ ALL TESTS PASSED!")
        print(separator)
        print("\n📊 Summary:")
        print(" • Context building: ✅")
        print(" • State updates: ✅")
        print(" • Progressive flow: ✅")
        print(" • Prompt generation: ✅")
        print("\n🎉 Progressive context implementation is working correctly!")
        return True
    except Exception as e:
        # Report the failing assertion loudly and dump the traceback for debugging.
        print("\n" + separator)
        print(f"❌ TEST FAILED: {e}")
        print(separator)
        import traceback
        traceback.print_exc()
        return False
if __name__ == "__main__":
    # Exit code 0 on success, 1 on failure, so shells/CI can gate on this script.
    exit_code = 0 if run_all_tests() else 1
    sys.exit(exit_code)