codenuk_backend_mine/services/ai-analysis-service/test_multi_level_report.py
#!/usr/bin/env python3
"""
Test script for multi-level report generation and context retrieval
"""
import os
import sys
import asyncio
from pathlib import Path

from dotenv import load_dotenv

# Make sibling modules (server.py, ai-analyze.py) importable from this directory.
sys.path.insert(0, str(Path(__file__).parent))
load_dotenv()
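
# The database test below reads its connection settings from the environment
# (typically a local .env file). A minimal sketch, assuming the defaults that
# test_database_tables() falls back to:
#
#   POSTGRES_HOST=localhost
#   POSTGRES_PORT=5432
#   POSTGRES_DB=dev_pipeline
#   POSTGRES_USER=pipeline_admin
#   POSTGRES_PASSWORD=secure_pipeline_2024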

async def test_context_retrieval():
    """Test the context retrieval functions exposed by server.py."""
    print("\n" + "=" * 60)
    print("Testing Context Retrieval Functions")
    print("=" * 60)
    try:
        from server import (
            retrieve_all_module_analyses,
            retrieve_synthesis_analysis,
            retrieve_cumulative_analysis_state,
            retrieve_all_findings,
            retrieve_all_metrics,
            retrieve_comprehensive_report_context,
        )
        print("✅ All context retrieval functions imported")

        # Dummy identifiers: no data is expected for them, so the checks below
        # only verify that each function runs and returns the right shape.
        test_run_id = "test_run_123"
        test_repository_id = "test_repo_123"
        test_session_id = "test_session_123"
        print(f"\nTesting with run_id: {test_run_id}")
        print(f"Repository ID: {test_repository_id}")
        print(f"Session ID: {test_session_id}")

        print("\n1. Testing retrieve_all_module_analyses...")
        modules = await retrieve_all_module_analyses(test_run_id, test_repository_id)
        print(f"   ✓ Found {len(modules)} modules")

        print("\n2. Testing retrieve_synthesis_analysis...")
        synthesis = await retrieve_synthesis_analysis(test_run_id, test_repository_id)
        if synthesis:
            print("   ✓ Found synthesis analysis")
        else:
            print("   ⚠️ No synthesis analysis found (expected for test)")

        print("\n3. Testing retrieve_cumulative_analysis_state...")
        state = await retrieve_cumulative_analysis_state(test_run_id, test_repository_id, test_session_id)
        if state:
            print("   ✓ Found cumulative analysis state")
        else:
            print("   ⚠️ No cumulative analysis state found (expected for test)")

        print("\n4. Testing retrieve_all_findings...")
        findings = await retrieve_all_findings(test_run_id)
        print(f"   ✓ Found findings for {len(findings)} modules")

        print("\n5. Testing retrieve_all_metrics...")
        metrics = await retrieve_all_metrics(test_run_id)
        print(f"   ✓ Found metrics for {len(metrics)} modules")

        print("\n6. Testing retrieve_comprehensive_report_context...")
        context = await retrieve_comprehensive_report_context(
            run_id=test_run_id,
            repository_id=test_repository_id,
            session_id=test_session_id,
        )
        print("   ✓ Context retrieved:")
        print(f"     - Modules: {context.get('total_modules', 0)}")
        print(f"     - Findings: {context.get('total_findings', 0)}")
        print(f"     - Has synthesis: {bool(context.get('synthesis_analysis'))}")
        print(f"     - Has analysis state: {bool(context.get('analysis_state'))}")

        print("\n✅ All context retrieval tests passed!")
        return True
    except Exception as e:
        print(f"\n❌ Context retrieval test failed: {e}")
        import traceback
        traceback.print_exc()
        return False
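
# A minimal manual-invocation sketch (hypothetical run id): the same helpers
# can be exercised one at a time against a real analysis run, e.g.
#
#   from server import retrieve_all_findings
#   findings = asyncio.run(retrieve_all_findings("run_20251107_001"))  # hypothetical id
#   for module_name, module_findings in findings.items():
#       print(module_name, len(module_findings))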

def test_pdf_method_exists():
    """Test that the new PDF report method exists with the expected signature."""
    print("\n" + "=" * 60)
    print("Testing PDF Report Method")
    print("=" * 60)
    try:
        # "ai-analyze.py" is not a valid module name, so load it via importlib,
        # the same way server.py does. The relative path assumes this script is
        # run from the service directory.
        import importlib.util
        spec = importlib.util.spec_from_file_location("ai_analyze", "ai-analyze.py")
        ai_analyze_module = importlib.util.module_from_spec(spec)
        sys.modules["ai_analyze"] = ai_analyze_module
        spec.loader.exec_module(ai_analyze_module)

        from ai_analyze import EnhancedGitHubAnalyzer
        print("✅ EnhancedGitHubAnalyzer imported successfully")

        if not hasattr(EnhancedGitHubAnalyzer, 'create_multi_level_pdf_report'):
            print("❌ create_multi_level_pdf_report method not found")
            return False
        print("✅ create_multi_level_pdf_report method exists")

        # Inspect the method signature for the expected parameters.
        import inspect
        sig = inspect.signature(EnhancedGitHubAnalyzer.create_multi_level_pdf_report)
        params = list(sig.parameters.keys())
        print(f"   Method parameters: {', '.join(params)}")
        for expected in ('comprehensive_context', 'output_path', 'repository_id', 'run_id'):
            if expected in params:
                print(f"   ✓ {expected} parameter exists")
        return True
    except Exception as e:
        print(f"❌ PDF method test failed: {e}")
        import traceback
        traceback.print_exc()
        return False
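
# Sketch (not executed): one plausible call against the verified signature,
# once a comprehensive context has been retrieved. The constructor arguments,
# the await (this test does not check whether the method is async), and all
# values below are placeholders, not the service's actual configuration.
#
#   analyzer = EnhancedGitHubAnalyzer(...)  # constructor args not covered by this test
#   await analyzer.create_multi_level_pdf_report(
#       comprehensive_context=context,
#       output_path="multi_level_report.pdf",
#       repository_id="test_repo_123",
#       run_id="test_run_123",
#   )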

def test_database_tables():
    """Test that the required database tables exist."""
    print("\n" + "=" * 60)
    print("Testing Database Tables")
    print("=" * 60)
    try:
        import psycopg2

        conn = psycopg2.connect(
            host=os.getenv('POSTGRES_HOST', 'localhost'),
            port=os.getenv('POSTGRES_PORT', '5432'),
            database=os.getenv('POSTGRES_DB', 'dev_pipeline'),
            user=os.getenv('POSTGRES_USER', 'pipeline_admin'),
            password=os.getenv('POSTGRES_PASSWORD', 'secure_pipeline_2024')
        )
        cursor = conn.cursor()

        tables_to_check = ['findings', 'metrics', 'report_sections', 'analysis_runs']
        for table_name in tables_to_check:
            cursor.execute("""
                SELECT COUNT(*)
                FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_name = %s
            """, (table_name,))
            exists = cursor.fetchone()[0] > 0
            if exists:
                # Table names come from the fixed list above, so interpolating
                # them into the row-count query is safe here.
                cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
                count = cursor.fetchone()[0]
                print(f"✅ Table '{table_name}' exists ({count} rows)")
            else:
                print(f"❌ Table '{table_name}' does not exist")
                cursor.close()
                conn.close()
                return False

        cursor.close()
        conn.close()
        print("\n✅ All database tables verified!")
        return True
    except Exception as e:
        print(f"❌ Database test failed: {e}")
        import traceback
        traceback.print_exc()
        return False
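
# Equivalent manual check from psql (the same information_schema query the
# test runs, restricted to the four expected tables):
#
#   SELECT table_name FROM information_schema.tables
#   WHERE table_schema = 'public'
#     AND table_name IN ('findings', 'metrics', 'report_sections', 'analysis_runs');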

async def run_all_tests():
    """Run all tests and print a summary."""
    print("\n" + "=" * 60)
    print("MULTI-LEVEL REPORT IMPLEMENTATION TEST SUITE")
    print("=" * 60)

    results = []
    results.append(("Database Tables", test_database_tables()))
    results.append(("PDF Method", test_pdf_method_exists()))
    results.append(("Context Retrieval", await test_context_retrieval()))

    print("\n" + "=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)
    passed = 0
    failed = 0
    for test_name, result in results:
        status = "✅ PASSED" if result else "❌ FAILED"
        print(f"{test_name}: {status}")
        if result:
            passed += 1
        else:
            failed += 1

    print(f"\nTotal: {passed} passed, {failed} failed out of {len(results)} tests")
    if failed == 0:
        print("\n✅ All tests passed! Implementation is ready.")
        return True
    print(f"\n⚠️ {failed} test(s) failed. Please review the errors above.")
    return False
if __name__ == "__main__":
success = asyncio.run(run_all_tests())
sys.exit(0 if success else 1)