156 lines
6.4 KiB
Python
Executable File
156 lines
6.4 KiB
Python
Executable File
"""
|
|
Verification Script: Test Independence
|
|
|
|
Verifies that each test case can run independently without issues.
|
|
Tests each test case in isolation to ensure no dependencies.
|
|
"""
|
|
import subprocess
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
# Test cases to verify — each entry is a fully-qualified pytest node id
# inside the domain-assessment suite.
_NODE_PREFIX = "tests/student_assessment/test_03_domain_assessment.py::TestDomainAssessment"

TEST_CASES = [
    f"{_NODE_PREFIX}::{method}"
    for method in (
        "test_instructions_modal_appears",
        "test_instructions_modal_dismiss",
        "test_answer_single_question",
        "test_answer_multiple_choice_question",
        "test_answer_true_false_question",
        "test_answer_rating_scale_question",
        "test_answer_open_ended_question",
        "test_answer_matrix_question",
        "test_navigate_questions",
        # Note: test_answer_all_questions_in_domain is long-running, test separately
    )
]
|
|
|
|
def run_test(test_case, timeout=300):
    """Run a single pytest test case in its own subprocess.

    Args:
        test_case: Fully-qualified pytest node id (path::Class::method).
        timeout: Soft time budget in seconds; the subprocess is given an
            extra 60s buffer on top of this before being killed.

    Returns:
        True when pytest exits with code 0 (passed or skipped),
        False on failure, timeout, or any unexpected error.
    """
    print(f"\n{'='*80}")
    print(f"🧪 Testing: {test_case}")
    print(f"{'='*80}")

    # sys.executable guarantees the same interpreter that is running this
    # script (a bare "python" may resolve to a different installation).
    cmd = [
        sys.executable, "-m", "pytest",
        test_case,
        "-v",
        "--tb=short",
    ]

    try:
        result = subprocess.run(
            cmd,
            cwd=Path(__file__).parent.parent,
            capture_output=True,
            text=True,
            timeout=timeout + 60,  # extra buffer (enforced by subprocess, not pytest)
        )

        output = result.stdout + result.stderr

        if result.returncode == 0:
            # Exit code 0 means success (passed or skipped).
            lowered = output.lower()
            if "passed" in lowered:
                print(f"✅ PASSED: {test_case}")
            elif "skipped" in lowered:
                print(f"⏭️ SKIPPED: {test_case}")
                # Surface the skip reason, e.g. "SKIPPED (no fixture data)".
                skip_lines = [line for line in output.split('\n')
                              if 'SKIPPED' in line and '(' in line]
                if skip_lines:
                    print(f"   Reason: {skip_lines[-1].strip()}")
            else:
                # Exit code 0 but no recognizable status - assume passed.
                print(f"✅ PASSED: {test_case} (exit code 0)")
            return True

        # Non-zero exit code = failure; show the most relevant output slice.
        print(f"❌ FAILED: {test_case} (exit code {result.returncode})")
        if "FAILED" in output:
            print(f"Failure details:\n{output.split('FAILED')[-1][:600]}")
        elif "ERROR" in output:
            print(f"Error details:\n{output.split('ERROR')[-1][:600]}")
        else:
            print(f"Output:\n{output[-600:]}")
        return False

    except subprocess.TimeoutExpired:
        print(f"⏱️ TIMEOUT: {test_case} (exceeded {timeout}s)")
        return False
    except Exception as e:
        # Treat any unexpected error (bad cwd, OS failure, ...) as a failed run.
        print(f"❌ ERROR: {test_case} - {e}")
        return False
|
|
|
|
def main():
    """Run every configured test case in isolation and print a summary.

    Returns:
        0 when every case passed (or was skipped), 1 otherwise —
        suitable for passing straight to sys.exit().
    """
    print("🚀 Starting Test Independence Verification")
    print(f"📋 Total test cases: {len(TEST_CASES)}")

    # Execute each case on its own and remember the outcome.
    outcomes = [(case, run_test(case)) for case in TEST_CASES]

    # Summary
    print(f"\n{'='*80}")
    print("📊 VERIFICATION SUMMARY")
    print(f"{'='*80}")

    passed = sum(1 for _, ok in outcomes if ok)
    failed = len(outcomes) - passed

    for case, ok in outcomes:
        print(f"{'✅ PASSED' if ok else '❌ FAILED'}: {case}")

    print(f"\n📈 Results: {passed}/{len(outcomes)} passed ({failed} failed)")

    if passed == len(outcomes):
        print("🎉 All tests can run independently!")
        return 0

    print("⚠️ Some tests failed - review above for details")
    return 1
|
|
|
|
if __name__ == "__main__":
|
|
sys.exit(main())
|
|
|