360 lines
14 KiB
Python
360 lines
14 KiB
Python
"""
|
||
End-to-End Test: Complete Assessment Flow
|
||
|
||
Tests the complete assessment journey:
|
||
1. Login → Profile Complete → Assessments Page
|
||
2. Select Assessment → Domains Page
|
||
3. Complete All Domains (answer questions, submit)
|
||
4. Provide Domain Feedback (after each domain)
|
||
5. Provide Final Feedback (after all domains)
|
||
6. Verify Completion
|
||
|
||
This is a comprehensive end-to-end test that validates the entire assessment flow.
|
||
"""
|
||
import pytest
|
||
import time
|
||
from pages.assessments_page import AssessmentsPage
|
||
from pages.domains_page import DomainsPage
|
||
from pages.domain_assessment_page import DomainAssessmentPage
|
||
from pages.domain_feedback_page import DomainFeedbackPage
|
||
from pages.feedback_survey_page import FeedbackSurveyPage
|
||
from selenium.webdriver.common.by import By
|
||
|
||
|
||
@pytest.mark.assessment
|
||
@pytest.mark.e2e
|
||
@pytest.mark.complete_flow
|
||
class TestCompleteAssessmentFlow:
|
||
"""End-to-end test cases for complete assessment flow"""
|
||
|
||
def test_complete_single_domain_flow(self, assessment_with_domains):
|
||
"""
|
||
Test completing a single domain (questions + feedback)
|
||
|
||
This test:
|
||
1. Starts a domain assessment
|
||
2. Answers questions (all types)
|
||
3. Submits domain
|
||
4. Provides domain feedback
|
||
5. Returns to domains page
|
||
"""
|
||
driver = assessment_with_domains['driver']
|
||
domains_page = assessment_with_domains['domains_page']
|
||
domain_ids = assessment_with_domains['domain_ids']
|
||
|
||
if not domain_ids:
|
||
pytest.skip("No domains available")
|
||
|
||
# Find first unlocked domain
|
||
unlocked_domain_id = None
|
||
for domain_id in domain_ids:
|
||
if domains_page.is_domain_unlocked(domain_id):
|
||
unlocked_domain_id = domain_id
|
||
break
|
||
|
||
if not unlocked_domain_id:
|
||
pytest.skip("No unlocked domains available")
|
||
|
||
print(f"\n{'='*80}")
|
||
print(f"COMPLETING DOMAIN: {unlocked_domain_id}")
|
||
print(f"{'='*80}\n")
|
||
|
||
# Step 1: Start domain assessment
|
||
print(f"[STEP 1] Starting domain assessment {unlocked_domain_id}...")
|
||
domains_page.click_domain_action(unlocked_domain_id)
|
||
time.sleep(2)
|
||
|
||
domain_assessment = DomainAssessmentPage(driver)
|
||
domain_assessment.wait_for_page_load()
|
||
domain_assessment.dismiss_guidance()
|
||
time.sleep(1)
|
||
|
||
print(f"✅ Domain assessment started")
|
||
print(f" URL: {driver.current_url}\n")
|
||
|
||
# Step 2: Answer questions
|
||
print(f"[STEP 2] Answering questions...")
|
||
questions_answered = 0
|
||
max_questions = 20 # Safety limit
|
||
|
||
for question_num in range(1, max_questions + 1):
|
||
# Get current question
|
||
question_id = domain_assessment.get_current_question_id()
|
||
if not question_id:
|
||
# Try to get from page elements
|
||
question_ids = domain_assessment.get_all_questions()
|
||
if question_ids:
|
||
question_id = question_ids[0]
|
||
else:
|
||
print(f" No more questions found (answered {questions_answered} questions)")
|
||
break
|
||
|
||
print(f" [Question {question_num}] ID: {question_id}")
|
||
|
||
# Detect question type
|
||
question_type = domain_assessment.get_question_type(question_id)
|
||
print(f" Type: {question_type}")
|
||
|
||
# Answer based on type
|
||
try:
|
||
if question_type == "multiple_choice":
|
||
options = domain_assessment.get_question_options(question_id)
|
||
if options:
|
||
domain_assessment.answer_multiple_choice(question_id, options[0])
|
||
print(f" ✅ Answered: Multiple choice (option {options[0]})")
|
||
|
||
elif question_type == "true_false":
|
||
domain_assessment.answer_true_false(question_id, True)
|
||
print(f" ✅ Answered: True/False (True)")
|
||
|
||
elif question_type == "rating":
|
||
domain_assessment.answer_rating(question_id, 3)
|
||
print(f" ✅ Answered: Rating (3)")
|
||
|
||
elif question_type == "open_ended":
|
||
domain_assessment.answer_open_ended(question_id, "Automated test response.")
|
||
print(f" ✅ Answered: Open-ended")
|
||
|
||
elif question_type == "matrix":
|
||
domain_assessment.answer_matrix(question_id, 0, 0)
|
||
print(f" ✅ Answered: Matrix (0,0)")
|
||
|
||
else:
|
||
print(f" ⚠️ Unknown question type, skipping")
|
||
break
|
||
|
||
questions_answered += 1
|
||
time.sleep(0.3)
|
||
|
||
except Exception as e:
|
||
print(f" ⚠️ Error answering question: {e}")
|
||
break
|
||
|
||
# Navigate to next question or submit
|
||
if domain_assessment.is_next_button_visible():
|
||
domain_assessment.click_next()
|
||
time.sleep(1)
|
||
else:
|
||
# Check for submit button
|
||
try:
|
||
submit_button = domain_assessment.find_element(domain_assessment.SUBMIT_BUTTON)
|
||
if submit_button.is_displayed():
|
||
print(f" All questions answered. Submitting...")
|
||
break
|
||
except:
|
||
pass
|
||
|
||
print(f"\n✅ Answered {questions_answered} question(s)\n")
|
||
|
||
# Step 3: Submit domain
|
||
print(f"[STEP 3] Submitting domain assessment...")
|
||
try:
|
||
domain_assessment.click_submit()
|
||
time.sleep(1)
|
||
|
||
if domain_assessment.is_submit_modal_present():
|
||
domain_assessment.confirm_submit()
|
||
time.sleep(2)
|
||
print(f"✅ Domain submitted successfully")
|
||
else:
|
||
print(f"⚠️ Submit modal not found (may have auto-submitted)")
|
||
except Exception as e:
|
||
print(f"⚠️ Error submitting domain: {e}")
|
||
|
||
# Step 4: Handle success modal
|
||
if domain_assessment.is_success_modal_present():
|
||
domain_assessment.close_success_modal()
|
||
time.sleep(1)
|
||
|
||
# Step 5: Provide domain feedback
|
||
print(f"\n[STEP 4] Providing domain feedback...")
|
||
domain_feedback = DomainFeedbackPage(driver)
|
||
|
||
if domain_feedback.is_modal_present():
|
||
domain_feedback.submit_feedback(
|
||
question1_yes=True,
|
||
question1_text="Automated test feedback.",
|
||
question2_text="This is automated feedback for testing purposes."
|
||
)
|
||
time.sleep(2)
|
||
print(f"✅ Domain feedback submitted")
|
||
else:
|
||
print(f"ℹ️ Domain feedback modal not present (may have been skipped)")
|
||
|
||
# Step 6: Verify return to domains page
|
||
print(f"\n[STEP 5] Verifying navigation...")
|
||
assert "/domains" in driver.current_url, \
|
||
f"Should return to domains page. Current URL: {driver.current_url}"
|
||
|
||
print(f"✅ Successfully returned to domains page")
|
||
print(f" URL: {driver.current_url}\n")
|
||
|
||
print(f"{'='*80}")
|
||
print(f"✅ DOMAIN {unlocked_domain_id} COMPLETED SUCCESSFULLY")
|
||
print(f"{'='*80}\n")
|
||
|
||
@pytest.mark.slow
|
||
def test_complete_full_assessment_flow(self, assessment_with_domains):
|
||
"""
|
||
Test completing entire assessment (all domains + final feedback)
|
||
|
||
This is a comprehensive test that:
|
||
1. Completes all unlocked domains
|
||
2. Provides feedback for each domain
|
||
3. Provides final feedback
|
||
4. Verifies assessment completion
|
||
"""
|
||
driver = assessment_with_domains['driver']
|
||
domains_page = assessment_with_domains['domains_page']
|
||
domain_ids = assessment_with_domains['domain_ids']
|
||
|
||
if not domain_ids:
|
||
pytest.skip("No domains available")
|
||
|
||
# Get all unlocked domains
|
||
unlocked_domains = [d for d in domain_ids if domains_page.is_domain_unlocked(d)]
|
||
|
||
if not unlocked_domains:
|
||
pytest.skip("No unlocked domains available")
|
||
|
||
print(f"\n{'='*80}")
|
||
print(f"COMPLETE ASSESSMENT FLOW TEST")
|
||
print(f"{'='*80}")
|
||
print(f"Total domains: {len(domain_ids)}")
|
||
print(f"Unlocked domains: {len(unlocked_domains)}")
|
||
print(f"{'='*80}\n")
|
||
|
||
# Complete each unlocked domain
|
||
completed_domains = []
|
||
|
||
for idx, domain_id in enumerate(unlocked_domains, 1):
|
||
print(f"\n{'='*80}")
|
||
print(f"DOMAIN {idx}/{len(unlocked_domains)}: {domain_id}")
|
||
print(f"{'='*80}\n")
|
||
|
||
try:
|
||
# Start domain
|
||
domains_page.click_domain_action(domain_id)
|
||
time.sleep(2)
|
||
|
||
domain_assessment = DomainAssessmentPage(driver)
|
||
domain_assessment.wait_for_page_load()
|
||
domain_assessment.dismiss_guidance()
|
||
time.sleep(1)
|
||
|
||
# Answer a few questions (simplified - in real scenario, answer all)
|
||
questions_answered = 0
|
||
for q in range(1, 6): # Answer first 5 questions
|
||
question_id = domain_assessment.get_current_question_id()
|
||
if not question_id:
|
||
question_ids = domain_assessment.get_all_questions()
|
||
if question_ids:
|
||
question_id = question_ids[0]
|
||
else:
|
||
break
|
||
|
||
question_type = domain_assessment.get_question_type(question_id)
|
||
|
||
try:
|
||
if question_type == "multiple_choice":
|
||
options = domain_assessment.get_question_options(question_id)
|
||
if options:
|
||
domain_assessment.answer_multiple_choice(question_id, options[0])
|
||
elif question_type == "true_false":
|
||
domain_assessment.answer_true_false(question_id, True)
|
||
elif question_type == "rating":
|
||
domain_assessment.answer_rating(question_id, 3)
|
||
elif question_type == "open_ended":
|
||
domain_assessment.answer_open_ended(question_id, "Test response.")
|
||
elif question_type == "matrix":
|
||
domain_assessment.answer_matrix(question_id, 0, 0)
|
||
|
||
questions_answered += 1
|
||
time.sleep(0.3)
|
||
|
||
if domain_assessment.is_next_button_visible():
|
||
domain_assessment.click_next()
|
||
time.sleep(1)
|
||
else:
|
||
break
|
||
except:
|
||
break
|
||
|
||
# Submit domain
|
||
try:
|
||
domain_assessment.click_submit()
|
||
time.sleep(1)
|
||
if domain_assessment.is_submit_modal_present():
|
||
domain_assessment.confirm_submit()
|
||
time.sleep(2)
|
||
except:
|
||
pass
|
||
|
||
# Handle feedback
|
||
domain_feedback = DomainFeedbackPage(driver)
|
||
if domain_feedback.is_modal_present():
|
||
domain_feedback.submit_feedback(
|
||
question1_yes=True,
|
||
question1_text="Automated feedback.",
|
||
question2_text="Test feedback response."
|
||
)
|
||
time.sleep(2)
|
||
|
||
# Return to domains page
|
||
domains_page = DomainsPage(driver)
|
||
domains_page.wait_for_page_load()
|
||
|
||
completed_domains.append(domain_id)
|
||
print(f"✅ Domain {domain_id} completed")
|
||
|
||
except Exception as e:
|
||
print(f"⚠️ Error completing domain {domain_id}: {e}")
|
||
# Continue with next domain
|
||
|
||
print(f"\n{'='*80}")
|
||
print(f"COMPLETED {len(completed_domains)}/{len(unlocked_domains)} DOMAINS")
|
||
print(f"{'='*80}\n")
|
||
|
||
# Check for final feedback
|
||
print(f"[FINAL STEP] Checking for final feedback...")
|
||
|
||
# Check domains page for final feedback
|
||
if domains_page.is_final_feedback_modal_present():
|
||
domains_page.fill_final_feedback(
|
||
question1_yes=True,
|
||
question1_reason="Automated final feedback.",
|
||
question2_text="This is automated final feedback for testing."
|
||
)
|
||
time.sleep(2)
|
||
print(f"✅ Final feedback submitted (from domains page)")
|
||
|
||
# Check for feedback survey
|
||
feedback_survey = FeedbackSurveyPage(driver)
|
||
if feedback_survey.is_overall_modal_present():
|
||
feedback_survey.set_overall_rating(4)
|
||
feedback_survey.submit_feedback()
|
||
time.sleep(2)
|
||
print(f"✅ Final feedback submitted (overall modal)")
|
||
|
||
if feedback_survey.is_per_question_modal_present():
|
||
question_ids = feedback_survey.get_feedback_questions()
|
||
for q_id in question_ids[:3]:
|
||
try:
|
||
feedback_survey.set_question_rating(q_id, 4)
|
||
feedback_survey.set_question_comment(q_id, "Test comment")
|
||
except:
|
||
pass
|
||
feedback_survey.submit_feedback()
|
||
time.sleep(2)
|
||
print(f"✅ Final feedback submitted (per-question modal)")
|
||
|
||
print(f"\n{'='*80}")
|
||
print(f"✅ ASSESSMENT FLOW COMPLETED SUCCESSFULLY")
|
||
print(f"{'='*80}\n")
|
||
|
||
|
||
|
||
|
||
|
||
|