"""
|
|
Domain Assessment Tests
|
|
|
|
Tests for answering questions in domain assessments.
|
|
Covers all 5 question types and navigation.
|
|
"""
|
|
import pytest
|
|
from pages.domain_assessment_page import DomainAssessmentPage
|
|
from pages.domain_feedback_page import DomainFeedbackPage
|
|
from pages.domains_page import DomainsPage
|
|
from utils.question_answer_helper import QuestionAnswerHelper
|
|
from utils.wait_helpers import WaitHelpers
|
|
from utils.randomized_wait import RandomizedWait
|
|
from selenium.webdriver.common.by import By
|
|
import time
|
|
|
|
|
|
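

# The "first unlocked domain" lookup is written out inline in setup() and again in the
# instructions-modal tests below. The helper here is a minimal sketch of that shared
# pattern, using only DomainsPage methods already exercised in this file
# (get_all_domain_ids, is_domain_unlocked); it is illustrative and not wired into the tests.
def _first_unlocked_domain(domains_page):
    """Return the ID of the first unlocked domain, or None if every domain is locked."""
    for domain_id in domains_page.get_all_domain_ids():
        if domains_page.is_domain_unlocked(domain_id):
            return domain_id
    return None
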


@pytest.mark.assessment
@pytest.mark.domain_assessment
class TestDomainAssessment:
    """Test class for domain assessment functionality"""

    @pytest.fixture(autouse=True)
    def setup(self, smart_assessment_setup):
        """
        Setup fixture - gets student ready for assessment

        Args:
            smart_assessment_setup: Smart fixture that handles login, password reset, profile completion
        """
        self.driver = smart_assessment_setup['driver']
        self.cpid = smart_assessment_setup['cpid']
        self.assessments_page = smart_assessment_setup['assessments_page']
        self.wait = WaitHelpers(self.driver)

        # Navigate to domains page
        assessment_ids = self.assessments_page.get_assessment_ids()
        if not assessment_ids:
            pytest.skip("No assessments available")

        # Start first assessment
        self.assessments_page.click_begin_assessment(assessment_ids[0])

        # Get domains page
        self.domains_page = DomainsPage(self.driver)
        self.domains_page.wait_for_page_load()

        # Get first unlocked domain
        domain_ids = self.domains_page.get_all_domain_ids()
        if not domain_ids:
            pytest.skip("No domains available")

        # Find first unlocked domain
        unlocked_domain_id = None
        for domain_id in domain_ids:
            if self.domains_page.is_domain_unlocked(domain_id):
                unlocked_domain_id = domain_id
                break

        if not unlocked_domain_id:
            pytest.skip("No unlocked domains available")

        # Start domain assessment
        self.domains_page.click_start_domain(unlocked_domain_id)

        # Store current domain ID for reference
        self.current_domain_id = unlocked_domain_id

        # Get domain assessment page
        self.domain_assessment_page = DomainAssessmentPage(self.driver)

        # Wait for page to load (handles instructions modal)
        self.domain_assessment_page.wait_for_page_load()

        # Wait a moment for page to stabilize (randomized)
        RandomizedWait.wait_for_page_load('initial')

        # Dismiss instructions modal if present (must be done before accessing questions)
        if self.domain_assessment_page.is_instructions_modal_present():
            print("📋 Instructions modal detected - dismissing...")
            self.domain_assessment_page.dismiss_instructions_modal()
            # Wait for modal to close and page to load (randomized)
            RandomizedWait.wait_for_page_load('modal')

        # Now wait for actual assessment page to be visible
        try:
            self.wait.wait_for_element_visible(self.domain_assessment_page.PAGE, timeout=10)
        except Exception:
            # If page not visible, check if we're on the right URL
            current_url = self.driver.current_url
            if "/domain/" in current_url:
                print(f"⚠️ Page element not found but URL is correct: {current_url}")
                # Try waiting for action bar as fallback
                try:
                    self.wait.wait_for_element_visible(self.domain_assessment_page.ACTION_BAR, timeout=5)
                except Exception:
                    pass

        # Dismiss guidance modal if present
        self.domain_assessment_page.dismiss_guidance()

        # Initialize question answer helper
        self.question_helper = QuestionAnswerHelper(self.driver)

        yield

        # Cleanup if needed
        pass
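
    # After setup() completes, each test can rely on the attributes assigned above:
    #   self.domain_assessment_page - page object for the domain started in setup
    #   self.question_helper        - QuestionAnswerHelper bound to the current driver
    #   self.current_domain_id      - ID of the domain that setup started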

    def test_instructions_modal_appears(self):
        """Test that instructions modal appears when starting a NEW domain assessment"""
        # Navigate back to assessments page (setup already started one domain)
        from pages.assessments_page import AssessmentsPage
        assessments_page = AssessmentsPage(self.driver)
        assessments_page.navigate()
        assessments_page.wait_for_page_load()

        # Get assessment IDs
        assessment_ids = assessments_page.get_assessment_ids()
        if not assessment_ids:
            pytest.skip("No assessments available")

        # Start assessment
        assessments_page.click_begin_assessment(assessment_ids[0])

        domains_page = DomainsPage(self.driver)
        domains_page.wait_for_page_load()

        domain_ids = domains_page.get_all_domain_ids()
        if not domain_ids:
            pytest.skip("No domains available")

        # Find a domain that hasn't been started yet (different from setup's domain)
        unlocked_domain_id = None
        setup_domain_id = getattr(self, 'current_domain_id', None)

        for domain_id in domain_ids:
            if domains_page.is_domain_unlocked(domain_id):
                # Prefer a domain that wasn't started in setup
                if domain_id != setup_domain_id:
                    unlocked_domain_id = domain_id
                    break

        # If all domains were started, use any unlocked domain (modal might still appear)
        if not unlocked_domain_id:
            for domain_id in domain_ids:
                if domains_page.is_domain_unlocked(domain_id):
                    unlocked_domain_id = domain_id
                    break

        if not unlocked_domain_id:
            pytest.skip("No unlocked domains available")

        # Start domain assessment
        domains_page.click_start_domain(unlocked_domain_id)

        domain_assessment_page = DomainAssessmentPage(self.driver)
        # Wait for page load - this will detect if instructions modal is present
        domain_assessment_page.wait_for_page_load()

        # Verify instructions modal is present (it should appear for new domains)
        # Note: If domain was already started, modal won't appear - that's expected behavior
        modal_present = domain_assessment_page.is_instructions_modal_present()
        if not modal_present:
            # If modal not present, it means domain was already started - skip test
            pytest.skip(f"Instructions modal not present - domain {unlocked_domain_id} may have been already started")

        assert modal_present, "Instructions modal should be present when starting a new domain assessment"

    def test_instructions_modal_dismiss(self):
        """Test dismissing instructions modal"""
        # Navigate back to assessments page (setup already started one domain)
        from pages.assessments_page import AssessmentsPage
        assessments_page = AssessmentsPage(self.driver)
        assessments_page.navigate()
        assessments_page.wait_for_page_load()

        # Get assessment IDs
        assessment_ids = assessments_page.get_assessment_ids()
        if not assessment_ids:
            pytest.skip("No assessments available")

        # Start assessment
        assessments_page.click_begin_assessment(assessment_ids[0])

        domains_page = DomainsPage(self.driver)
        domains_page.wait_for_page_load()

        domain_ids = domains_page.get_all_domain_ids()
        if not domain_ids:
            pytest.skip("No domains available")

        # Find a domain that hasn't been started yet (different from setup's domain)
        unlocked_domain_id = None
        setup_domain_id = getattr(self, 'current_domain_id', None)

        for domain_id in domain_ids:
            if domains_page.is_domain_unlocked(domain_id):
                # Prefer a domain that wasn't started in setup
                if domain_id != setup_domain_id:
                    unlocked_domain_id = domain_id
                    break

        # If all domains were started, use any unlocked domain
        if not unlocked_domain_id:
            for domain_id in domain_ids:
                if domains_page.is_domain_unlocked(domain_id):
                    unlocked_domain_id = domain_id
                    break

        if not unlocked_domain_id:
            pytest.skip("No unlocked domains available")

        # Start domain assessment
        domains_page.click_start_domain(unlocked_domain_id)

        domain_assessment_page = DomainAssessmentPage(self.driver)
        domain_assessment_page.wait_for_page_load()

        # Check if instructions modal is present
        modal_present = domain_assessment_page.is_instructions_modal_present()
        if not modal_present:
            # If modal not present, domain was already started - skip test
            pytest.skip(f"Instructions modal not present - domain {unlocked_domain_id} may have been already started")

        assert modal_present, "Instructions modal should be present when starting a new domain assessment"

        # Dismiss modal
        domain_assessment_page.dismiss_instructions_modal()

        # Wait for modal to close
        RandomizedWait.wait_for_page_load('modal')

        # Verify modal is dismissed
        assert not domain_assessment_page.is_instructions_modal_present(), "Instructions modal should be dismissed after clicking continue"

    def test_answer_single_question(self):
        """Test answering a single question"""
        # Get current question ID
        question_id = self.question_helper.get_question_id()
        assert question_id is not None, "Should have a current question"

        # Get question type
        question_type = self.question_helper.get_question_type(question_id)
        assert question_type != "unknown", f"Should detect question type, got: {question_type}"

        # Answer the question
        answer_result = self.question_helper.answer_question(question_id, question_type)

        assert answer_result['answer'] is not None, "Should have answered the question"
        assert answer_result['question_type'] == question_type, "Question type should match"
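
    # The tests below exercise each of the 5 question types individually. For reference,
    # these are the per-type helper calls exactly as used in this file; the generic
    # answer_question() above presumably dispatches to one of them based on question_type
    # (an assumption - the helper's implementation is not shown here):
    #   answer_multiple_choice(question_id, option_label="A")
    #   answer_true_false(question_id, value=True)
    #   answer_rating_scale(question_id, score=None)    # None lets the helper pick any valid option
    #   answer_open_ended(question_id, text="...")
    #   answer_matrix(question_id, row_index=0, column_index=0)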

    def test_answer_multiple_choice_question(self):
        """Test answering a multiple choice question"""
        # Get current question
        question_id = self.question_helper.get_question_id()
        if not question_id:
            pytest.skip("No question available")

        question_type = self.question_helper.get_question_type(question_id)
        if question_type != "multiple_choice":
            pytest.skip(f"Current question is not multiple choice, it's {question_type}")

        # Answer with option A
        option_label = self.question_helper.answer_multiple_choice(question_id, option_label="A")
        assert option_label == "A", "Should have selected option A"

    def test_answer_true_false_question(self):
        """Test answering a true/false question"""
        # Get current question
        question_id = self.question_helper.get_question_id()
        if not question_id:
            pytest.skip("No question available")

        question_type = self.question_helper.get_question_type(question_id)
        if question_type != "true_false":
            pytest.skip(f"Current question is not true/false, it's {question_type}")

        # Answer with True
        value = self.question_helper.answer_true_false(question_id, value=True)
        assert value == "True", "Should have selected True"

    def test_answer_rating_scale_question(self):
        """Test answering a rating scale question"""
        # Get current question
        question_id = self.question_helper.get_question_id()
        if not question_id:
            pytest.skip("No question available")

        question_type = self.question_helper.get_question_type(question_id)
        if question_type != "rating_scale":
            pytest.skip(f"Current question is not rating scale, it's {question_type}")

        # Answer with any valid rating (rating scale can have labels like "Sometimes", not just numbers)
        score = self.question_helper.answer_rating_scale(question_id, score=None)  # Let it pick any valid option
        assert score is not None and len(score) > 0, f"Should have selected a valid rating, got: {score}"

    def test_answer_open_ended_question(self):
        """Test answering an open-ended question"""
        # Get current question
        question_id = self.question_helper.get_question_id()
        if not question_id:
            pytest.skip("No question available")

        question_type = self.question_helper.get_question_type(question_id)
        if question_type != "open_ended":
            pytest.skip(f"Current question is not open-ended, it's {question_type}")

        # Answer with custom text
        text = "This is a thoughtful response to demonstrate understanding."
        entered_text = self.question_helper.answer_open_ended(question_id, text=text)
        assert entered_text == text, "Should have entered the text"

    def test_answer_matrix_question(self):
        """Test answering a matrix question"""
        # Get current question
        question_id = self.question_helper.get_question_id()
        if not question_id:
            pytest.skip("No question available")

        question_type = self.question_helper.get_question_type(question_id)
        if question_type != "matrix":
            pytest.skip(f"Current question is not matrix, it's {question_type}")

        # Answer with row 0, column 0
        row_idx, col_idx = self.question_helper.answer_matrix(question_id, row_index=0, column_index=0)
        assert row_idx == 0 and col_idx == 0, "Should have selected matrix cell (0, 0)"

    def test_navigate_questions(self):
        """Test navigating between questions using Next/Previous buttons"""
        # Answer current question first
        question_id = self.question_helper.get_question_id()
        if not question_id:
            pytest.skip("No question available")

        question_type = self.question_helper.get_question_type(question_id)
        if question_type != "unknown":
            self.question_helper.answer_question(question_id, question_type)
            RandomizedWait.wait_for_question_answer(question_type)

        # Check if Next button is available
        if not self.domain_assessment_page.is_next_button_visible():
            pytest.skip("Next button not visible - may be on last question or submit required")

        # Store original question ID
        original_question_id = question_id

        # Click Next
        self.domain_assessment_page.click_next()
        RandomizedWait.wait_for_navigation('next')  # Wait for question to load

        # Verify we're on a new question
        new_question_id = self.question_helper.get_question_id()
        if not new_question_id:
            pytest.fail("Could not get question ID after clicking Next")

        assert new_question_id != original_question_id, f"Should be on a different question after clicking Next. Original: {original_question_id}, New: {new_question_id}"

        # Click Previous
        self.domain_assessment_page.click_previous()
        RandomizedWait.wait_for_navigation('previous')  # Wait for question to load

        # Verify we're back to original question
        returned_question_id = self.question_helper.get_question_id()
        if not returned_question_id:
            pytest.fail("Could not get question ID after clicking Previous")

        assert returned_question_id == original_question_id, f"Should be back to original question after clicking Previous. Expected: {original_question_id}, Got: {returned_question_id}"

    def test_answer_all_questions_in_domain(self):
        """
        Test answering all questions in a domain (single domain test - Option 2)

        This is the main test for single domain completion
        """
        questions_answered = 0
        max_questions = 100  # Safety limit

        print(f"\n🎯 Starting single domain assessment test")

        # Answer questions until we can submit
        consecutive_failures = 0
        max_consecutive_failures = 3

        while questions_answered < max_questions:
            # Wait a moment for question to load (randomized)
            RandomizedWait.wait_for_page_load('navigation')

            # Get current question
            question_id = self.question_helper.get_question_id()
            if not question_id:
                print(f"⚠️ Could not detect question ID, waiting...")
                RandomizedWait.wait_for_error_recovery('wait')
                question_id = self.question_helper.get_question_id()
                if not question_id:
                    consecutive_failures += 1
                    print(f"⚠️ Still no question ID after retry (failure {consecutive_failures}/{max_consecutive_failures})")
                    if consecutive_failures >= max_consecutive_failures:
                        print(f"❌ Too many consecutive failures, breaking")
                        break
                    # Try clicking next anyway
                    if self.domain_assessment_page.is_next_button_visible():
                        try:
                            self.domain_assessment_page.click_next()
                            RandomizedWait.wait_for_navigation('next')
                        except Exception:
                            pass
                    continue

            print(f"🔍 Detected question ID: {question_id}")

            # Get question type
            question_type = self.question_helper.get_question_type(question_id)
            if question_type == "unknown":
                print(f"⚠️ Unknown question type for question {question_id}")
                print(f"   Attempting to scroll and wait...")
                # Scroll to question element
                try:
                    question_elem = self.driver.find_element(By.CSS_SELECTOR, f"[data-testid='domain_question__{question_id}']")
                    self.driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", question_elem)
                    RandomizedWait.wait_for_page_load('navigation')
                    question_type = self.question_helper.get_question_type(question_id)
                except Exception:
                    pass

                if question_type == "unknown":
                    print(f"❌ Still unknown after scroll, skipping question {question_id}")
                    # Try to move to next question
                    if self.domain_assessment_page.is_next_button_visible():
                        self.domain_assessment_page.click_next()
                        RandomizedWait.wait_for_navigation('next')
                    continue

            # Answer the question
            try:
                answer_result = self.question_helper.answer_question(question_id, question_type)
                questions_answered += 1
                consecutive_failures = 0  # Reset failure counter on success

                # Realistic wait after answering (varies by question type)
                wait_time = RandomizedWait.wait_for_question_answer(question_type)
                print(f"✅ Answered question {questions_answered}: {question_type} (ID: {question_id}) [waited {wait_time:.1f}s]")
            except Exception as e:
                consecutive_failures += 1
                print(f"❌ Error answering question {question_id}: {e} (failure {consecutive_failures}/{max_consecutive_failures})")
                if consecutive_failures >= max_consecutive_failures:
                    print(f"❌ Too many consecutive failures answering questions, breaking")
                    import traceback
                    traceback.print_exc()
                    break
                # Try to continue to next question
                if self.domain_assessment_page.is_next_button_visible():
                    try:
                        self.domain_assessment_page.click_next()
                        RandomizedWait.wait_for_navigation('next')
                    except Exception:
                        pass
                continue

            # Check if we can submit (all questions answered)
            submit_ready = False
            try:
                submit_button = self.driver.find_element(*self.domain_assessment_page.SUBMIT_BUTTON)
                is_enabled = submit_button.is_enabled()
                is_displayed = submit_button.is_displayed()
                print(f"🔍 Submit button check: enabled={is_enabled}, displayed={is_displayed}, questions_answered={questions_answered}")
                if is_enabled and is_displayed:
                    print(f"✅ All questions answered! Ready to submit ({questions_answered} questions)")
                    submit_ready = True
                    break
            except Exception as e:
                print(f"⚠️ Submit button check failed: {e}")
                # Submit button not found or not enabled - continue to next question

            if submit_ready:
                break

            # Click Next if available
            next_visible = self.domain_assessment_page.is_next_button_visible()
            print(f"🔍 Next button visible: {next_visible}")

            if next_visible:
                try:
                    print(f"➡️ Clicking Next button...")
                    self.domain_assessment_page.click_next()
                    wait_time = RandomizedWait.wait_for_navigation('next')
                    print(f"✅ Moved to next question [waited {wait_time:.1f}s]")
                except Exception as e:
                    print(f"❌ Error clicking Next: {e}")
                    import traceback
                    traceback.print_exc()
                    # Try to continue anyway - might be last question
                    break
            else:
                # No next button - check if submit is available
                print(f"⚠️ Next button not visible - checking if this is the last question...")
                try:
                    submit_button = self.driver.find_element(*self.domain_assessment_page.SUBMIT_BUTTON)
                    is_enabled = submit_button.is_enabled()
                    is_displayed = submit_button.is_displayed()
                    print(f"🔍 Submit button (last question): enabled={is_enabled}, displayed={is_displayed}")
                    if is_enabled and is_displayed:
                        print(f"✅ Last question answered! Ready to submit ({questions_answered} questions)")
                        break
                    else:
                        print(f"⚠️ No Next button and Submit not enabled. Questions answered: {questions_answered}")
                        # Wait a bit more for submit to become enabled (randomized)
                        print(f"⏳ Waiting for submit button to become enabled...")
                        wait_time = RandomizedWait.wait_for_error_recovery('wait')
                        print(f"   Waited {wait_time:.1f}s")
                        try:
                            submit_button = self.driver.find_element(*self.domain_assessment_page.SUBMIT_BUTTON)
                            if submit_button.is_enabled() and submit_button.is_displayed():
                                print(f"✅ Submit button now enabled! Ready to submit ({questions_answered} questions)")
                                break
                            else:
                                print(f"⚠️ Submit still not enabled after wait. Proceeding anyway...")
                                break
                        except Exception:
                            print(f"⚠️ Could not find submit button after wait")
                            break
                except Exception as e:
                    print(f"⚠️ No Next button and Submit button not found: {e}. Questions answered: {questions_answered}")
                    break

        # Submit the assessment
        assert questions_answered > 0, f"Should have answered at least one question. Only answered: {questions_answered}"

        print(f"\n📊 Assessment Summary:")
        print(f"   Total questions answered: {questions_answered}")
        print(f"   Ready to submit: Yes")

        # Wait a moment for UI to update (randomized)
        RandomizedWait.wait_for_page_load('modal')

        # Verify submit button is enabled before clicking
        try:
            submit_button = self.driver.find_element(*self.domain_assessment_page.SUBMIT_BUTTON)
            if not submit_button.is_enabled():
                print(f"⚠️ Submit button not enabled yet, waiting...")
                for i in range(5):
                    wait_time = RandomizedWait.wait_for_error_recovery('wait')
                    submit_button = self.driver.find_element(*self.domain_assessment_page.SUBMIT_BUTTON)
                    if submit_button.is_enabled():
                        print(f"✅ Submit button now enabled! [waited {wait_time:.1f}s]")
                        break
                else:
                    print(f"⚠️ Submit button still not enabled, attempting to click anyway...")
        except Exception as e:
            print(f"⚠️ Could not verify submit button state: {e}")

        print(f"\n📤 Submitting domain assessment...")
        try:
            self.domain_assessment_page.click_submit()
            print(f"✅ Submit button clicked successfully")
        except Exception as e:
            print(f"❌ Error clicking submit: {e}")
            raise

        # Confirm submission
        print(f"⏳ Waiting for submit confirmation modal...")
        try:
            assert self.domain_assessment_page.is_submit_modal_present(), "Submit confirmation modal should appear"
            print(f"✅ Submit confirmation modal appeared")
            self.domain_assessment_page.confirm_submit()
            print(f"✅ Submission confirmed")
        except Exception as e:
            print(f"❌ Error in submission confirmation: {e}")
            raise

        # Wait for success modal
        print(f"⏳ Waiting for success modal...")
        try:
            assert self.domain_assessment_page.is_success_modal_present(), "Success modal should appear after submission"
            print(f"✅ Success modal appeared")
        except Exception as e:
            print(f"⚠️ Success modal check failed: {e}")
            # Continue anyway - modal might have closed quickly

        # Wait for feedback modal
        print(f"⏳ Waiting for domain feedback modal...")
        feedback_page = DomainFeedbackPage(self.driver)
        self.driver.implicitly_wait(5)  # Raise the implicit wait so the modal lookups below have time to find it

        # Wait up to 10 seconds for feedback modal (with randomized checks)
        feedback_modal_present = False
        for i in range(10):
            if feedback_page.is_modal_present():
                feedback_modal_present = True
                break
            RandomizedWait.wait_for_page_load('modal')

        if feedback_modal_present:
            print(f"✅ Domain feedback modal appeared")
            print(f"📝 Submitting domain feedback...")
            try:
                feedback_page.submit_feedback(
                    question1_yes=True,
                    question2_text="The assessment was clear and well-structured. All questions were relevant and the interface was user-friendly."
                )
                print(f"✅ Domain feedback submitted successfully")
            except Exception as e:
                print(f"❌ Error submitting feedback: {e}")
                raise
        else:
            print(f"⚠️ Domain feedback modal did not appear (may not be required for this domain)")

        print(f"\n🎉 Single domain assessment completed successfully!")
        print(f"   Questions answered: {questions_answered}")
        print(f"   Assessment submitted: Yes")
        print(f"   Feedback submitted: {'Yes' if feedback_modal_present else 'N/A'}")
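
# Suggested local invocation (uses the markers declared on TestDomainAssessment above):
#   pytest -m domain_assessment -v
# The exact file path and extra flags depend on the project's pytest configuration.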