"""
|
|
Pytest configuration and fixtures for Cognitive Prism Automation Tests
|
|
"""
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
# Add project root to Python path
|
|
project_root = Path(__file__).parent.parent
|
|
if str(project_root) not in sys.path:
|
|
sys.path.insert(0, str(project_root))
|
|
|
|
import pytest
|
|
from selenium.webdriver import Chrome, Firefox, Edge
|
|
from utils.driver_manager import DriverManager
|
|
from pages.login_page import LoginPage
|
|
from config.config import SCREENSHOT_ON_FAILURE, SCREENSHOT_DIR
|
|
import os
|
|
|
|
|
|


def pytest_configure(config):
    """Register custom pytest markers"""
    config.addinivalue_line(
        "markers", "authentication: marks tests as authentication tests"
    )
    config.addinivalue_line(
        "markers", "login: marks tests as login tests"
    )
    config.addinivalue_line(
        "markers", "password_reset: marks tests as password reset tests"
    )
    config.addinivalue_line(
        "markers", "logout: marks tests as logout tests"
    )
    config.addinivalue_line(
        "markers", "integration: marks tests as integration tests"
    )
    config.addinivalue_line(
        "markers", "component: marks tests as component-level isolated tests (optional, run first)"
    )
    config.addinivalue_line(
        "markers", "profile: marks tests as profile related tests"
    )
    config.addinivalue_line(
        "markers", "assessment: marks tests as assessment flow tests"
    )
    config.addinivalue_line(
        "markers", "domain_assessment: marks tests as domain assessment tests"
    )
    config.addinivalue_line(
        "markers", "e2e: marks tests as end-to-end tests"
    )
    config.addinivalue_line(
        "markers", "complete_flow: marks tests as complete flow tests"
    )
    config.addinivalue_line(
        "markers", "final_feedback: marks tests as final feedback tests"
    )
    config.addinivalue_line(
        "markers", "domain_feedback: marks tests as domain feedback tests"
    )
    config.addinivalue_line(
        "markers", "domains_page: marks tests as domains page tests"
    )
    config.addinivalue_line(
        "markers", "assessments_page: marks tests as assessments page tests"
    )
    config.addinivalue_line(
        "markers", "load_test: marks tests as load tests"
    )
    config.addinivalue_line(
        "markers", "slow: marks tests as slow tests (may take longer)"
    )
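

# Usage note: the markers registered above can be combined with pytest's
# standard "-m" selection on the command line, e.g.
#
#   pytest -m login                       # run only login tests
#   pytest -m "assessment and not slow"   # assessment tests, skipping slow ones
#
# The commands are illustrative; the marker names come from pytest_configure above.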


def pytest_collection_modifyitems(config, items):
    """
    Modify test collection to ensure proper execution order.

    Order:
    1. Component tests (optional)
    2. Authentication tests
    3. Profile tests
    4. Assessment tests

    This ensures dependencies are met before running tests.
    """
    # Define execution order
    order_map = {
        "component": 0,       # Run first (optional)
        "authentication": 1,  # Run second
        "profile": 2,         # Run third
        "assessment": 3,      # Run last
    }

    # Sort tests by marker priority
    def get_test_priority(item):
        """Get priority based on markers"""
        for marker in item.iter_markers():
            marker_name = marker.name
            if marker_name in order_map:
                return order_map[marker_name]
        # Default priority for unmarked tests
        return 999

    # Sort items by priority
    items[:] = sorted(items, key=get_test_priority)
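

# Ordering sketch (hypothetical tests): a test marked "authentication" is
# collected before one marked "assessment" regardless of file order, and
# unmarked tests run last. Python's sorted() is stable, so tests within the
# same phase keep their original relative order.
#
#   @pytest.mark.authentication
#   def test_valid_login(driver): ...
#
#   @pytest.mark.assessment
#   def test_start_assessment(logged_in_driver): ...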


@pytest.fixture(scope="function")
def driver():
    """
    WebDriver fixture - creates and quits driver for each test

    Yields:
        WebDriver: WebDriver instance
    """
    driver = DriverManager.get_driver()
    yield driver
    DriverManager.quit_driver(driver)
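

# Usage sketch (hypothetical test and URL): requesting the fixture by name
# gives each test a fresh browser session that is quit automatically after
# the test finishes, even if it fails.
#
#   def test_login_page_loads(driver):
#       driver.get("https://example.com/login")
#       assert "login" in driver.title.lower()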


@pytest.fixture(scope="function")
def logged_in_driver(driver):
    """
    Fixture that provides a logged-in driver

    Args:
        driver: WebDriver fixture

    Yields:
        WebDriver: Logged-in WebDriver instance
    """
    login_page = LoginPage(driver)
    login_page.login()
    yield driver
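

# Usage sketch (hypothetical test): this fixture builds on "driver" and runs
# LoginPage.login() first, so tests that need an authenticated session can
# start from a logged-in state without repeating the login steps.
#
#   @pytest.mark.profile
#   def test_profile_page_opens(logged_in_driver):
#       ...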


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """
    Hook to capture screenshot on test failure

    Args:
        item: Test item
        call: Test call
    """
    outcome = yield
    rep = outcome.get_result()

    # Only take screenshot on failure
    if rep.when == "call" and rep.failed and SCREENSHOT_ON_FAILURE:
        # Get driver from fixture if available
        if "driver" in item.funcargs:
            driver = item.funcargs["driver"]
            try:
                # Ensure screenshot directory exists
                SCREENSHOT_DIR.mkdir(parents=True, exist_ok=True)

                # Create screenshot filename
                screenshot_name = f"{item.nodeid.replace('::', '_').replace('/', '_')}.png"
                screenshot_path = SCREENSHOT_DIR / screenshot_name

                # Take screenshot
                driver.save_screenshot(str(screenshot_path))
                print(f"\nScreenshot saved: {screenshot_path}")

                # Verify screenshot was created
                if screenshot_path.exists():
                    print(f"Screenshot verified: {screenshot_path.stat().st_size} bytes")
                else:
                    print(f"Warning: Screenshot file not found at {screenshot_path}")
            except Exception as e:
                print(f"Failed to take screenshot: {e}")
                import traceback
                traceback.print_exc()
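

# Naming sketch: a failing test such as tests/test_login.py::test_invalid_login
# (a hypothetical node id) would be saved under SCREENSHOT_DIR as
#
#   tests_test_login.py_test_invalid_login.png
#
# since "::" and "/" in the node id are both replaced with "_".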