"""
Generic Load Test - Login to Assessments Page

A transparent, world-class load testing mechanism that:
1. Logs in all users
2. Navigates to Assessments page
3. Verifies page renders fully
4. Tests scroll smoothness
5. Provides comprehensive tracking

This is a generic framework that can be extended with any test case.
Updated to use Assessments flow instead of Cognition Test.
"""
|
|
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path

import pytest

from config.config import BASE_URL
from pages.assessments_page import AssessmentsPage
from pages.dashboard_page import DashboardPage
from pages.login_page import LoginPage
# NOTE: CognitionTestPage removed - using Assessments flow instead
# from pages.cognition_test_page import CognitionTestPage
from utils.driver_manager import DriverManager
from utils.load_test_base import LoadTestBase, LoadTestResult, TestStatus
|
|
|
|
|
def login_and_navigate_to_assessments(user_id: int, headless: bool = True) -> dict:
    """
    Generic test function: login and navigate to the Assessments page.

    This is a simple, focused test that verifies:
    - Login works
    - Navigation works
    - Page renders fully
    - Page scrolls smoothly

    Args:
        user_id: Unique user identifier (used only to label progress steps).
        headless: Whether to run in headless mode (default: True for load testing).

    Returns:
        dict: {'driver': WebDriver, 'steps_completed': list[str], 'success': True}.
              The driver is intentionally left open on success so the caller
              (the load-test base class) can collect metrics before cleanup.

    Raises:
        Exception: Any failure is re-raised after best-effort driver cleanup so
        the caller can record this user as failed.
    """
    driver = None
    steps_completed: list = []

    try:
        # Step 1: Create WebDriver (headless configurable)
        driver = DriverManager.get_driver(headless=headless)
        steps_completed.append(f"User {user_id}: WebDriver created")

        # Step 2: Login
        login_page = LoginPage(driver)
        login_page.login()
        steps_completed.append(f"User {user_id}: Login successful")

        # Step 3: Navigate to Dashboard
        dashboard_page = DashboardPage(driver)
        dashboard_page.navigate()
        steps_completed.append(f"User {user_id}: Dashboard loaded")

        # Step 4: Handle profile modal if present
        if dashboard_page.is_profile_modal_present():
            dashboard_page.handle_profile_completion_modal()
            steps_completed.append(f"User {user_id}: Profile modal handled")

        # Step 5: Navigate to Assessments page (replacing old Cognition Test).
        # Imported locally, matching the original deferred-import style.
        from pages.student_nav_page import StudentNavPage
        nav_page = StudentNavPage(driver)
        nav_page.click_assessments()
        steps_completed.append(f"User {user_id}: Navigated to Assessments page")

        # Step 6: Verify Assessments page loaded
        assessments_page = AssessmentsPage(driver)
        assessments_page.wait_for_page_load()
        steps_completed.append(f"User {user_id}: Assessments page loaded")

        # Step 7: Verify page loaded (check URL)
        assert "/assessments" in driver.current_url, "Should be on assessments page"
        steps_completed.append(f"User {user_id}: Page verified (assessments page rendered)")

        # Step 8: Test scroll smoothness — scroll to bottom, then back to top.
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        steps_completed.append(f"User {user_id}: Scrolled to bottom")
        driver.execute_script("window.scrollTo(0, 0);")
        steps_completed.append(f"User {user_id}: Scrolled to top")

        # Step 9: Verify page is fully rendered
        page_ready = driver.execute_script("return document.readyState === 'complete'")
        if page_ready:
            steps_completed.append(f"User {user_id}: Page fully rendered")

        # Return result with driver (metrics will be measured by base class).
        # Driver cleanup happens in the base class or caller after metrics
        # are collected.
        return {
            'driver': driver,
            'steps_completed': steps_completed,
            'success': True
        }

    except Exception as e:
        error_msg = f"User {user_id}: ERROR - {type(e).__name__}: {str(e)}"
        steps_completed.append(error_msg)
        # Print error for debugging
        print(f" ❌ {error_msg}")
        if driver:
            try:
                DriverManager.quit_driver(driver)
            except Exception:
                # FIX: was a bare `except:` — keep best-effort cleanup, but no
                # longer swallow SystemExit/KeyboardInterrupt, and never mask
                # the original error being re-raised below.
                pass
        raise
|
|
|
|
|
class GenericLoadTest(LoadTestBase):
    """Generic load test executor: fans out one browser session per user."""

    def __init__(self, test_name: str = "Login to Assessments"):
        """Initialize the executor with a human-readable test name."""
        super().__init__(test_name)
        # Guards self.results while futures complete.
        self.lock = threading.Lock()

    def run_load_test(
        self,
        num_users: int,
        ramp_up_time: int = 0,
        max_workers: int = None,
        headless: bool = True
    ) -> dict:
        """
        Run load test with comprehensive tracking.

        Args:
            num_users: Number of concurrent users.
            ramp_up_time: Time to gradually ramp up users (seconds); 0 submits
                all users as fast as the executor allows.
            max_workers: Maximum concurrent workers (None = executor default).
            headless: Whether each user's browser runs headless (forwarded to
                the per-user test function).

        Returns:
            dict: {'summary': aggregate summary, 'results': per-user results}.
        """
        print(f"\n{'='*80}")
        print(f"🚀 STARTING LOAD TEST: {self.test_name}")
        print(f"{'='*80}")
        print(f"📊 Configuration:")
        print(f" Users: {num_users}")
        print(f" Ramp-up: {ramp_up_time}s")
        print(f" Max Workers: {max_workers or 'Unlimited'}")
        print(f"{'='*80}\n")

        self.start_time = datetime.now()
        self.results = []

        # Spread submissions evenly over the ramp-up window.
        delay_per_user = ramp_up_time / num_users if ramp_up_time > 0 else 0

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = []

            for user_id in range(1, num_users + 1):
                # Ramp-up delay.
                # FIX: `import time` was previously executed inside this loop on
                # every ramped-up iteration; it is now a module-level import.
                if delay_per_user > 0:
                    time.sleep(delay_per_user)

                # Submit test execution with headless parameter
                future = executor.submit(
                    self.execute_test_for_user,
                    user_id,
                    login_and_navigate_to_assessments,
                    headless=headless
                )
                futures.append((user_id, future))

                if user_id % 10 == 0:
                    print(f" ⏳ Started {user_id}/{num_users} users...")

            # Wait for all to complete and collect results
            print(f"\n ⏳ Waiting for all {num_users} users to complete...\n")
            completed = 0

            for user_id, future in futures:
                try:
                    result = future.result()
                    with self.lock:
                        self.results.append(result)
                    completed += 1
                    if completed % 10 == 0:
                        print(f" ✅ Completed {completed}/{num_users} users...")
                except Exception as e:
                    # A failed user is reported but does not abort the run.
                    print(f" ❌ User {user_id} failed: {str(e)}")

        self.end_time = datetime.now()

        # Calculate, print, and persist the aggregate summary.
        summary = self.calculate_summary()
        self.print_summary(summary)
        self.save_results(summary)

        return {
            'summary': summary,
            'results': self.results
        }
|
|
|
|
|
|
@pytest.mark.load_test
class TestGenericLoadTest:
    """Generic load tests"""

    @pytest.mark.parametrize("num_users", [10, 50, 100, 200, 500])
    def test_login_to_assessments_load(self, num_users):
        """
        Load test: login and navigate to the Assessments page.

        Exercises login, navigation, page rendering, and scroll smoothness
        while ``num_users`` simulated users run concurrently (no ramp-up,
        one worker per user).

        Args:
            num_users: Number of concurrent users for this parametrization.
        """
        outcome = GenericLoadTest("Login to Assessments").run_load_test(
            num_users=num_users,
            ramp_up_time=0,
            max_workers=num_users,
        )
        summary = outcome['summary']

        # Thresholds: ≥80% success, ≥90% smooth scrolls, <10s average load.
        assert summary.success_rate >= 80, \
            f"Success rate {summary.success_rate:.2f}% is below 80%"
        assert summary.scroll_smooth_rate >= 90, \
            f"Scroll smooth rate {summary.scroll_smooth_rate:.2f}% is below 90%"
        assert summary.avg_page_load_time < 10, \
            f"Average page load time {summary.avg_page_load_time:.2f}s exceeds 10s"
|
|
|
|
|
|
# Standalone execution
def run_generic_load_test(num_users: int = 100):
    """
    Run the generic login-to-assessments load test outside of pytest.

    Args:
        num_users: Number of concurrent users.

    Returns:
        The dict produced by ``GenericLoadTest.run_load_test`` (summary
        plus per-user results).
    """
    runner = GenericLoadTest("Login to Assessments")
    return runner.run_load_test(num_users=num_users)
|
|
|
|
|
|
if __name__ == "__main__":
    # CLI entry point: optional first argument is the user count (default 100).
    import sys

    cli_args = sys.argv[1:]
    user_count = int(cli_args[0]) if cli_args else 100
    run_generic_load_test(num_users=user_count)
|
|
|