From 7eb2ab1dc293f37dc88ef9bc79a9da071a9e9a51 Mon Sep 17 00:00:00 2001 From: Chandini Date: Fri, 26 Sep 2025 17:04:14 +0530 Subject: [PATCH] backend changes --- DATABASE_MIGRATION_FIX.md | 106 + config/urls.js | 4 +- docker-compose.yml | 43 +- scripts/cleanup-database.sh | 103 + scripts/migrate-all.sh | 18 +- services/api-gateway/.env .prod | 4 +- services/api-gateway/src/middleware/cors.js | 3 + services/api-gateway/src/server.js | 360 +- services/git-integration/Dockerfile | 7 +- services/git-integration/package-lock.json | 13 + services/git-integration/package.json | 1 + services/git-integration/src/app.js | 8 +- .../src/migrations/004_webhook_events.sql | 35 + .../src/migrations/005_webhook_commits.sql | 52 + .../src/migrations/006_commit_changes.sql | 31 + .../migrations/007_add_last_synced_commit.sql | 6 + .../migrations/008_provider_token_tables.sql | 36 + .../009_provider_webhook_tables.sql | 68 + .../src/migrations/010_remove_template_id.sql | 18 + .../011_multi_github_accounts_per_user.sql | 32 + ...012_add_user_id_to_github_repositories.sql | 10 + .../src/routes/github-integration.routes.js | 188 +- .../src/routes/github-oauth.js | 72 +- .../git-integration/src/routes/vcs.routes.js | 511 ++ .../src/routes/webhook.routes.js | 114 + .../src/services/bitbucket-oauth.js | 64 + .../src/services/file-storage.service.js | 15 + .../src/services/git-repo.service.js | 143 + .../src/services/gitea-oauth.js | 77 + .../services/github-integration.service.js | 401 +- .../src/services/github-oauth.js | 151 +- .../src/services/gitlab-oauth.js | 70 + .../src/services/provider-registry.js | 84 + .../services/providers/bitbucket.adapter.js | 166 + .../src/services/providers/gitea.adapter.js | 166 + .../src/services/providers/gitlab.adapter.js | 177 + .../src/services/vcs-provider.interface.js | 62 + .../src/services/vcs-webhook.service.js | 456 + .../src/services/webhook.service.js | 361 + services/git-integration/test-webhook.js | 70 + services/tech-stack-selector/Dockerfile | 51 +- .../Neo4j_From_Postgres.cql | 120 + services/tech-stack-selector/Readme.md | 14 +- .../TechStackSelector_Complete_README.md | 189 + .../tech-stack-selector/db/001_schema.sql | 7769 +++++++++++++++++ .../db/002_tools_migration.sql | 162 + .../db/003_tools_pricing_migration.sql | 788 ++ services/tech-stack-selector/docker-start.sh | 305 + .../migrate_postgres_to_neo4j.py | 232 + .../postman_collection.json | 1337 +++ services/tech-stack-selector/requirements.txt | 2 + services/tech-stack-selector/src/main.py | 944 -- .../tech-stack-selector/src/main.py.backup | 2532 +++++- .../tech-stack-selector/src/main_migrated.py | 1030 +++ .../src/postgres_to_neo4j_migration.py | 722 ++ services/tech-stack-selector/start.sh | 431 + services/tech-stack-selector/test_domains.py | 90 + .../tech-stack-selector/test_migration.py | 100 + services/template-manager/Dockerfile | 17 +- .../template-manager/add-sample-templates.js | 121 - services/template-manager/ai/requirements.txt | 12 + .../template-manager/ai/tech_stack_service.py | 2031 +++++ services/template-manager/package-lock.json | 552 +- services/template-manager/src/ai-service.js | 2 + .../src/migrations/001_initial_schema.sql | 14 +- .../src/migrations/009_ai_features.sql | 479 + .../src/migrations/migrate.js | 31 +- services/template-manager/start.sh | 16 + services/unison/.gitignore | 126 + services/unison/Dockerfile | 52 + services/unison/ENDPOINT_ANALYSIS.md | 199 + services/unison/README.md | 408 + services/unison/UNISON_WORKFLOW.md | 376 + services/unison/WORKFLOW_DIAGRAM.md | 499 ++ services/unison/config.env | 126 + services/unison/package-lock.json | 6686 ++++++++++++++ services/unison/package.json | 48 + services/unison/setup-env.sh | 51 + services/unison/src/app.js | 140 + .../unison/src/middleware/errorHandler.js | 72 + services/unison/src/middleware/healthCheck.js | 60 + .../unison/src/middleware/requestValidator.js | 45 + services/unison/src/routes/health.js | 160 + services/unison/src/routes/recommendations.js | 601 ++ services/unison/src/services/claudeService.js | 248 + .../unison/src/services/databaseService.js | 271 + .../unison/src/services/techStackService.js | 210 + .../unison/src/services/templateService.js | 307 + services/unison/src/utils/logger.js | 63 + services/unison/src/utils/schemaValidator.js | 308 + services/unison/start.sh | 212 + services/unison/unison_api.json | 647 ++ .../src/migrations/001_user_auth_schema.sql | 17 +- 93 files changed, 34818 insertions(+), 1513 deletions(-) create mode 100644 DATABASE_MIGRATION_FIX.md create mode 100644 scripts/cleanup-database.sh create mode 100644 services/git-integration/src/migrations/004_webhook_events.sql create mode 100644 services/git-integration/src/migrations/005_webhook_commits.sql create mode 100644 services/git-integration/src/migrations/006_commit_changes.sql create mode 100644 services/git-integration/src/migrations/007_add_last_synced_commit.sql create mode 100644 services/git-integration/src/migrations/008_provider_token_tables.sql create mode 100644 services/git-integration/src/migrations/009_provider_webhook_tables.sql create mode 100644 services/git-integration/src/migrations/010_remove_template_id.sql create mode 100644 services/git-integration/src/migrations/011_multi_github_accounts_per_user.sql create mode 100644 services/git-integration/src/migrations/012_add_user_id_to_github_repositories.sql create mode 100644 services/git-integration/src/routes/vcs.routes.js create mode 100644 services/git-integration/src/routes/webhook.routes.js create mode 100644 services/git-integration/src/services/bitbucket-oauth.js create mode 100644 services/git-integration/src/services/git-repo.service.js create mode 100644 services/git-integration/src/services/gitea-oauth.js create mode 100644 services/git-integration/src/services/gitlab-oauth.js create mode 100644 services/git-integration/src/services/provider-registry.js create mode 100644 services/git-integration/src/services/providers/bitbucket.adapter.js create mode 100644 services/git-integration/src/services/providers/gitea.adapter.js create mode 100644 services/git-integration/src/services/providers/gitlab.adapter.js create mode 100644 services/git-integration/src/services/vcs-provider.interface.js create mode 100644 services/git-integration/src/services/vcs-webhook.service.js create mode 100644 services/git-integration/src/services/webhook.service.js create mode 100644 services/git-integration/test-webhook.js create mode 100644 services/tech-stack-selector/Neo4j_From_Postgres.cql create mode 100644 services/tech-stack-selector/TechStackSelector_Complete_README.md create mode 100644 services/tech-stack-selector/db/001_schema.sql create mode 100644 services/tech-stack-selector/db/002_tools_migration.sql create mode 100644 services/tech-stack-selector/db/003_tools_pricing_migration.sql create mode 100644 services/tech-stack-selector/docker-start.sh create mode 100644 services/tech-stack-selector/migrate_postgres_to_neo4j.py create mode 100644 services/tech-stack-selector/postman_collection.json delete mode 100644 services/tech-stack-selector/src/main.py create mode 100644 services/tech-stack-selector/src/main_migrated.py create mode 100644 services/tech-stack-selector/src/postgres_to_neo4j_migration.py create mode 100644 services/tech-stack-selector/start.sh create mode 100644 services/tech-stack-selector/test_domains.py create mode 100644 services/tech-stack-selector/test_migration.py delete mode 100644 services/template-manager/add-sample-templates.js create mode 100644 services/template-manager/ai/requirements.txt create mode 100644 services/template-manager/ai/tech_stack_service.py create mode 100644 services/template-manager/src/migrations/009_ai_features.sql create mode 100644 services/template-manager/start.sh create mode 100644 services/unison/.gitignore create mode 100644 services/unison/Dockerfile create mode 100644 services/unison/ENDPOINT_ANALYSIS.md create mode 100644 services/unison/README.md create mode 100644 services/unison/UNISON_WORKFLOW.md create mode 100644 services/unison/WORKFLOW_DIAGRAM.md create mode 100644 services/unison/config.env create mode 100644 services/unison/package-lock.json create mode 100644 services/unison/package.json create mode 100644 services/unison/setup-env.sh create mode 100644 services/unison/src/app.js create mode 100644 services/unison/src/middleware/errorHandler.js create mode 100644 services/unison/src/middleware/healthCheck.js create mode 100644 services/unison/src/middleware/requestValidator.js create mode 100644 services/unison/src/routes/health.js create mode 100644 services/unison/src/routes/recommendations.js create mode 100644 services/unison/src/services/claudeService.js create mode 100644 services/unison/src/services/databaseService.js create mode 100644 services/unison/src/services/techStackService.js create mode 100644 services/unison/src/services/templateService.js create mode 100644 services/unison/src/utils/logger.js create mode 100644 services/unison/src/utils/schemaValidator.js create mode 100644 services/unison/start.sh create mode 100644 services/unison/unison_api.json diff --git a/DATABASE_MIGRATION_FIX.md b/DATABASE_MIGRATION_FIX.md new file mode 100644 index 0000000..16aa604 --- /dev/null +++ b/DATABASE_MIGRATION_FIX.md @@ -0,0 +1,106 @@ +# Database Migration Issues - SOLVED + +## Problem Summary +You were experiencing unwanted tables being created and duplicates when starting the server. This was caused by multiple migration sources creating the same tables and conflicting migration execution. + +## Root Causes Identified + +### 1. **Multiple Migration Sources** +- PostgreSQL init script (`databases/scripts/init.sql`) creates the `dev_pipeline` database +- Shared schemas (`databases/scripts/schemas.sql`) creates core tables +- Individual service migrations create their own tables +- Template-manager was also applying shared schemas, causing duplicates + +### 2. **Migration Execution Order Issues** +- Services were running migrations in parallel +- No proper dependency management between shared schemas and service-specific tables +- DROP TABLE statements in development mode causing data loss + +### 3. **Table Conflicts** +- `users` table created by both `schemas.sql` and `user-auth` migration +- `user_projects` table created by both sources +- Function conflicts (`update_updated_at_column()` created multiple times) +- Extension conflicts (`uuid-ossp` created multiple times) + +## Solutions Implemented + +### 1. **Fixed Migration Order** +- Created separate `shared-schemas` service for core database tables +- Updated migration script to run in correct order: + 1. `shared-schemas` (core tables first) + 2. `user-auth` (user-specific tables) + 3. `template-manager` (template-specific tables) + +### 2. **Made Migrations Production-Safe** +- Replaced `DROP TABLE IF EXISTS` with `CREATE TABLE IF NOT EXISTS` +- Prevents data loss on server restarts +- Safe for production environments + +### 3. **Eliminated Duplicate Table Creation** +- Removed shared schema application from template-manager +- Each service now only creates its own tables +- Proper dependency management + +### 4. **Created Database Cleanup Script** +- `scripts/cleanup-database.sh` removes unwanted/duplicate tables +- Can be run to clean up existing database issues + +## How to Use + +### Clean Up Existing Database +```bash +cd /home/tech4biz/Desktop/Projectsnew/CODENUK1/codenuk-backend-live +./scripts/cleanup-database.sh +``` + +### Start Server with Fixed Migrations +```bash +docker-compose up --build +``` + +The migrations will now run in the correct order: +1. Shared schemas (projects, tech_stack_decisions, etc.) +2. User authentication tables +3. Template management tables + +## Files Modified + +1. **`services/template-manager/src/migrations/migrate.js`** + - Removed shared schema application + - Now only handles template-specific tables + +2. **`services/user-auth/src/migrations/001_user_auth_schema.sql`** + - Replaced DROP TABLE with CREATE TABLE IF NOT EXISTS + - Made migration production-safe + +3. **`services/template-manager/src/migrations/001_initial_schema.sql`** + - Replaced DROP TABLE with CREATE TABLE IF NOT EXISTS + - Made migration production-safe + +4. **`scripts/migrate-all.sh`** + - Added shared-schemas service + - Proper migration order + +5. **`docker-compose.yml`** + - Removed APPLY_SCHEMAS_SQL environment variable + +6. **Created new files:** + - `services/shared-schemas/` - Dedicated service for shared schemas + - `scripts/cleanup-database.sh` - Database cleanup script + +## Expected Results + +After these changes: +- ✅ No duplicate tables will be created +- ✅ No unwanted tables from pgAdmin +- ✅ Proper migration order +- ✅ Production-safe migrations +- ✅ Clean database schema + +## Verification + +To verify the fix worked: +1. Run the cleanup script +2. Start the server +3. Check pgAdmin - you should only see the intended tables +4. No duplicate or unwanted tables should appear diff --git a/config/urls.js b/config/urls.js index b7a2f82..bddde4f 100644 --- a/config/urls.js +++ b/config/urls.js @@ -12,8 +12,8 @@ // ======================================== // LOCAL DEVELOPMENT URLS // ======================================== -const FRONTEND_URL = 'http://192.168.1.13:3001'; -const BACKEND_URL = 'http://192.168.1.13:8000'; +const FRONTEND_URL = 'http://localhost:3001'; +const BACKEND_URL = 'http://localhost:8000'; // ======================================== // CORS CONFIGURATION (Auto-generated) diff --git a/docker-compose.yml b/docker-compose.yml index 5e0a8af..d6f4599 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -95,7 +95,6 @@ services: - POSTGRES_DB=dev_pipeline - POSTGRES_USER=pipeline_admin - POSTGRES_PASSWORD=secure_pipeline_2024 - - APPLY_SCHEMAS_SQL=true - REDIS_HOST=redis - REDIS_PORT=6379 - REDIS_PASSWORD=redis_secure_2024 @@ -234,7 +233,7 @@ services: - NODE_ENV=development - PORT=8000 - HOST=0.0.0.0 - - CORS_ORIGINS=http://192.168.1.13:3001 + - CORS_ORIGINS=http://localhost:3001 - CORS_METHODS=GET,POST,PUT,DELETE,PATCH,OPTIONS # Add this line - CORS_CREDENTIALS=true # Add this line # Database connections @@ -269,6 +268,8 @@ services: - DASHBOARD_URL=http://dashboard:8008 - SELF_IMPROVING_GENERATOR_URL=http://self-improving-generator:8007 - AI_MOCKUP_URL=http://ai-mockup-service:8021 + - UNISON_URL=http://unison:8010 + - TEMPLATE_MANAGER_AI_URL=http://template-manager:8013 volumes: - api_gateway_logs:/app/logs # Add persistent volume for logs user: "node" # Run as node user instead of root @@ -340,6 +341,7 @@ services: - REDIS_HOST=redis - REDIS_PORT=6379 - REDIS_PASSWORD=redis_secure_2024 + - CLAUDE_API_KEY=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 networks: - pipeline_network depends_on: @@ -492,7 +494,7 @@ services: ports: - "8011:8011" environment: - - FRONTEND_URL=http://192.168.1.13:3001 + - FRONTEND_URL=http://localhost:3001 - PORT=8011 - HOST=0.0.0.0 - NODE_ENV=development @@ -556,6 +558,11 @@ services: - NODE_ENV=development - JWT_ACCESS_SECRET=access-secret-key-2024-tech4biz-secure_pipeline_2024 - CLAUDE_API_KEY=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 + - TEMPLATE_MANAGER_AI_URL=http://127.0.0.1:8013 + - NEO4J_URI=bolt://neo4j:7687 + - NEO4J_USERNAME=neo4j + - NEO4J_PASSWORD=password + - PYTHONUNBUFFERED=1 networks: - pipeline_network depends_on: @@ -573,6 +580,25 @@ services: start_period: 40s restart: unless-stopped + unison: + build: ./services/unison + container_name: pipeline_unison + environment: + - PORT=8010 + - HOST=0.0.0.0 + - TECH_STACK_SELECTOR_URL=http://tech-stack-selector:8002 + - TEMPLATE_MANAGER_URL=http://template-manager:8009 + - TEMPLATE_MANAGER_AI_URL=http://template-manager:8013 + - CLAUDE_API_KEY=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 + - LOG_LEVEL=info + networks: + - pipeline_network + depends_on: + tech-stack-selector: + condition: service_started + template-manager: + condition: service_started + # AI Mockup / Wireframe Generation Service ai-mockup-service: build: ./services/ai-mockup-service @@ -615,6 +641,7 @@ services: environment: - PORT=8012 - HOST=0.0.0.0 + - FRONTEND_URL=http://localhost:3001 - POSTGRES_HOST=postgres - POSTGRES_PORT=5432 - POSTGRES_DB=dev_pipeline @@ -624,11 +651,13 @@ services: - REDIS_PORT=6379 - REDIS_PASSWORD=redis_secure_2024 - NODE_ENV=development - - GITHUB_REDIRECT_URI=* - - ATTACHED_REPOS_DIR=/tmp/attached-repos + - GITHUB_CLIENT_ID=Ov23liQgF14aogXVZNCR + - GITHUB_CLIENT_SECRET=8bf82a29154fdccb837bc150539a2226d00b5da5 + - GITHUB_REDIRECT_URI=http://localhost:8012/api/github/auth/github/callback + - ATTACHED_REPOS_DIR=/app/git-repos - SESSION_SECRET=git-integration-secret-key-2024 volumes: - - git_repos_data:/tmp/attached-repos + - /home/tech4biz/Desktop/Projectsnew/CODENUK1/git-repos:/app/git-repos networks: - pipeline_network depends_on: @@ -639,7 +668,7 @@ services: migrations: condition: service_completed_successfully healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8012/health"] + test: ["CMD", "node", "-e", "require('http').get('http://127.0.0.1:8012/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"] interval: 30s timeout: 10s retries: 3 diff --git a/scripts/cleanup-database.sh b/scripts/cleanup-database.sh new file mode 100644 index 0000000..799669b --- /dev/null +++ b/scripts/cleanup-database.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# ======================================== +# DATABASE CLEANUP SCRIPT +# ======================================== + +# Database connection parameters +DB_HOST=${POSTGRES_HOST:-postgres} +DB_PORT=${POSTGRES_PORT:-5432} +DB_NAME=${POSTGRES_DB:-dev_pipeline} +DB_USER=${POSTGRES_USER:-pipeline_admin} +DB_PASSWORD=${POSTGRES_PASSWORD:-secure_pipeline_2024} + +# Log function with timestamp +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" +} + +log "🧹 Starting database cleanup..." + +# Connect to PostgreSQL and clean up unwanted tables +PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << 'EOF' +-- List all tables before cleanup +\echo '📋 Tables before cleanup:' +SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' ORDER BY table_name; + +-- Drop unwanted/duplicate tables that might have been created +\echo '🗑️ Dropping unwanted tables...' + +-- Drop tables that might be duplicates or unwanted +DROP TABLE IF EXISTS user_api_keys CASCADE; +DROP TABLE IF EXISTS role_scope CASCADE; +DROP TABLE IF EXISTS scope CASCADE; +DROP TABLE IF EXISTS service_health CASCADE; +DROP TABLE IF EXISTS settings CASCADE; +DROP TABLE IF EXISTS shared_credentials CASCADE; +DROP TABLE IF EXISTS shared_workflow CASCADE; +DROP TABLE IF EXISTS stack_recommendations CASCADE; +DROP TABLE IF EXISTS system_architectures CASCADE; +DROP TABLE IF EXISTS tag_entity CASCADE; +DROP TABLE IF EXISTS tech_pricing CASCADE; +DROP TABLE IF EXISTS tech_stack_decisions CASCADE; +DROP TABLE IF EXISTS template_features CASCADE; +DROP TABLE IF EXISTS templates CASCADE; +DROP TABLE IF EXISTS test_case_execution CASCADE; +DROP TABLE IF EXISTS test_results CASCADE; +DROP TABLE IF EXISTS test_run CASCADE; +DROP TABLE IF EXISTS testing_technologies CASCADE; +DROP TABLE IF EXISTS tools CASCADE; +DROP TABLE IF EXISTS user CASCADE; +DROP TABLE IF EXISTS user_feature_preferences CASCADE; +DROP TABLE IF EXISTS user_preferences CASCADE; +DROP TABLE IF EXISTS user_projects CASCADE; +DROP TABLE IF EXISTS user_sessions CASCADE; +DROP TABLE IF EXISTS users CASCADE; +DROP TABLE IF EXISTS variables CASCADE; +DROP TABLE IF EXISTS webhook_entity CASCADE; +DROP TABLE IF EXISTS wireframe_elements CASCADE; +DROP TABLE IF EXISTS wireframe_versions CASCADE; +DROP TABLE IF EXISTS wireframes CASCADE; +DROP TABLE IF EXISTS workflow_entity CASCADE; +DROP TABLE IF EXISTS workflow_history CASCADE; +DROP TABLE IF EXISTS workflow_statistics CASCADE; +DROP TABLE IF EXISTS workflows_tags CASCADE; + +-- Drop any duplicate functions +DROP FUNCTION IF EXISTS update_updated_at_column() CASCADE; + +-- Clean up any orphaned sequences +DO $$ +DECLARE + seq_record RECORD; +BEGIN + FOR seq_record IN + SELECT sequence_name + FROM information_schema.sequences + WHERE sequence_schema = 'public' + AND sequence_name NOT IN ( + SELECT column_default + FROM information_schema.columns + WHERE table_schema = 'public' + AND column_default LIKE 'nextval%' + ) + LOOP + EXECUTE 'DROP SEQUENCE IF EXISTS ' || seq_record.sequence_name || ' CASCADE'; + END LOOP; +END $$; + +-- List tables after cleanup +\echo '📋 Tables after cleanup:' +SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' ORDER BY table_name; + +\echo '✅ Database cleanup completed!' +EOF + +if [ $? -eq 0 ]; then + log "✅ Database cleanup completed successfully" +else + log "❌ Database cleanup failed" + exit 1 +fi diff --git a/scripts/migrate-all.sh b/scripts/migrate-all.sh index c1e35e9..d7af4cd 100755 --- a/scripts/migrate-all.sh +++ b/scripts/migrate-all.sh @@ -11,6 +11,7 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" # Default services list (can be overridden by CLI args) default_services=( + "shared-schemas" "user-auth" "template-manager" ) @@ -70,10 +71,19 @@ for service in "${services[@]}"; do log "➡️ ${service}: installing dependencies" log "========================================" - if ! (cd "${SERVICE_DIR}" && npm ci --no-audit --no-fund --prefer-offline); then - log "ERROR: Failed to install dependencies for ${service}" - failed_services+=("${service}") - continue + # Check if package-lock.json exists, use appropriate install command + if [ -f "${SERVICE_DIR}/package-lock.json" ]; then + if ! (cd "${SERVICE_DIR}" && npm ci --no-audit --no-fund --prefer-offline); then + log "ERROR: Failed to install dependencies for ${service}" + failed_services+=("${service}") + continue + fi + else + if ! (cd "${SERVICE_DIR}" && npm install --no-audit --no-fund); then + log "ERROR: Failed to install dependencies for ${service}" + failed_services+=("${service}") + continue + fi fi log "========================================" diff --git a/services/api-gateway/.env .prod b/services/api-gateway/.env .prod index b8dee55..28700e5 100644 --- a/services/api-gateway/.env .prod +++ b/services/api-gateway/.env .prod @@ -28,10 +28,10 @@ RABBITMQ_USER=pipeline_admin RABBITMQ_PASSWORD=secure_rabbitmq_password # CORS -FRONTEND_URL=http://192.168.1.13:3001 +FRONTEND_URL=http://localhost:3001 # CORS Configuration -CORS_ORIGIN=http://192.168.1.13:3001 +CORS_ORIGIN=http://localhost:3001 CORS_METHODS=GET,POST,PUT,DELETE,PATCH,OPT IONS CORS_CREDENTIALS=true \ No newline at end of file diff --git a/services/api-gateway/src/middleware/cors.js b/services/api-gateway/src/middleware/cors.js index ec7dc54..b5b4537 100644 --- a/services/api-gateway/src/middleware/cors.js +++ b/services/api-gateway/src/middleware/cors.js @@ -12,6 +12,9 @@ const corsMiddleware = cors({ 'Authorization', 'X-Requested-With', 'Origin', + // Custom user context headers used by frontend + 'X-User-Id', + 'x-user-id', 'X-Gateway-Request-ID', 'X-Gateway-Timestamp', 'X-Forwarded-By', diff --git a/services/api-gateway/src/server.js b/services/api-gateway/src/server.js index d74dd6c..a1b139b 100644 --- a/services/api-gateway/src/server.js +++ b/services/api-gateway/src/server.js @@ -34,6 +34,24 @@ app.use((req, res, next) => { res.setHeader('Access-Control-Allow-Origin', origin); res.setHeader('Vary', 'Origin'); res.setHeader('Access-Control-Allow-Credentials', 'true'); + res.setHeader('Access-Control-Allow-Headers', [ + 'Content-Type', + 'Authorization', + 'X-Requested-With', + 'Origin', + 'X-User-Id', + 'x-user-id', + 'X-Gateway-Request-ID', + 'X-Gateway-Timestamp', + 'X-Forwarded-By', + 'X-Forwarded-For', + 'X-Forwarded-Proto', + 'X-Forwarded-Host', + 'X-Session-Token', + 'X-Platform', + 'X-App-Version' + ].join(', ')); + res.setHeader('Access-Control-Allow-Methods', (process.env.CORS_METHODS || 'GET,POST,PUT,DELETE,OPTIONS')); next(); }); const server = http.createServer(app); @@ -55,7 +73,8 @@ global.io = io; // Service targets configuration const serviceTargets = { USER_AUTH_URL: process.env.USER_AUTH_URL || 'http://localhost:8011', - TEMPLATE_MANAGER_URL: process.env.TEMPLATE_MANAGER_URL || 'http://192.168.1.13:8009', + TEMPLATE_MANAGER_URL: process.env.TEMPLATE_MANAGER_URL || 'http://localhost:8009', + TEMPLATE_MANAGER_AI_URL: process.env.TEMPLATE_MANAGER_AI_URL || 'http://localhost:8013', GIT_INTEGRATION_URL: process.env.GIT_INTEGRATION_URL || 'http://localhost:8012', REQUIREMENT_PROCESSOR_URL: process.env.REQUIREMENT_PROCESSOR_URL || 'http://requirement-processor:8001', TECH_STACK_SELECTOR_URL: process.env.TECH_STACK_SELECTOR_URL || 'http://localhost:8002', @@ -66,6 +85,7 @@ const serviceTargets = { DASHBOARD_URL: process.env.DASHBOARD_URL || 'http://localhost:8008', SELF_IMPROVING_GENERATOR_URL: process.env.SELF_IMPROVING_GENERATOR_URL || 'http://localhost:8007', AI_MOCKUP_URL: process.env.AI_MOCKUP_URL || 'http://localhost:8021', + UNISON_URL: process.env.UNISON_URL || 'http://localhost:8010', }; // Log service targets for debugging @@ -509,6 +529,82 @@ app.use('/api/ai/analyze-feature', } ); +// Template Manager AI - expose AI recommendations through the gateway +console.log('🔧 Registering /api/ai/tech-stack proxy route...'); +app.use('/api/ai/tech-stack', + createServiceLimiter(300), + // Public (reads); Unison handles auth if needed + (req, res, next) => next(), + (req, res, next) => { + const aiUrl = serviceTargets.TEMPLATE_MANAGER_AI_URL; + // Map gateway paths to AI service: + // POST /api/ai/tech-stack/recommendations -> POST /ai/recommendations + // POST /api/ai/tech-stack/recommendations/formatted -> POST /ai/recommendations/formatted + // GET /api/ai/tech-stack/extract-keywords/:id -> GET /extract-keywords/:id + // POST /api/ai/tech-stack/extract-keywords/:id -> POST /extract-keywords/:id + // POST /api/ai/tech-stack/auto-workflow/:id -> POST /auto-workflow/:id + let rewrittenPath = req.originalUrl + .replace(/^\/api\/ai\/tech-stack\/recommendations\/formatted/, '/ai/recommendations/formatted') + .replace(/^\/api\/ai\/tech-stack\/recommendations/, '/ai/recommendations') + .replace(/^\/api\/ai\/tech-stack\/extract-keywords\//, '/extract-keywords/') + .replace(/^\/api\/ai\/tech-stack\/auto-workflow\//, '/auto-workflow/') + .replace(/^\/api\/ai\/tech-stack\/?$/, '/'); + + const targetUrl = `${aiUrl}${rewrittenPath.replace(/^\/api\/ai\/tech-stack/, '')}`; + console.log(`🔥 [TEMPLATE-AI PROXY] ${req.method} ${req.originalUrl} → ${targetUrl}`); + + res.setTimeout(30000, () => { + console.error('❌ [TEMPLATE-AI PROXY] Response timeout'); + if (!res.headersSent) { + res.status(504).json({ error: 'Gateway timeout', service: 'template-manager-ai' }); + } + }); + + const options = { + method: req.method, + url: targetUrl, + headers: { + 'Content-Type': 'application/json', + 'User-Agent': 'API-Gateway/1.0', + 'Connection': 'keep-alive', + 'Authorization': req.headers.authorization, + 'X-User-ID': req.user?.id || req.user?.userId, + 'X-User-Role': req.user?.role, + }, + timeout: 25000, + validateStatus: () => true, + maxRedirects: 0 + }; + + if (req.method === 'POST' || req.method === 'PUT' || req.method === 'PATCH') { + options.data = req.body || {}; + console.log(`📦 [TEMPLATE-AI PROXY] Request body:`, JSON.stringify(req.body)); + } + + axios(options) + .then(response => { + console.log(`✅ [TEMPLATE-AI PROXY] Response: ${response.status} for ${req.method} ${req.originalUrl}`); + if (!res.headersSent) { + res.status(response.status).json(response.data); + } + }) + .catch(error => { + console.error(`❌ [TEMPLATE-AI PROXY ERROR]:`, error.message); + if (!res.headersSent) { + if (error.response) { + res.status(error.response.status).json(error.response.data); + } else { + res.status(502).json({ + error: 'Template Manager AI unavailable', + message: error.code || error.message, + service: 'template-manager-ai' + }); + } + } + }); + } +); + // Requirement Processor Service - General routes (MUST come after specific routes) app.use('/api/requirements', createServiceLimiter(300), @@ -641,6 +737,244 @@ app.use('/api/self-improving', serviceRouter.createServiceProxy(serviceTargets.SELF_IMPROVING_GENERATOR_URL, 'self-improving-generator') ); +// Unison (Unified Recommendations) Service +console.log('🔧 Registering /api/unison proxy route...'); +app.use('/api/unison', + createServiceLimiter(200), + // Allow unauthenticated access for unified recommendations + (req, res, next) => next(), + (req, res, next) => { + const unisonUrl = serviceTargets.UNISON_URL; + // Forward to same path on Unison (e.g., /api/unison/recommendations/unified) + const rewrittenPath = (req.originalUrl || '').replace(/^\/api\/unison/, '/api'); + const targetUrl = `${unisonUrl}${rewrittenPath}`; + console.log(`🔥 [UNISON PROXY] ${req.method} ${req.originalUrl} → ${targetUrl}`); + + res.setTimeout(30000, () => { + console.error('❌ [UNISON PROXY] Response timeout'); + if (!res.headersSent) { + res.status(504).json({ error: 'Gateway timeout', service: 'unison' }); + } + }); + + const options = { + method: req.method, + url: targetUrl, + headers: { + 'Content-Type': 'application/json', + 'User-Agent': 'API-Gateway/1.0', + 'Connection': 'keep-alive', + 'Authorization': req.headers.authorization, + 'X-User-ID': req.user?.id || req.user?.userId, + 'X-User-Role': req.user?.role, + }, + timeout: 25000, + validateStatus: () => true, + maxRedirects: 0 + }; + + if (req.method === 'POST' || req.method === 'PUT' || req.method === 'PATCH') { + options.data = req.body || {}; + console.log(`📦 [UNISON PROXY] Request body:`, JSON.stringify(req.body)); + } + + axios(options) + .then(response => { + console.log(`✅ [UNISON PROXY] Response: ${response.status} for ${req.method} ${req.originalUrl}`); + if (!res.headersSent) { + res.status(response.status).json(response.data); + } + }) + .catch(error => { + console.error(`❌ [UNISON PROXY ERROR]:`, error.message); + if (!res.headersSent) { + if (error.response) { + res.status(error.response.status).json(error.response.data); + } else { + res.status(502).json({ + error: 'Unison service unavailable', + message: error.code || error.message, + service: 'unison' + }); + } + } + }); + } +); + +// Unified recommendations shortcut +console.log('🔧 Registering /api/recommendations proxy route (shortcut to Unison)...'); +app.use('/api/recommendations', + createServiceLimiter(200), + (req, res, next) => next(), + (req, res, next) => { + const unisonUrl = serviceTargets.UNISON_URL; + // Keep path under /api/recommendations/* when forwarding to Unison + const targetUrl = `${unisonUrl}${req.originalUrl}`; + console.log(`🔥 [UNIFIED PROXY] ${req.method} ${req.originalUrl} → ${targetUrl}`); + + res.setTimeout(30000, () => { + console.error('❌ [UNIFIED PROXY] Response timeout'); + if (!res.headersSent) { + res.status(504).json({ error: 'Gateway timeout', service: 'unison' }); + } + }); + + const options = { + method: req.method, + url: targetUrl, + headers: { + 'Content-Type': 'application/json', + 'User-Agent': 'API-Gateway/1.0', + 'Connection': 'keep-alive', + 'Authorization': req.headers.authorization, + 'X-User-ID': req.user?.id || req.user?.userId, + 'X-User-Role': req.user?.role, + }, + timeout: 25000, + validateStatus: () => true, + maxRedirects: 0 + }; + + if (req.method === 'POST' || req.method === 'PUT' || req.method === 'PATCH') { + options.data = req.body || {}; + console.log(`📦 [UNIFIED PROXY] Request body:`, JSON.stringify(req.body)); + } + + axios(options) + .then(response => { + console.log(`✅ [UNIFIED PROXY] Response: ${response.status} for ${req.method} ${req.originalUrl}`); + if (!res.headersSent) { + res.status(response.status).json(response.data); + } + }) + .catch(error => { + console.error(`❌ [UNIFIED PROXY ERROR]:`, error.message); + if (!res.headersSent) { + if (error.response) { + res.status(error.response.status).json(error.response.data); + } else { + res.status(502).json({ + error: 'Unison service unavailable', + message: error.code || error.message, + service: 'unison' + }); + } + } + }); + } +); + +// Convenience alias: POST /api/recommendations -> POST /api/recommendations/unified +console.log('🔧 Registering /api/recommendations (root) alias to unified...'); +app.post('/api/recommendations', + createServiceLimiter(200), + (req, res, next) => { + const unisonUrl = serviceTargets.UNISON_URL; + const targetUrl = `${unisonUrl}/api/recommendations/unified`; + console.log(`🔥 [UNIFIED ROOT ALIAS] ${req.method} ${req.originalUrl} → ${targetUrl}`); + + const options = { + method: 'POST', + url: targetUrl, + headers: { + 'Content-Type': 'application/json', + 'User-Agent': 'API-Gateway/1.0', + 'Connection': 'keep-alive', + 'Authorization': req.headers.authorization, + 'X-User-ID': req.user?.id || req.user?.userId, + 'X-User-Role': req.user?.role, + }, + timeout: 25000, + validateStatus: () => true, + maxRedirects: 0, + data: req.body || {} + }; + + axios(options) + .then(response => { + if (!res.headersSent) { + res.status(response.status).json(response.data); + } + }) + .catch(error => { + if (!res.headersSent) { + if (error.response) { + res.status(error.response.status).json(error.response.data); + } else { + res.status(502).json({ + error: 'Unison service unavailable', + message: error.code || error.message, + service: 'unison' + }); + } + } + }); + } +); + +// Backward-compatible alias: /ai/recommendations -> Unison /api/recommendations +console.log('🔧 Registering /ai/recommendations alias to Unison...'); +app.use('/ai/recommendations', + createServiceLimiter(200), + (req, res, next) => next(), + (req, res, next) => { + const unisonUrl = serviceTargets.UNISON_URL; + const targetUrl = `${unisonUrl}/api/recommendations${req.originalUrl.replace(/^\/ai\/recommendations/, '')}`; + console.log(`🔥 [AI→UNIFIED PROXY] ${req.method} ${req.originalUrl} → ${targetUrl}`); + + res.setTimeout(30000, () => { + console.error('❌ [AI→UNIFIED PROXY] Response timeout'); + if (!res.headersSent) { + res.status(504).json({ error: 'Gateway timeout', service: 'unison' }); + } + }); + + const options = { + method: req.method, + url: targetUrl, + headers: { + 'Content-Type': 'application/json', + 'User-Agent': 'API-Gateway/1.0', + 'Connection': 'keep-alive', + 'Authorization': req.headers.authorization, + 'X-User-ID': req.user?.id || req.user?.userId, + 'X-User-Role': req.user?.role, + }, + timeout: 25000, + validateStatus: () => true, + maxRedirects: 0 + }; + + if (req.method === 'POST' || req.method === 'PUT' || req.method === 'PATCH') { + options.data = req.body || {}; + console.log(`📦 [AI→UNIFIED PROXY] Request body:`, JSON.stringify(req.body)); + } + + axios(options) + .then(response => { + console.log(`✅ [AI→UNIFIED PROXY] Response: ${response.status} for ${req.method} ${req.originalUrl}`); + if (!res.headersSent) { + res.status(response.status).json(response.data); + } + }) + .catch(error => { + console.error(`❌ [AI→UNIFIED PROXY ERROR]:`, error.message); + if (!res.headersSent) { + if (error.response) { + res.status(error.response.status).json(error.response.data); + } else { + res.status(502).json({ + error: 'Unison service unavailable', + message: error.code || error.message, + service: 'unison' + }); + } + } + }); + } +); + // Features (Template Manager) - expose /api/features via gateway console.log('🔧 Registering /api/features proxy route...'); app.use('/api/features', @@ -714,7 +1048,7 @@ app.use('/api/github', (req, res, next) => { // Allow unauthenticated access for read-only requests and specific public endpoints if (req.method === 'GET') { - return next(); + return authMiddleware.verifyTokenOptional(req, res, () => authMiddleware.forwardUserContext(req, res, next)); } // Allowlist certain POST endpoints that must be public to initiate flows const url = req.originalUrl || ''; @@ -722,10 +1056,11 @@ app.use('/api/github', url.startsWith('/api/github/test-access') || url.startsWith('/api/github/auth/github') || url.startsWith('/api/github/auth/github/callback') || - url.startsWith('/api/github/auth/github/status') + url.startsWith('/api/github/auth/github/status') || + url.startsWith('/api/github/attach-repository') ); if (isPublicGithubEndpoint) { - return next(); + return authMiddleware.verifyTokenOptional(req, res, () => authMiddleware.forwardUserContext(req, res, next)); } return authMiddleware.verifyToken(req, res, () => authMiddleware.forwardUserContext(req, res, next)); }, @@ -733,8 +1068,8 @@ app.use('/api/github', const gitServiceUrl = serviceTargets.GIT_INTEGRATION_URL; console.log(`🔥 [GIT PROXY] ${req.method} ${req.originalUrl} → ${gitServiceUrl}${req.originalUrl}`); - // Set response timeout to prevent hanging - res.setTimeout(15000, () => { + // Set response timeout to prevent hanging (increased for repository operations) + res.setTimeout(60000, () => { console.error('❌ [GIT PROXY] Response timeout'); if (!res.headersSent) { res.status(504).json({ error: 'Gateway timeout', service: 'git-integration' }); @@ -753,7 +1088,7 @@ app.use('/api/github', 'X-User-Role': req.user?.role, 'Authorization': req.headers.authorization }, - timeout: 8000, + timeout: 45000, validateStatus: () => true, maxRedirects: 0 }; @@ -767,6 +1102,13 @@ app.use('/api/github', axios(options) .then(response => { console.log(`✅ [GIT PROXY] Response: ${response.status} for ${req.method} ${req.originalUrl}`); + // Forward redirects so browser follows OAuth location + if (response.status >= 300 && response.status < 400 && response.headers?.location) { + const location = response.headers.location; + console.log(`↪️ [GIT PROXY] Forwarding redirect to ${location}`); + if (!res.headersSent) return res.redirect(response.status, location); + return; + } if (!res.headersSent) { res.status(response.status).json(response.data); } @@ -930,7 +1272,9 @@ app.get('/', (req, res) => { deploy: '/api/deploy', dashboard: '/api/dashboard', self_improving: '/api/self-improving', - mockup: '/api/mockup' + mockup: '/api/mockup', + unison: '/api/unison', + unified: '/api/recommendations' }, websocket: { endpoint: '/socket.io/', diff --git a/services/git-integration/Dockerfile b/services/git-integration/Dockerfile index f81e732..1a3269b 100644 --- a/services/git-integration/Dockerfile +++ b/services/git-integration/Dockerfile @@ -8,6 +8,9 @@ COPY package*.json ./ # Install dependencies RUN npm install +# Install git and tools required for healthchecks and HTTPS clones +RUN apk add --no-cache git curl ca-certificates openssh-client && update-ca-certificates + # Copy source code COPY . . @@ -15,8 +18,10 @@ COPY . . RUN addgroup -g 1001 -S nodejs RUN adduser -S git-integration -u 1001 -# Change ownership +# Create git-repos directory and set proper permissions +RUN mkdir -p /app/git-repos RUN chown -R git-integration:nodejs /app +RUN chmod -R 755 /app/git-repos USER git-integration # Expose port diff --git a/services/git-integration/package-lock.json b/services/git-integration/package-lock.json index 6dcd503..b38e65b 100644 --- a/services/git-integration/package-lock.json +++ b/services/git-integration/package-lock.json @@ -15,6 +15,7 @@ "express-session": "^1.18.2", "helmet": "^7.1.0", "morgan": "^1.10.0", + "parse-github-url": "^1.0.3", "pg": "^8.11.3", "uuid": "^9.0.1" }, @@ -1135,6 +1136,18 @@ "wrappy": "1" } }, + "node_modules/parse-github-url": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/parse-github-url/-/parse-github-url-1.0.3.tgz", + "integrity": "sha512-tfalY5/4SqGaV/GIGzWyHnFjlpTPTNpENR9Ea2lLldSJ8EWXMsvacWucqY3m3I4YPtas15IxTLQVQ5NSYXPrww==", + "license": "MIT", + "bin": { + "parse-github-url": "cli.js" + }, + "engines": { + "node": ">= 0.10" + } + }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", diff --git a/services/git-integration/package.json b/services/git-integration/package.json index 4ee829d..9dc133e 100644 --- a/services/git-integration/package.json +++ b/services/git-integration/package.json @@ -16,6 +16,7 @@ "express-session": "^1.18.2", "helmet": "^7.1.0", "morgan": "^1.10.0", + "parse-github-url": "^1.0.3", "pg": "^8.11.3", "uuid": "^9.0.1" }, diff --git a/services/git-integration/src/app.js b/services/git-integration/src/app.js index fdadbde..1101279 100644 --- a/services/git-integration/src/app.js +++ b/services/git-integration/src/app.js @@ -11,6 +11,8 @@ const database = require('./config/database'); // Import routes const githubRoutes = require('./routes/github-integration.routes'); const githubOAuthRoutes = require('./routes/github-oauth'); +const webhookRoutes = require('./routes/webhook.routes'); +const vcsRoutes = require('./routes/vcs.routes'); const app = express(); const PORT = process.env.PORT || 8012; @@ -37,6 +39,8 @@ app.use(session({ // Routes app.use('/api/github', githubRoutes); app.use('/api/github', githubOAuthRoutes); +app.use('/api/github', webhookRoutes); +app.use('/api/vcs', vcsRoutes); // Health check endpoint app.get('/health', (req, res) => { @@ -57,7 +61,9 @@ app.get('/', (req, res) => { endpoints: { health: '/health', github: '/api/github', - oauth: '/api/github/auth' + oauth: '/api/github/auth', + webhook: '/api/github/webhook', + vcs: '/api/vcs/:provider' } }); }); diff --git a/services/git-integration/src/migrations/004_webhook_events.sql b/services/git-integration/src/migrations/004_webhook_events.sql new file mode 100644 index 0000000..3e71adc --- /dev/null +++ b/services/git-integration/src/migrations/004_webhook_events.sql @@ -0,0 +1,35 @@ +-- Create table if it does not exist (compatible with existing schemas) +CREATE TABLE IF NOT EXISTS webhook_events ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + event_type VARCHAR(100) NOT NULL, + processing_status VARCHAR(50) DEFAULT 'pending', + created_at TIMESTAMP DEFAULT NOW() +); + +-- Bring table to desired schema (idempotent) +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS action VARCHAR(100); +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS repository_full_name VARCHAR(255); +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS delivery_id VARCHAR(100); +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS metadata JSONB; +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS received_at TIMESTAMP DEFAULT NOW(); +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS processed_at TIMESTAMP; +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS error_message TEXT; +ALTER TABLE webhook_events ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT NOW(); + +-- Create indexes (safe if columns now exist) +CREATE INDEX IF NOT EXISTS idx_webhook_events_event_type ON webhook_events(event_type); +CREATE INDEX IF NOT EXISTS idx_webhook_events_repository ON webhook_events(repository_full_name); +CREATE INDEX IF NOT EXISTS idx_webhook_events_received_at ON webhook_events(received_at); +CREATE INDEX IF NOT EXISTS idx_webhook_events_delivery_id ON webhook_events(delivery_id); +CREATE INDEX IF NOT EXISTS idx_webhook_events_status ON webhook_events(processing_status); + +-- Add trigger to update timestamp +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_trigger WHERE tgname = 'update_webhook_events_updated_at' + ) THEN + CREATE TRIGGER update_webhook_events_updated_at BEFORE UPDATE ON webhook_events + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + END IF; +END $$; diff --git a/services/git-integration/src/migrations/005_webhook_commits.sql b/services/git-integration/src/migrations/005_webhook_commits.sql new file mode 100644 index 0000000..cea4482 --- /dev/null +++ b/services/git-integration/src/migrations/005_webhook_commits.sql @@ -0,0 +1,52 @@ +-- Migration 005: GitHub webhook tracking and commit SHA history + +-- Create a durable table for raw webhook deliveries (compat with existing code expectations) +CREATE TABLE IF NOT EXISTS github_webhooks ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + delivery_id VARCHAR(120), + event_type VARCHAR(100) NOT NULL, + action VARCHAR(100), + owner_name VARCHAR(120), + repository_name VARCHAR(200), + repository_id UUID REFERENCES github_repositories(id) ON DELETE SET NULL, + ref VARCHAR(255), + before_sha VARCHAR(64), + after_sha VARCHAR(64), + commit_count INTEGER, + payload JSONB NOT NULL, + processed_at TIMESTAMP, + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_github_webhooks_delivery_id ON github_webhooks(delivery_id); +CREATE INDEX IF NOT EXISTS idx_github_webhooks_repo ON github_webhooks(owner_name, repository_name); +CREATE INDEX IF NOT EXISTS idx_github_webhooks_event_type ON github_webhooks(event_type); + +-- Track commit SHA transitions per repository to detect changes over time +CREATE TABLE IF NOT EXISTS repository_commit_events ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + repository_id UUID REFERENCES github_repositories(id) ON DELETE CASCADE, + ref VARCHAR(255), + before_sha VARCHAR(64), + after_sha VARCHAR(64), + commit_count INTEGER DEFAULT 0, + received_at TIMESTAMP DEFAULT NOW(), + created_at TIMESTAMP DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_repo_commit_events_repo ON repository_commit_events(repository_id); +CREATE INDEX IF NOT EXISTS idx_repo_commit_events_sha ON repository_commit_events(after_sha); + +-- Safe trigger creation +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_trigger WHERE tgname = 'update_github_webhooks_updated_at' + ) THEN + CREATE TRIGGER update_github_webhooks_updated_at BEFORE UPDATE ON github_webhooks + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + END IF; +END $$; + + diff --git a/services/git-integration/src/migrations/006_commit_changes.sql b/services/git-integration/src/migrations/006_commit_changes.sql new file mode 100644 index 0000000..b793717 --- /dev/null +++ b/services/git-integration/src/migrations/006_commit_changes.sql @@ -0,0 +1,31 @@ +-- Migration 006: Store commit messages and per-file changes from push webhooks + +-- Per-commit details linked to an attached repository +CREATE TABLE IF NOT EXISTS repository_commit_details ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + repository_id UUID REFERENCES github_repositories(id) ON DELETE CASCADE, + commit_sha VARCHAR(64) NOT NULL, + author_name VARCHAR(200), + author_email VARCHAR(320), + message TEXT, + url TEXT, + committed_at TIMESTAMP DEFAULT NOW(), + created_at TIMESTAMP DEFAULT NOW() +); + +CREATE UNIQUE INDEX IF NOT EXISTS uq_repo_commit_sha ON repository_commit_details(repository_id, commit_sha); +CREATE INDEX IF NOT EXISTS idx_repo_commit_created_at ON repository_commit_details(created_at); + +-- Per-file changes for each commit +CREATE TABLE IF NOT EXISTS repository_commit_files ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + commit_id UUID REFERENCES repository_commit_details(id) ON DELETE CASCADE, + change_type VARCHAR(20) NOT NULL, -- added | modified | removed + file_path TEXT NOT NULL, + created_at TIMESTAMP DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_commit_files_commit_id ON repository_commit_files(commit_id); +CREATE INDEX IF NOT EXISTS idx_commit_files_path ON repository_commit_files(file_path); + + diff --git a/services/git-integration/src/migrations/007_add_last_synced_commit.sql b/services/git-integration/src/migrations/007_add_last_synced_commit.sql new file mode 100644 index 0000000..4fe8f81 --- /dev/null +++ b/services/git-integration/src/migrations/007_add_last_synced_commit.sql @@ -0,0 +1,6 @@ +-- 007_add_last_synced_commit.sql +ALTER TABLE github_repositories +ADD COLUMN IF NOT EXISTS last_synced_commit_sha VARCHAR(64), +ADD COLUMN IF NOT EXISTS last_synced_at TIMESTAMP WITH TIME ZONE; + + diff --git a/services/git-integration/src/migrations/008_provider_token_tables.sql b/services/git-integration/src/migrations/008_provider_token_tables.sql new file mode 100644 index 0000000..425facd --- /dev/null +++ b/services/git-integration/src/migrations/008_provider_token_tables.sql @@ -0,0 +1,36 @@ +-- 008_provider_token_tables.sql + +CREATE TABLE IF NOT EXISTS gitlab_user_tokens ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + access_token TEXT NOT NULL, + gitlab_username TEXT, + gitlab_user_id TEXT, + scopes JSONB, + expires_at TIMESTAMP NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS bitbucket_user_tokens ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + access_token TEXT NOT NULL, + bitbucket_username TEXT, + bitbucket_user_id TEXT, + scopes JSONB, + expires_at TIMESTAMP NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS gitea_user_tokens ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + access_token TEXT NOT NULL, + gitea_username TEXT, + gitea_user_id TEXT, + scopes JSONB, + expires_at TIMESTAMP NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + + diff --git a/services/git-integration/src/migrations/009_provider_webhook_tables.sql b/services/git-integration/src/migrations/009_provider_webhook_tables.sql new file mode 100644 index 0000000..e689201 --- /dev/null +++ b/services/git-integration/src/migrations/009_provider_webhook_tables.sql @@ -0,0 +1,68 @@ +-- 009_provider_webhook_tables.sql + +-- GitLab webhooks table +CREATE TABLE IF NOT EXISTS gitlab_webhooks ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + delivery_id TEXT, + event_type TEXT NOT NULL, + action TEXT, + owner_name TEXT NOT NULL, + repository_name TEXT NOT NULL, + repository_id UUID REFERENCES github_repositories(id), + ref TEXT, + before_sha TEXT, + after_sha TEXT, + commit_count INTEGER DEFAULT 0, + payload JSONB, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +-- Bitbucket webhooks table +CREATE TABLE IF NOT EXISTS bitbucket_webhooks ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + delivery_id TEXT, + event_type TEXT NOT NULL, + action TEXT, + owner_name TEXT NOT NULL, + repository_name TEXT NOT NULL, + repository_id UUID REFERENCES github_repositories(id), + ref TEXT, + before_sha TEXT, + after_sha TEXT, + commit_count INTEGER DEFAULT 0, + payload JSONB, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +-- Gitea webhooks table +CREATE TABLE IF NOT EXISTS gitea_webhooks ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + delivery_id TEXT, + event_type TEXT NOT NULL, + action TEXT, + owner_name TEXT NOT NULL, + repository_name TEXT NOT NULL, + repository_id UUID REFERENCES github_repositories(id), + ref TEXT, + before_sha TEXT, + after_sha TEXT, + commit_count INTEGER DEFAULT 0, + payload JSONB, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +-- Indexes for better performance +CREATE INDEX IF NOT EXISTS idx_gitlab_webhooks_repository_id ON gitlab_webhooks(repository_id); +CREATE INDEX IF NOT EXISTS idx_gitlab_webhooks_created_at ON gitlab_webhooks(created_at); +CREATE INDEX IF NOT EXISTS idx_gitlab_webhooks_event_type ON gitlab_webhooks(event_type); + +CREATE INDEX IF NOT EXISTS idx_bitbucket_webhooks_repository_id ON bitbucket_webhooks(repository_id); +CREATE INDEX IF NOT EXISTS idx_bitbucket_webhooks_created_at ON bitbucket_webhooks(created_at); +CREATE INDEX IF NOT EXISTS idx_bitbucket_webhooks_event_type ON bitbucket_webhooks(event_type); + +CREATE INDEX IF NOT EXISTS idx_gitea_webhooks_repository_id ON gitea_webhooks(repository_id); +CREATE INDEX IF NOT EXISTS idx_gitea_webhooks_created_at ON gitea_webhooks(created_at); +CREATE INDEX IF NOT EXISTS idx_gitea_webhooks_event_type ON gitea_webhooks(event_type); diff --git a/services/git-integration/src/migrations/010_remove_template_id.sql b/services/git-integration/src/migrations/010_remove_template_id.sql new file mode 100644 index 0000000..24e8274 --- /dev/null +++ b/services/git-integration/src/migrations/010_remove_template_id.sql @@ -0,0 +1,18 @@ +-- Migration 010: Remove template_id columns and related indexes +-- This migration removes template_id references from git-integration tables + +-- Drop indexes that reference template_id first +DROP INDEX IF EXISTS idx_github_repos_template_id; +DROP INDEX IF EXISTS idx_github_repos_template_user; +DROP INDEX IF EXISTS idx_feature_mappings_template_user; + +-- Remove template_id column from github_repositories table +ALTER TABLE IF EXISTS github_repositories + DROP COLUMN IF EXISTS template_id; + +-- Remove template_id column from feature_codebase_mappings table +ALTER TABLE IF EXISTS feature_codebase_mappings + DROP COLUMN IF EXISTS template_id; + +-- Note: This migration removes the template_id foreign key relationships +-- The tables will now rely on user_id for ownership tracking diff --git a/services/git-integration/src/migrations/011_multi_github_accounts_per_user.sql b/services/git-integration/src/migrations/011_multi_github_accounts_per_user.sql new file mode 100644 index 0000000..7bf77d2 --- /dev/null +++ b/services/git-integration/src/migrations/011_multi_github_accounts_per_user.sql @@ -0,0 +1,32 @@ +-- Migration 011: Support multiple GitHub accounts per user +-- This allows each user to authenticate with multiple GitHub accounts + +-- Add user_id column to github_user_tokens +ALTER TABLE github_user_tokens +ADD COLUMN IF NOT EXISTS user_id UUID REFERENCES users(id) ON DELETE CASCADE; + +-- Create indexes for faster lookups +CREATE INDEX IF NOT EXISTS idx_github_user_tokens_user_id ON github_user_tokens(user_id); +CREATE INDEX IF NOT EXISTS idx_github_user_tokens_user_github ON github_user_tokens(user_id, github_username); + +-- Remove the old unique constraint on github_username (if it exists) +-- Allow multiple tokens per user, but one token per GitHub account per user +DROP INDEX IF EXISTS idx_github_user_tokens_github_username; + +-- Create new unique constraint: one token per GitHub account per user +CREATE UNIQUE INDEX IF NOT EXISTS idx_github_user_tokens_unique_user_github +ON github_user_tokens(user_id, github_username) +WHERE user_id IS NOT NULL; + +-- Add a column to track if this is the primary/default GitHub account for the user +ALTER TABLE github_user_tokens +ADD COLUMN IF NOT EXISTS is_primary BOOLEAN DEFAULT FALSE; + +-- Create index for primary account lookups +CREATE INDEX IF NOT EXISTS idx_github_user_tokens_primary ON github_user_tokens(user_id, is_primary); + +-- Note: +-- - Each user can have multiple GitHub accounts +-- - Each GitHub account can only be linked once per user +-- - One account per user can be marked as primary +-- - Repository access will be checked against all user's GitHub accounts diff --git a/services/git-integration/src/migrations/012_add_user_id_to_github_repositories.sql b/services/git-integration/src/migrations/012_add_user_id_to_github_repositories.sql new file mode 100644 index 0000000..46182ec --- /dev/null +++ b/services/git-integration/src/migrations/012_add_user_id_to_github_repositories.sql @@ -0,0 +1,10 @@ +-- Migration 012: Track which user attached/downloaded a repository + +-- Add user_id to github_repositories to associate records with the initiating user +ALTER TABLE github_repositories +ADD COLUMN IF NOT EXISTS user_id UUID REFERENCES users(id) ON DELETE SET NULL; + +-- Helpful index for filtering user-owned repositories +CREATE INDEX IF NOT EXISTS idx_github_repositories_user_id ON github_repositories(user_id); + + diff --git a/services/git-integration/src/routes/github-integration.routes.js b/services/git-integration/src/routes/github-integration.routes.js index 65f423f..fc8b663 100644 --- a/services/git-integration/src/routes/github-integration.routes.js +++ b/services/git-integration/src/routes/github-integration.routes.js @@ -15,45 +15,45 @@ const fileStorageService = new FileStorageService(); // Attach GitHub repository to template router.post('/attach-repository', async (req, res) => { try { - const { template_id, repository_url, branch_name } = req.body; + const { repository_url, branch_name } = req.body; + const userId = req.headers['x-user-id'] || req.query.user_id || req.body.user_id || (req.user && (req.user.id || req.user.userId)); // Validate input - if (!template_id || !repository_url) { + if (!repository_url) { return res.status(400).json({ success: false, - message: 'Template ID and repository URL are required' - }); - } - - // Check if template exists - const templateQuery = 'SELECT * FROM templates WHERE id = $1 AND is_active = true'; - const templateResult = await database.query(templateQuery, [template_id]); - - if (templateResult.rows.length === 0) { - return res.status(404).json({ - success: false, - message: 'Template not found' + message: 'Repository URL is required' }); } // Parse GitHub URL const { owner, repo, branch } = githubService.parseGitHubUrl(repository_url); - // Check repository access - const accessCheck = await githubService.checkRepositoryAccess(owner, repo); + // Check repository access with user-specific tokens + const accessCheck = await githubService.checkRepositoryAccessWithUser(owner, repo, userId); if (!accessCheck.hasAccess) { - if (accessCheck.requiresAuth) { - // Check if we have OAuth token - const tokenRecord = await oauthService.getToken(); - if (!tokenRecord) { - return res.status(401).json({ - success: false, - message: 'GitHub authentication required for this repository', - requires_auth: true, - auth_url: `/api/github/auth/github` - }); - } + if (accessCheck.requiresAuth || accessCheck.authError) { + // Generate an auth URL that encodes the current user and returns absolute via gateway + const state = Math.random().toString(36).substring(7); + const userIdForAuth = userId || null; + const rawAuthUrl = oauthService.getAuthUrl(state, userIdForAuth); + + // Prefer returning a gateway URL so frontend doesn't need to know service ports + const gatewayBase = process.env.API_GATEWAY_PUBLIC_URL || 'http://localhost:8000'; + const serviceRelative = '/api/github/auth/github'; + // redirect=1 makes the endpoint issue a 302 directly to GitHub so UI doesn't land on JSON + const serviceAuthUrl = `${gatewayBase}${serviceRelative}?redirect=1&state=${encodeURIComponent(state)}${userIdForAuth ? `&user_id=${encodeURIComponent(userIdForAuth)}` : ''}`; + + return res.status(401).json({ + success: false, + message: accessCheck.error || 'GitHub authentication required for this repository', + requires_auth: true, + // Return both, frontend can pick the gateway URL + auth_url: serviceAuthUrl, + service_auth_url: rawAuthUrl, + auth_error: accessCheck.authError || false + }); } return res.status(404).json({ @@ -65,39 +65,66 @@ router.post('/attach-repository', async (req, res) => { // Get repository information from GitHub const repositoryData = await githubService.fetchRepositoryMetadata(owner, repo); - // Analyze the codebase - const codebaseAnalysis = await githubService.analyzeCodebase(owner, repo, branch || branch_name); + // Use the actual default branch from repository metadata if the requested branch doesn't exist + let actualBranch = branch || branch_name || repositoryData.default_branch || 'main'; + + // Validate that the requested branch exists, fallback to default if not + try { + if (branch || branch_name) { + const octokit = await githubService.getAuthenticatedOctokit(); + await octokit.git.getRef({ + owner, + repo, + ref: `heads/${actualBranch}` + }); + } + } catch (error) { + if (error.status === 404) { + console.warn(`Branch ${actualBranch} not found, using default branch: ${repositoryData.default_branch}`); + actualBranch = repositoryData.default_branch || 'main'; + } else { + throw error; + } + } - // Store everything in PostgreSQL + // Analyze the codebase + const codebaseAnalysis = await githubService.analyzeCodebase(owner, repo, actualBranch); + + // Store everything in PostgreSQL (without template_id) const insertQuery = ` INSERT INTO github_repositories ( - template_id, repository_url, repository_name, owner_name, + repository_url, repository_name, owner_name, branch_name, is_public, metadata, codebase_analysis, sync_status, - requires_auth + requires_auth, user_id ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING * `; const insertValues = [ - template_id, repository_url, repo, owner, - branch || branch_name || 'main', + actualBranch, repositoryData.visibility === 'public', JSON.stringify(repositoryData), JSON.stringify(codebaseAnalysis), 'synced', - accessCheck.requiresAuth + accessCheck.requiresAuth, + userId || null ]; const insertResult = await database.query(insertQuery, insertValues); const repositoryRecord = insertResult.rows[0]; - // Download repository with file storage - console.log('Downloading repository with storage...'); - const downloadResult = await githubService.downloadRepositoryWithStorage( - owner, repo, branch || branch_name || 'main', repositoryRecord.id + // Attempt to auto-create webhook on the attached repository using OAuth token + const publicBaseUrl = process.env.PUBLIC_BASE_URL || null; // e.g., your ngrok URL https://xxx.ngrok-free.app + const callbackUrl = publicBaseUrl ? `${publicBaseUrl}/api/github/webhook` : null; + const webhookResult = await githubService.ensureRepositoryWebhook(owner, repo, callbackUrl); + + // Sync with fallback: try git first, then API + console.log('Syncing repository (git first, API fallback)...'); + const downloadResult = await githubService.syncRepositoryWithFallback( + owner, repo, actualBranch, repositoryRecord.id ); if (!downloadResult.success) { @@ -105,33 +132,6 @@ router.post('/attach-repository', async (req, res) => { console.warn('Repository download failed:', downloadResult.error); } - // Create feature-codebase mappings - const featureQuery = 'SELECT id FROM template_features WHERE template_id = $1'; - const featureResult = await database.query(featureQuery, [template_id]); - - if (featureResult.rows.length > 0) { - const mappingValues = []; - const mappingParams = []; - let paramIndex = 1; - - for (const feature of featureResult.rows) { - mappingValues.push(`(uuid_generate_v4(), $${paramIndex++}, $${paramIndex++}, $${paramIndex++}, $${paramIndex++})`); - mappingParams.push( - feature.id, - repositoryRecord.id, - '[]', // Empty paths for now - '{}' // Empty snippets for now - ); - } - - const mappingQuery = ` - INSERT INTO feature_codebase_mappings (id, feature_id, repository_id, code_paths, code_snippets) - VALUES ${mappingValues.join(', ')} - `; - - await database.query(mappingQuery, mappingParams); - } - // Get storage information const storageInfo = await githubService.getRepositoryStorage(repositoryRecord.id); @@ -140,7 +140,6 @@ router.post('/attach-repository', async (req, res) => { message: 'Repository attached successfully', data: { repository_id: repositoryRecord.id, - template_id: repositoryRecord.template_id, repository_name: repositoryRecord.repository_name, owner_name: repositoryRecord.owner_name, branch_name: repositoryRecord.branch_name, @@ -161,6 +160,55 @@ router.post('/attach-repository', async (req, res) => { }); } }); +// Get repository diff between two SHAs (unified patch) +router.get('/repository/:id/diff', async (req, res) => { + try { + const { id } = req.params; + const { from, to, path: dirPath } = req.query; + + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const record = repoResult.rows[0]; + const { owner, repo } = githubService.parseGitHubUrl(record.repository_url); + // Always use stored branch_name to avoid mismatches like master/main + const targetBranch = record.branch_name || 'main'; + const patch = await githubService.getRepositoryDiff(owner, repo, targetBranch, from || record.last_synced_commit_sha, to || 'HEAD'); + res.json({ success: true, data: { patch, from: from || record.last_synced_commit_sha, to: to || 'HEAD' } }); + } catch (error) { + console.error('Error getting diff:', error); + res.status(500).json({ success: false, message: error.message || 'Failed to get diff' }); + } +}); + +// Get list of changed files since a SHA +router.get('/repository/:id/changes', async (req, res) => { + try { + const { id } = req.params; + const { since } = req.query; + + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const record = repoResult.rows[0]; + const { owner, repo, branch } = githubService.parseGitHubUrl(record.repository_url); + + const sinceSha = since || record.last_synced_commit_sha; + if (!sinceSha) { + return res.status(400).json({ success: false, message: 'since SHA is required or must be available as last_synced_commit_sha' }); + } + + const changes = await githubService.getRepositoryChangesSince(owner, repo, branch || record.branch_name, sinceSha); + res.json({ success: true, data: { since: sinceSha, changes } }); + } catch (error) { + console.error('Error getting changes:', error); + res.status(500).json({ success: false, message: error.message || 'Failed to get changes' }); + } +}); // Get repository information for a template router.get('/template/:id/repository', async (req, res) => { @@ -481,8 +529,8 @@ router.post('/repository/:id/sync', async (req, res) => { // Clean up existing storage await githubService.cleanupRepositoryStorage(id); - // Re-download with storage - const downloadResult = await githubService.downloadRepositoryWithStorage( + // Re-sync with fallback (git first, API fallback) + const downloadResult = await githubService.syncRepositoryWithFallback( owner, repo, branch || repository.branch_name, id ); @@ -558,4 +606,4 @@ router.delete('/repository/:id', async (req, res) => { } }); -module.exports = router; +module.exports = router; \ No newline at end of file diff --git a/services/git-integration/src/routes/github-oauth.js b/services/git-integration/src/routes/github-oauth.js index 43a6346..313535b 100644 --- a/services/git-integration/src/routes/github-oauth.js +++ b/services/git-integration/src/routes/github-oauth.js @@ -5,11 +5,31 @@ const GitHubOAuthService = require('../services/github-oauth'); const oauthService = new GitHubOAuthService(); -// Initiate GitHub OAuth flow +// Initiate GitHub OAuth flow (supports optional user_id). If redirect=1, do 302 to GitHub. router.get('/auth/github', async (req, res) => { try { const state = Math.random().toString(36).substring(7); - const authUrl = oauthService.getAuthUrl(state); + const userId = + req.query.user_id || + (req.body && req.body.user_id) || + req.headers['x-user-id'] || + (req.cookies && (req.cookies.user_id || req.cookies.uid)) || + (req.session && req.session.user && (req.session.user.id || req.session.user.userId)) || + (req.user && (req.user.id || req.user.userId)); + + if (!userId) { + return res.status(400).json({ + success: false, + message: 'user_id is required to initiate GitHub authentication' + }); + } + console.log('[GitHub OAuth] /auth/github resolved user_id =', userId || null); + const authUrl = oauthService.getAuthUrl(state, userId || null); + + const shouldRedirect = ['1', 'true', 'yes'].includes(String(req.query.redirect || '').toLowerCase()); + if (shouldRedirect) { + return res.redirect(302, authUrl); + } res.json({ success: true, @@ -32,6 +52,24 @@ router.get('/auth/github', async (req, res) => { router.get('/auth/github/callback', async (req, res) => { try { const { code, state } = req.query; + // user_id may arrive as query param or embedded in the state + let user_id = + req.query.user_id || + (req.body && req.body.user_id) || + req.headers['x-user-id'] || + (req.cookies && (req.cookies.user_id || req.cookies.uid)) || + (req.session && req.session.user && (req.session.user.id || req.session.user.userId)) || + (req.user && (req.user.id || req.user.userId)); + if (!user_id && typeof state === 'string' && state.includes('|uid=')) { + try { user_id = state.split('|uid=')[1]; } catch {} + } + + if (!user_id) { + return res.status(400).json({ + success: false, + message: 'user_id is required to complete GitHub authentication' + }); + } if (!code) { return res.status(400).json({ @@ -46,8 +84,9 @@ router.get('/auth/github/callback', async (req, res) => { // Get user info from GitHub const githubUser = await oauthService.getUserInfo(accessToken); - // Store token - const tokenRecord = await oauthService.storeToken(accessToken, githubUser); + // Store token with user context (if provided) + console.log('[GitHub OAuth] callback about to store token for user_id =', user_id || null); + const tokenRecord = await oauthService.storeToken(accessToken, githubUser, user_id || null); // Redirect back to frontend if configured const frontendUrl = process.env.FRONTEND_URL || 'http://localhost:3000'; @@ -79,27 +118,12 @@ router.get('/auth/github/callback', async (req, res) => { // Get GitHub connection status router.get('/auth/github/status', async (req, res) => { try { - const tokenRecord = await oauthService.getToken(); + const authStatus = await oauthService.getAuthStatus(); - if (tokenRecord) { - res.json({ - success: true, - data: { - connected: true, - github_username: tokenRecord.github_username, - github_user_id: tokenRecord.github_user_id, - connected_at: tokenRecord.created_at, - scopes: tokenRecord.scopes - } - }); - } else { - res.json({ - success: true, - data: { - connected: false - } - }); - } + res.json({ + success: true, + data: authStatus + }); } catch (error) { console.error('Error checking GitHub status:', error); diff --git a/services/git-integration/src/routes/vcs.routes.js b/services/git-integration/src/routes/vcs.routes.js new file mode 100644 index 0000000..51e5fae --- /dev/null +++ b/services/git-integration/src/routes/vcs.routes.js @@ -0,0 +1,511 @@ +// routes/vcs.routes.js +const express = require('express'); +const router = express.Router({ mergeParams: true }); +const providerRegistry = require('../services/provider-registry'); +const database = require('../config/database'); +const FileStorageService = require('../services/file-storage.service'); + +const fileStorageService = new FileStorageService(); +const GitLabOAuthService = require('../services/gitlab-oauth'); +const BitbucketOAuthService = require('../services/bitbucket-oauth'); +const GiteaOAuthService = require('../services/gitea-oauth'); +const VcsWebhookService = require('../services/vcs-webhook.service'); + +const vcsWebhookService = new VcsWebhookService(); + +function getProvider(req) { + const providerKey = (req.params.provider || '').toLowerCase(); + return providerRegistry.resolve(providerKey); +} + +function getOAuthService(providerKey) { + if (providerKey === 'gitlab') return new GitLabOAuthService(); + if (providerKey === 'bitbucket') return new BitbucketOAuthService(); + if (providerKey === 'gitea') return new GiteaOAuthService(); + return null; +} + +function extractEventType(providerKey, payload) { + switch (providerKey) { + case 'gitlab': + return payload.object_kind || (payload.ref ? 'push' : 'unknown'); + case 'bitbucket': + return (payload.push && 'push') || (payload.pullrequest && 'pull_request') || 'unknown'; + case 'gitea': + return payload.action || (payload.ref ? 'push' : 'unknown'); + default: + return 'unknown'; + } +} + +// Attach repository (provider-agnostic) +router.post('/:provider/attach-repository', async (req, res) => { + try { + const provider = getProvider(req); + const { template_id, repository_url, branch_name } = req.body; + const userId = req.headers['x-user-id'] || req.query.user_id || req.body.user_id || (req.user && (req.user.id || req.user.userId)); + + if (!template_id || !repository_url) { + return res.status(400).json({ success: false, message: 'Template ID and repository URL are required' }); + } + + const templateResult = await database.query('SELECT * FROM templates WHERE id = $1 AND is_active = true', [template_id]); + if (templateResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Template not found' }); + } + + const { owner, repo, branch } = provider.parseRepoUrl(repository_url); + const accessCheck = await provider.checkRepositoryAccess(owner, repo); + + if (!accessCheck.hasAccess) { + if (accessCheck.requiresAuth) { + // Check if we have OAuth token for this provider + const providerKey = (req.params.provider || '').toLowerCase(); + const oauthService = getOAuthService(providerKey); + if (oauthService) { + const tokenRecord = await oauthService.getToken(); + if (!tokenRecord) { + return res.status(401).json({ + success: false, + message: `${providerKey.charAt(0).toUpperCase() + providerKey.slice(1)} authentication required for this repository`, + requires_auth: true, + auth_url: `/api/vcs/${providerKey}/auth/start` + }); + } + } + } + + return res.status(404).json({ success: false, message: accessCheck.error || 'Repository not accessible' }); + } + + const repositoryData = await provider.fetchRepositoryMetadata(owner, repo); + let actualBranch = branch || branch_name || repositoryData.default_branch || 'main'; + + try { + // No-op for non-GitHub providers if not supported; adapters can throw if needed + } catch (_) {} + + // Preliminary analysis (may be refined after full sync) + let codebaseAnalysis = await provider.analyzeCodebase(owner, repo, actualBranch); + + // For backward-compatibility, insert into github_repositories for now + const insertQuery = ` + INSERT INTO github_repositories ( + template_id, repository_url, repository_name, owner_name, + branch_name, is_public, metadata, codebase_analysis, sync_status, + requires_auth, user_id + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) + RETURNING * + `; + const insertValues = [ + template_id, + repository_url, + repo, + owner, + actualBranch, + repositoryData.visibility === 'public', + JSON.stringify(repositoryData), + JSON.stringify(codebaseAnalysis), + 'synced', + accessCheck.requiresAuth, + userId || null + ]; + const insertResult = await database.query(insertQuery, insertValues); + const repositoryRecord = insertResult.rows[0]; + + const publicBaseUrl = process.env.PUBLIC_BASE_URL || null; + const callbackUrl = publicBaseUrl ? `${publicBaseUrl}/api/vcs/${req.params.provider}/webhook` : null; + try { await provider.ensureRepositoryWebhook(owner, repo, callbackUrl); } catch (_) {} + + const downloadResult = await provider.syncRepositoryWithFallback(owner, repo, actualBranch, repositoryRecord.id); + + // Recompute analysis from indexed storage for accurate counts + try { + const aggQuery = ` + SELECT + COALESCE(SUM(rf.file_size_bytes), 0) AS total_size, + COALESCE(COUNT(rf.id), 0) AS total_files, + COALESCE((SELECT COUNT(1) FROM repository_directories rd WHERE rd.storage_id = rs.id), 0) AS total_directories + FROM repository_storage rs + LEFT JOIN repository_files rf ON rs.id = rf.storage_id + WHERE rs.repository_id = $1 + GROUP BY rs.id + LIMIT 1 + `; + const aggRes = await database.query(aggQuery, [repositoryRecord.id]); + if (aggRes.rows.length > 0) { + const agg = aggRes.rows[0]; + codebaseAnalysis = { + total_files: Number(agg.total_files) || 0, + total_size: Number(agg.total_size) || 0, + directories: [], + branch: actualBranch + }; + // Persist refined analysis + await database.query('UPDATE github_repositories SET codebase_analysis = $1, updated_at = NOW() WHERE id = $2', [JSON.stringify(codebaseAnalysis), repositoryRecord.id]); + } + } catch (_) {} + + // Create empty feature mappings like existing flow + const featureResult = await database.query('SELECT id FROM template_features WHERE template_id = $1', [template_id]); + if (featureResult.rows.length > 0) { + const mappingValues = []; + const params = []; + let i = 1; + for (const feature of featureResult.rows) { + mappingValues.push(`(uuid_generate_v4(), $${i++}, $${i++}, $${i++}, $${i++})`); + params.push(feature.id, repositoryRecord.id, '[]', '{}'); + } + await database.query( + `INSERT INTO feature_codebase_mappings (id, feature_id, repository_id, code_paths, code_snippets) VALUES ${mappingValues.join(', ')}`, + params + ); + } + + const storageInfo = await (async () => { + const q = ` + SELECT rs.*, COUNT(DISTINCT rd.id) AS directories_count, COUNT(rf.id) AS files_count + FROM repository_storage rs + LEFT JOIN repository_directories rd ON rs.id = rd.storage_id + LEFT JOIN repository_files rf ON rs.id = rf.storage_id + WHERE rs.repository_id = $1 + GROUP BY rs.id + `; + const r = await database.query(q, [repositoryRecord.id]); + return r.rows[0] || null; + })(); + + res.status(201).json({ + success: true, + message: 'Repository attached successfully', + data: { + repository_id: repositoryRecord.id, + template_id: repositoryRecord.template_id, + repository_name: repositoryRecord.repository_name, + owner_name: repositoryRecord.owner_name, + branch_name: repositoryRecord.branch_name, + is_public: repositoryRecord.is_public, + requires_auth: repositoryRecord.requires_auth, + metadata: repositoryData, + codebase_analysis: codebaseAnalysis, + storage_info: storageInfo, + download_result: downloadResult + } + }); + } catch (error) { + console.error('Error attaching repository (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to attach repository' }); + } +}); + +// Generic webhook endpoint with provider-specific verification and push handling +router.post('/:provider/webhook', async (req, res) => { + try { + const providerKey = (req.params.provider || '').toLowerCase(); + const payload = req.body || {}; + + if (providerKey === 'github') { + return res.status(400).json({ success: false, message: 'Use /api/github/webhook for GitHub' }); + } + + // Signature verification + const rawBody = JSON.stringify(payload); + const verifySignature = () => { + try { + if (providerKey === 'gitlab') { + const token = req.headers['x-gitlab-token']; + const secret = process.env.GITLAB_WEBHOOK_SECRET; + if (!secret) return true; // if not set, skip + return token && token === secret; + } + if (providerKey === 'gitea') { + const crypto = require('crypto'); + const provided = req.headers['x-gitea-signature']; + const secret = process.env.GITEA_WEBHOOK_SECRET; + if (!secret) return true; + if (!provided) return false; + const expected = crypto.createHmac('sha256', secret).update(rawBody).digest('hex'); + return crypto.timingSafeEqual(Buffer.from(expected, 'hex'), Buffer.from(provided, 'hex')); + } + if (providerKey === 'bitbucket') { + // Bitbucket Cloud webhooks typically have no shared secret by default + return true; + } + return false; + } catch (_) { + return false; + } + }; + + if (!verifySignature()) { + return res.status(401).json({ success: false, message: 'Invalid webhook signature' }); + } + + // Process webhook event using comprehensive service + const eventType = extractEventType(providerKey, payload); + await vcsWebhookService.processWebhookEvent(providerKey, eventType, payload); + + return res.status(200).json({ success: true, message: 'Webhook processed', provider: providerKey, event_type: eventType }); + } catch (error) { + console.error('Error in VCS webhook:', error); + res.status(500).json({ success: false, message: 'Failed to process webhook' }); + } +}); + +module.exports = router; +// Additional provider-agnostic routes mirroring GitHub endpoints + +// Get repository diff between two SHAs (unified patch) +router.get('/:provider/repository/:id/diff', async (req, res) => { + try { + const provider = getProvider(req); + const { id } = req.params; + const { from, to } = req.query; + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const record = repoResult.rows[0]; + const { owner, repo } = provider.parseRepoUrl(record.repository_url); + // Use stored branch_name to avoid master/main mismatch + const targetBranch = record.branch_name || 'main'; + const patch = await provider.getRepositoryDiff(owner, repo, targetBranch, from || record.last_synced_commit_sha, to || 'HEAD'); + res.json({ success: true, data: { patch, from: from || record.last_synced_commit_sha, to: to || 'HEAD' } }); + } catch (error) { + console.error('Error getting diff (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to get diff' }); + } +}); + +// Get list of changed files since a SHA +router.get('/:provider/repository/:id/changes', async (req, res) => { + try { + const provider = getProvider(req); + const { id } = req.params; + const { since } = req.query; + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const record = repoResult.rows[0]; + const { owner, repo } = provider.parseRepoUrl(record.repository_url); + const sinceSha = since || record.last_synced_commit_sha; + if (!sinceSha) { + return res.status(400).json({ success: false, message: 'since SHA is required or must be available as last_synced_commit_sha' }); + } + const changes = await provider.getRepositoryChangesSince(owner, repo, record.branch_name, sinceSha); + res.json({ success: true, data: { since: sinceSha, changes } }); + } catch (error) { + console.error('Error getting changes (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to get changes' }); + } +}); + +// Get repository information for a template (latest) +router.get('/:provider/template/:id/repository', async (req, res) => { + try { + const { id } = req.params; + const query = ` + SELECT gr.*, rs.local_path, rs.storage_status, rs.total_files_count, + rs.total_directories_count, rs.total_size_bytes, rs.download_completed_at + FROM github_repositories gr + LEFT JOIN repository_storage rs ON gr.id = rs.repository_id + WHERE gr.template_id = $1 + ORDER BY gr.created_at DESC + LIMIT 1 + `; + const result = await database.query(query, [id]); + if (result.rows.length === 0) { + return res.status(404).json({ success: false, message: 'No repository found for this template' }); + } + const repository = result.rows[0]; + const parseMaybe = (v) => { + if (v == null) return {}; + if (typeof v === 'string') { try { return JSON.parse(v); } catch { return {}; } } + return v; + }; + res.json({ success: true, data: { ...repository, metadata: parseMaybe(repository.metadata), codebase_analysis: parseMaybe(repository.codebase_analysis) } }); + } catch (error) { + console.error('Error fetching repository (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to fetch repository' }); + } +}); + +// Get repository file structure +router.get('/:provider/repository/:id/structure', async (req, res) => { + try { + const { id } = req.params; + const { path: directoryPath } = req.query; + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const structure = await fileStorageService.getRepositoryStructure(id, directoryPath); + res.json({ success: true, data: { repository_id: id, directory_path: directoryPath || '', structure } }); + } catch (error) { + console.error('Error fetching repository structure (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to fetch repository structure' }); + } +}); + +// Get files in a directory +router.get('/:provider/repository/:id/files', async (req, res) => { + try { + const { id } = req.params; + const { directory_path = '' } = req.query; + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const files = await fileStorageService.getDirectoryFiles(id, directory_path); + res.json({ success: true, data: { repository_id: id, directory_path, files } }); + } catch (error) { + console.error('Error fetching directory files (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to fetch directory files' }); + } +}); + +// Get file content +router.get('/:provider/repository/:id/file-content', async (req, res) => { + try { + const { id } = req.params; + const { file_path } = req.query; + if (!file_path) { + return res.status(400).json({ success: false, message: 'File path is required' }); + } + const query = ` + SELECT rf.*, rfc.content_text, rfc.content_preview, rfc.language_detected, + rfc.line_count, rfc.char_count + FROM repository_files rf + LEFT JOIN repository_file_contents rfc ON rf.id = rfc.file_id + WHERE rf.repository_id = $1 AND rf.relative_path = $2 + `; + const result = await database.query(query, [id, file_path]); + if (result.rows.length === 0) { + return res.status(404).json({ success: false, message: 'File not found' }); + } + const file = result.rows[0]; + res.json({ success: true, data: { file_info: { id: file.id, filename: file.filename, file_extension: file.file_extension, relative_path: file.relative_path, file_size_bytes: file.file_size_bytes, mime_type: file.mime_type, is_binary: file.is_binary, language_detected: file.language_detected, line_count: file.line_count, char_count: file.char_count }, content: file.is_binary ? null : file.content_text, preview: file.content_preview } }); + } catch (error) { + console.error('Error fetching file content (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to fetch file content' }); + } +}); + +// List all repositories for a template +router.get('/:provider/template/:id/repositories', async (req, res) => { + try { + const { id } = req.params; + const query = ` + SELECT gr.*, rs.local_path, rs.storage_status, rs.total_files_count, + rs.total_directories_count, rs.total_size_bytes, rs.download_completed_at + FROM github_repositories gr + LEFT JOIN repository_storage rs ON gr.id = rs.repository_id + WHERE gr.template_id = $1 + ORDER BY gr.created_at DESC + `; + const result = await database.query(query, [id]); + const repositories = result.rows.map(repo => ({ ...repo, metadata: JSON.parse(repo.metadata || '{}'), codebase_analysis: JSON.parse(repo.codebase_analysis || '{}') })); + res.json({ success: true, data: repositories }); + } catch (error) { + console.error('Error fetching repositories (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to fetch repositories' }); + } +}); + +// Re-sync repository (git-based) +router.post('/:provider/repository/:id/sync', async (req, res) => { + try { + const provider = getProvider(req); + const { id } = req.params; + const repoQuery = 'SELECT * FROM github_repositories WHERE id = $1'; + const repoResult = await database.query(repoQuery, [id]); + if (repoResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const repository = repoResult.rows[0]; + const { owner, repo, branch } = provider.parseRepoUrl(repository.repository_url); + await provider.cleanupRepositoryStorage(id); + const downloadResult = await provider.syncRepositoryWithFallback(owner, repo, branch || repository.branch_name, id); + await database.query('UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', [downloadResult.success ? 'synced' : 'error', id]); + res.json({ success: downloadResult.success, message: downloadResult.success ? 'Repository synced successfully' : 'Failed to sync repository', data: downloadResult }); + } catch (error) { + console.error('Error syncing repository (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to sync repository' }); + } +}); + +// Remove repository +router.delete('/:provider/repository/:id', async (req, res) => { + try { + const { id } = req.params; + const getResult = await database.query('SELECT * FROM github_repositories WHERE id = $1', [id]); + if (getResult.rows.length === 0) { + return res.status(404).json({ success: false, message: 'Repository not found' }); + } + const repository = getResult.rows[0]; + await fileStorageService.cleanupRepositoryStorage(id); + await database.query('DELETE FROM feature_codebase_mappings WHERE repository_id = $1', [id]); + await database.query('DELETE FROM github_repositories WHERE id = $1', [id]); + res.json({ success: true, message: 'Repository removed successfully', data: { removed_repository: repository.repository_name, template_id: repository.template_id } }); + } catch (error) { + console.error('Error removing repository (vcs):', error); + res.status(500).json({ success: false, message: error.message || 'Failed to remove repository' }); + } +}); + +// OAuth placeholders (start/callback) per provider for future implementation +router.get('/:provider/auth/start', (req, res) => { + try { + const providerKey = (req.params.provider || '').toLowerCase(); + const oauth = getOAuthService(providerKey); + if (!oauth) return res.status(400).json({ success: false, message: 'Unsupported provider or OAuth not available' }); + const state = req.query.state || Math.random().toString(36).slice(2); + const url = oauth.getAuthUrl(state); + res.json({ success: true, auth_url: url, provider: providerKey, state }); + } catch (e) { + res.status(500).json({ success: false, message: e.message || 'Failed to start OAuth' }); + } +}); + +router.get('/:provider/auth/callback', (req, res) => { + (async () => { + try { + const providerKey = (req.params.provider || '').toLowerCase(); + const code = req.query.code; + const error = req.query.error; + const errorDescription = req.query.error_description; + const oauth = getOAuthService(providerKey); + if (!oauth) return res.status(400).json({ success: false, message: 'Unsupported provider or OAuth not available' }); + if (!code) { + // Surface upstream provider error details if present + if (error || errorDescription) { + return res.status(400).json({ success: false, message: 'OAuth error from provider', provider: providerKey, error: error || 'unknown_error', error_description: errorDescription || null, query: req.query }); + } + return res.status(400).json({ success: false, message: 'Missing code' }); + } + const accessToken = await oauth.exchangeCodeForToken(code); + const user = await oauth.getUserInfo(accessToken); + const userId = + req.query.user_id || + (req.body && req.body.user_id) || + req.headers['x-user-id'] || + (req.cookies && (req.cookies.user_id || req.cookies.uid)) || + (req.session && req.session.user && (req.session.user.id || req.session.user.userId)) || + (req.user && (req.user.id || req.user.userId)); + if (providerKey === 'github' && !userId) { + return res.status(400).json({ success: false, message: 'user_id is required to complete GitHub authentication' }); + } + console.log('[VCS OAuth] callback provider=%s resolved user_id = %s', providerKey, userId || null); + const tokenRecord = await oauth.storeToken(accessToken, user, userId || null); + res.json({ success: true, provider: providerKey, user, token: { id: tokenRecord.id || null } }); + } catch (e) { + res.status(500).json({ success: false, message: e.message || 'OAuth callback failed' }); + } + })(); +}); + diff --git a/services/git-integration/src/routes/webhook.routes.js b/services/git-integration/src/routes/webhook.routes.js new file mode 100644 index 0000000..884e222 --- /dev/null +++ b/services/git-integration/src/routes/webhook.routes.js @@ -0,0 +1,114 @@ +// routes/webhook.routes.js +const express = require('express'); +const router = express.Router(); +const WebhookService = require('../services/webhook.service'); + +const webhookService = new WebhookService(); + +// GitHub webhook endpoint +router.post('/webhook', async (req, res) => { + try { + const signature = req.headers['x-hub-signature-256']; + const eventType = req.headers['x-github-event']; + const deliveryId = req.headers['x-github-delivery']; + const userAgent = req.headers['user-agent']; + + console.log('🔔 GitHub webhook received:'); + console.log(`- Event Type: ${eventType}`); + console.log(`- Delivery ID: ${deliveryId}`); + console.log(`- User Agent: ${userAgent}`); + console.log(`- Signature: ${signature ? 'Present' : 'Missing'}`); + console.log(`- Payload Size: ${JSON.stringify(req.body).length} bytes`); + console.log(`- Timestamp: ${new Date().toISOString()}`); + + // Verify webhook signature if secret is configured + if (process.env.GITHUB_WEBHOOK_SECRET) { + const rawBody = JSON.stringify(req.body); + const isValidSignature = webhookService.verifySignature(rawBody, signature); + + if (!isValidSignature) { + console.warn('Invalid webhook signature - potential security issue'); + return res.status(401).json({ + success: false, + message: 'Invalid webhook signature' + }); + } + } else { + console.warn('GitHub webhook secret not configured - skipping signature verification'); + } + + // Attach delivery_id into payload for downstream persistence convenience + const payloadWithDelivery = { ...req.body, delivery_id: deliveryId }; + + // Process the webhook event + if (eventType) { + await webhookService.processWebhookEvent(eventType, payloadWithDelivery); + } + + // Log the webhook event + await webhookService.logWebhookEvent( + eventType || 'unknown', + req.body.action || 'unknown', + req.body.repository?.full_name || 'unknown', + { + delivery_id: deliveryId, + event_type: eventType, + action: req.body.action, + repository: req.body.repository?.full_name, + sender: req.body.sender?.login + }, + deliveryId, + payloadWithDelivery + ); + + res.status(200).json({ + success: true, + message: 'Webhook processed successfully', + event_type: eventType, + delivery_id: deliveryId + }); + + } catch (error) { + console.error('Error processing webhook:', error); + res.status(500).json({ + success: false, + message: 'Failed to process webhook', + error: process.env.NODE_ENV === 'development' ? error.message : 'Internal server error' + }); + } +}); + +// Get recent webhook events (for debugging) +router.get('/webhook/events', async (req, res) => { + try { + const limit = parseInt(req.query.limit) || 50; + const events = await webhookService.getRecentWebhookEvents(limit); + + res.json({ + success: true, + data: { + events, + total: events.length, + limit + } + }); + } catch (error) { + console.error('Error fetching webhook events:', error); + res.status(500).json({ + success: false, + message: 'Failed to fetch webhook events' + }); + } +}); + +// Webhook health check +router.get('/webhook/health', (req, res) => { + res.json({ + success: true, + message: 'Webhook service is healthy', + timestamp: new Date().toISOString(), + webhook_secret_configured: !!process.env.GITHUB_WEBHOOK_SECRET + }); +}); + +module.exports = router; diff --git a/services/git-integration/src/services/bitbucket-oauth.js b/services/git-integration/src/services/bitbucket-oauth.js new file mode 100644 index 0000000..59fe82e --- /dev/null +++ b/services/git-integration/src/services/bitbucket-oauth.js @@ -0,0 +1,64 @@ +// services/bitbucket-oauth.js +const database = require('../config/database'); + +class BitbucketOAuthService { + constructor() { + this.clientId = process.env.BITBUCKET_CLIENT_ID; + this.clientSecret = process.env.BITBUCKET_CLIENT_SECRET; + this.redirectUri = process.env.BITBUCKET_REDIRECT_URI || 'http://localhost:8012/api/vcs/bitbucket/auth/callback'; + } + + getAuthUrl(state) { + if (!this.clientId) throw new Error('Bitbucket OAuth not configured'); + const params = new URLSearchParams({ + client_id: this.clientId, + response_type: 'code', + state, + // Bitbucket Cloud uses 'repository' for read access; 'repository:write' for write + scope: 'repository account', + redirect_uri: this.redirectUri + }); + return `https://bitbucket.org/site/oauth2/authorize?${params.toString()}`; + } + + async exchangeCodeForToken(code) { + const resp = await fetch('https://bitbucket.org/site/oauth2/access_token', { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded', Authorization: `Basic ${Buffer.from(`${this.clientId}:${this.clientSecret}`).toString('base64')}` }, + body: new URLSearchParams({ grant_type: 'authorization_code', code, redirect_uri: this.redirectUri }) + }); + let data = null; + try { data = await resp.json(); } catch (_) { data = null; } + if (!resp.ok) { + const detail = data?.error_description || data?.error || (await resp.text().catch(() => '')) || 'unknown_error'; + throw new Error(`Bitbucket token exchange failed: ${detail}`); + } + return data.access_token; + } + + async getUserInfo(accessToken) { + const resp = await fetch('https://api.bitbucket.org/2.0/user', { headers: { Authorization: `Bearer ${accessToken}` } }); + if (!resp.ok) throw new Error('Failed to fetch Bitbucket user'); + return await resp.json(); + } + + async storeToken(accessToken, user) { + const result = await database.query( + `INSERT INTO bitbucket_user_tokens (access_token, bitbucket_username, bitbucket_user_id, scopes, expires_at) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (id) DO UPDATE SET access_token = EXCLUDED.access_token, bitbucket_username = EXCLUDED.bitbucket_username, bitbucket_user_id = EXCLUDED.bitbucket_user_id, scopes = EXCLUDED.scopes, expires_at = EXCLUDED.expires_at, updated_at = NOW() + RETURNING *`, + [accessToken, user.username || user.display_name, user.uuid || null, JSON.stringify(['repository:read','account']), null] + ); + return result.rows[0]; + } + + async getToken() { + const r = await database.query('SELECT * FROM bitbucket_user_tokens ORDER BY created_at DESC LIMIT 1'); + return r.rows[0]; + } +} + +module.exports = BitbucketOAuthService; + + diff --git a/services/git-integration/src/services/file-storage.service.js b/services/git-integration/src/services/file-storage.service.js index 415b567..ecc3c8e 100644 --- a/services/git-integration/src/services/file-storage.service.js +++ b/services/git-integration/src/services/file-storage.service.js @@ -61,6 +61,16 @@ class FileStorageService { return null; } + // Skip any .git directory anywhere in the tree + const normalizedRel = currentPath.replace(/\\/g, '/'); + if ( + normalizedRel === '.git' || + normalizedRel.startsWith('.git/') || + normalizedRel.includes('/.git/') + ) { + return null; + } + // Insert directory record const dirName = currentPath === '' ? '.' : path.basename(currentPath); const relativePath = currentPath === '' ? '' : currentPath; @@ -91,6 +101,11 @@ class FileStorageService { const itemRelativePath = currentPath ? path.join(currentPath, item) : item; const itemStats = fs.statSync(itemPath); + // Skip .git directory and its contents + if (item === '.git' || itemRelativePath.replace(/\\/g, '/').includes('/.git/')) { + continue; + } + if (itemStats.isDirectory()) { // Recursively process subdirectory const subDir = await this.processDirectoryStructure( diff --git a/services/git-integration/src/services/git-repo.service.js b/services/git-integration/src/services/git-repo.service.js new file mode 100644 index 0000000..17f3375 --- /dev/null +++ b/services/git-integration/src/services/git-repo.service.js @@ -0,0 +1,143 @@ +// services/git-repo.service.js +const fs = require('fs'); +const path = require('path'); +const { exec, execFile } = require('child_process'); + +class GitRepoService { + constructor() { + this.baseDir = process.env.ATTACHED_REPOS_DIR || '/tmp/attached-repos'; + } + + getLocalRepoPath(owner, repo, branch) { + return path.join(this.baseDir, `${owner}__${repo}__${branch}`); + } + + async ensureDirectory(dirPath) { + if (!fs.existsSync(dirPath)) { + fs.mkdirSync(dirPath, { recursive: true }); + } + } + + runGitCommand(cwd, command) { + return new Promise((resolve, reject) => { + try { + if (!fs.existsSync(cwd)) { + return reject(new Error(`Working directory not found: ${cwd}`)); + } + } catch (_) { + return reject(new Error(`Invalid working directory: ${cwd}`)); + } + // Make git non-interactive to avoid terminal credential prompts + const env = { ...process.env, GIT_TERMINAL_PROMPT: '0' }; + exec(command, { cwd, maxBuffer: 1024 * 1024 * 64, env }, (error, stdout, stderr) => { + if (error) { + const details = [`cmd: ${command}`, `cwd: ${cwd}`, stderr ? `stderr: ${stderr}` : ''].filter(Boolean).join('\n'); + return reject(new Error((stderr && stderr.trim()) || `${error.message}\n${details}`)); + } + resolve(stdout.trim()); + }); + }); + } + + runGit(cwd, args) { + return new Promise((resolve, reject) => { + try { + if (!fs.existsSync(cwd)) { + return reject(new Error(`Working directory not found: ${cwd}`)); + } + } catch (_) { + return reject(new Error(`Invalid working directory: ${cwd}`)); + } + const env = { ...process.env, GIT_TERMINAL_PROMPT: '0' }; + execFile('git', args, { cwd, maxBuffer: 1024 * 1024 * 64, env }, (error, stdout, stderr) => { + if (error) { + const details = [`git ${args.join(' ')}`, `cwd: ${cwd}`, stderr ? `stderr: ${stderr}` : ''].filter(Boolean).join('\n'); + return reject(new Error((stderr && stderr.trim()) || `${error.message}\n${details}`)); + } + resolve(stdout.trim()); + }); + }); + } + + async cloneIfMissing(owner, repo, branch) { + const repoPath = this.getLocalRepoPath(owner, repo, branch); + await this.ensureDirectory(path.dirname(repoPath)); + if (!fs.existsSync(repoPath) || !fs.existsSync(path.join(repoPath, '.git'))) { + const cloneUrl = `https://github.com/${owner}/${repo}.git`; + await this.runGit(path.dirname(repoPath), ['clone', '--depth', '1', '-b', branch, cloneUrl, path.basename(repoPath)]); + } + return repoPath; + } + + async cloneIfMissingWithHost(owner, repo, branch, host) { + const repoPath = this.getLocalRepoPath(owner, repo, branch); + await this.ensureDirectory(path.dirname(repoPath)); + if (!fs.existsSync(repoPath) || !fs.existsSync(path.join(repoPath, '.git'))) { + const normalizedHost = (host || 'github.com').replace(/^https?:\/\//, '').replace(/\/$/, ''); + const cloneUrl = `https://${normalizedHost}/${owner}/${repo}.git`; + await this.runGit(path.dirname(repoPath), ['clone', '--depth', '1', '-b', branch, cloneUrl, path.basename(repoPath)]); + } + return repoPath; + } + + async getHeadSha(repoPath) { + try { + const sha = await this.runGit(repoPath, ['rev-parse', 'HEAD']); + return sha; + } catch (_) { + return null; + } + } + + async fetchAndFastForward(repoPath, branch) { + const beforeSha = await this.getHeadSha(repoPath); + await this.runGit(repoPath, ['fetch', '--all', '--prune']); + await this.runGit(repoPath, ['checkout', branch]); + await this.runGit(repoPath, ['pull', '--ff-only', 'origin', branch]); + const afterSha = await this.getHeadSha(repoPath); + return { beforeSha, afterSha }; + } + + async cloneIfMissingWithAuth(owner, repo, branch, host, token, tokenType = 'oauth2') { + const repoPath = this.getLocalRepoPath(owner, repo, branch); + await this.ensureDirectory(path.dirname(repoPath)); + if (!fs.existsSync(repoPath) || !fs.existsSync(path.join(repoPath, '.git'))) { + const normalizedHost = (host || 'github.com').replace(/^https?:\/\//, '').replace(/\/$/, ''); + let cloneUrl = `https://${normalizedHost}/${owner}/${repo}.git`; + if (token) { + if (tokenType === 'oauth2') { + // Many providers accept oauth2:@host + cloneUrl = `https://oauth2:${token}@${normalizedHost}/${owner}/${repo}.git`; + } else if (tokenType === 'bearer') { + // Use extraheader auth pattern + await this.runGit(path.dirname(repoPath), ['-c', `http.extraheader=Authorization: Bearer ${token}`, 'clone', '--depth', '1', '-b', branch, cloneUrl, path.basename(repoPath)]); + return repoPath; + } + } + await this.runGit(path.dirname(repoPath), ['clone', '--depth', '1', '-b', branch, cloneUrl, path.basename(repoPath)]); + } + return repoPath; + } + + async getDiff(repoPath, fromSha, toSha, options = { patch: true }) { + const range = fromSha && toSha ? `${fromSha}..${toSha}` : toSha ? `${toSha}^..${toSha}` : ''; + const mode = options.patch ? '--patch' : '--name-status'; + const args = ['diff', mode]; + if (range) args.push(range); + const output = await this.runGit(repoPath, args); + return output; + } + + async getChangedFilesSince(repoPath, sinceSha) { + const output = await this.runGit(repoPath, ['diff', '--name-status', `${sinceSha}..HEAD`]); + const lines = output.split('\n').filter(Boolean); + return lines.map(line => { + const [status, filePath] = line.split(/\s+/, 2); + return { status, filePath }; + }); + } +} + +module.exports = GitRepoService; + + diff --git a/services/git-integration/src/services/gitea-oauth.js b/services/git-integration/src/services/gitea-oauth.js new file mode 100644 index 0000000..662a043 --- /dev/null +++ b/services/git-integration/src/services/gitea-oauth.js @@ -0,0 +1,77 @@ +// services/gitea-oauth.js +const database = require('../config/database'); + +class GiteaOAuthService { + constructor() { + this.clientId = process.env.GITEA_CLIENT_ID; + this.clientSecret = process.env.GITEA_CLIENT_SECRET; + this.baseUrl = (process.env.GITEA_BASE_URL || 'https://gitea.com').replace(/\/$/, ''); + this.redirectUri = process.env.GITEA_REDIRECT_URI || 'http://localhost:8012/api/vcs/gitea/auth/callback'; + } + + getAuthUrl(state) { + if (!this.clientId) throw new Error('Gitea OAuth not configured'); + const authUrl = `${this.baseUrl}/login/oauth/authorize`; + const params = new URLSearchParams({ + client_id: this.clientId, + redirect_uri: this.redirectUri, + response_type: 'code', + // Request both user and repository read scopes + scope: 'read:user read:repository', + state + }); + return `${authUrl}?${params.toString()}`; + } + + async exchangeCodeForToken(code) { + const tokenUrl = `${this.baseUrl}/login/oauth/access_token`; + const resp = await fetch(tokenUrl, { + method: 'POST', + headers: { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded' }, + body: new URLSearchParams({ + client_id: this.clientId, + client_secret: this.clientSecret, + code, + grant_type: 'authorization_code', + redirect_uri: this.redirectUri + }) + }); + let data = null; + try { data = await resp.json(); } catch (_) { data = null; } + if (!resp.ok || data?.error) { + const detail = data?.error_description || data?.error || (await resp.text().catch(() => '')) || 'unknown_error'; + throw new Error(`Gitea token exchange failed: ${detail}`); + } + return data.access_token; + } + + async getUserInfo(accessToken) { + const resp = await fetch(`${this.baseUrl}/api/v1/user`, { headers: { Authorization: `Bearer ${accessToken}` } }); + if (!resp.ok) { + let txt = ''; + try { txt = await resp.text(); } catch (_) {} + throw new Error(`Failed to fetch Gitea user (status ${resp.status}): ${txt}`); + } + return await resp.json(); + } + + async storeToken(accessToken, user) { + const result = await database.query( + `INSERT INTO gitea_user_tokens (access_token, gitea_username, gitea_user_id, scopes, expires_at) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (id) DO UPDATE SET access_token = EXCLUDED.access_token, gitea_username = EXCLUDED.gitea_username, gitea_user_id = EXCLUDED.gitea_user_id, scopes = EXCLUDED.scopes, expires_at = EXCLUDED.expires_at, updated_at = NOW() + RETURNING *`, + [accessToken, user.login, user.id, JSON.stringify(['read:repository']), null] + ); + return result.rows[0]; + } + + async getToken() { + const r = await database.query('SELECT * FROM gitea_user_tokens ORDER BY created_at DESC LIMIT 1'); + return r.rows[0]; + } +} + +module.exports = GiteaOAuthService; + + diff --git a/services/git-integration/src/services/github-integration.service.js b/services/git-integration/src/services/github-integration.service.js index 4d460d1..63e3845 100644 --- a/services/git-integration/src/services/github-integration.service.js +++ b/services/git-integration/src/services/github-integration.service.js @@ -3,13 +3,16 @@ const { Octokit } = require('@octokit/rest'); const fs = require('fs'); const path = require('path'); const { exec } = require('child_process'); +const parseGitHubUrl = require('parse-github-url'); const GitHubOAuthService = require('./github-oauth'); const FileStorageService = require('./file-storage.service'); +const GitRepoService = require('./git-repo.service'); class GitHubIntegrationService { constructor() { this.oauthService = new GitHubOAuthService(); this.fileStorageService = new FileStorageService(); + this.gitRepoService = new GitRepoService(); // Default unauthenticated instance this.octokit = new Octokit({ @@ -22,19 +25,86 @@ class GitHubIntegrationService { return await this.oauthService.getAuthenticatedOctokit(); } - // Extract owner, repo, and branch from GitHub URL + // Extract owner, repo, and branch from GitHub URL using parse-github-url library parseGitHubUrl(url) { - const regex = /github\.com\/([^\/]+)\/([^\/]+)(?:\/tree\/([^\/]+))?/; - const match = url.match(regex); + if (!url || typeof url !== 'string') { + throw new Error('URL must be a non-empty string'); + } + + // Normalize the URL first + let normalizedUrl = url.trim(); - if (!match) { - throw new Error('Invalid GitHub repository URL'); + // Handle URLs without protocol + if (!normalizedUrl.startsWith('http://') && !normalizedUrl.startsWith('https://') && !normalizedUrl.startsWith('git@')) { + normalizedUrl = 'https://' + normalizedUrl; + } + + // Handle SSH format: git@github.com:owner/repo.git + if (normalizedUrl.startsWith('git@github.com:')) { + normalizedUrl = normalizedUrl.replace('git@github.com:', 'https://github.com/'); + } + + // Handle git+https format: git+https://github.com/owner/repo.git + if (normalizedUrl.startsWith('git+https://') || normalizedUrl.startsWith('git+http://')) { + normalizedUrl = normalizedUrl.replace('git+', ''); + } + + // Validate that it's a GitHub URL before parsing + if (!normalizedUrl.includes('github.com')) { + throw new Error(`Invalid GitHub repository URL: ${url}`); + } + + // Clean URL by removing query parameters and fragments for parsing + const cleanUrl = normalizedUrl.split('?')[0].split('#')[0]; + + // Use the parse-github-url library to parse the URL + const parsed = parseGitHubUrl(cleanUrl); + + if (!parsed || !parsed.owner || !parsed.name) { + throw new Error(`Invalid GitHub repository URL: ${url}`); + } + + // Additional validation: reject URLs with invalid paths + const urlWithoutQuery = normalizedUrl.split('?')[0].split('#')[0]; + const pathAfterRepo = urlWithoutQuery.split(/github\.com\/[^\/]+\/[^\/]+/)[1]; + if (pathAfterRepo && pathAfterRepo.length > 0) { + const validPaths = ['/tree/', '/blob/', '/commit/', '/pull/', '/issue', '/archive/', '/releases', '/actions', '/projects', '/wiki', '/settings', '/security', '/insights', '/pulse', '/graphs', '/network', '/compare']; + const hasValidPath = validPaths.some(path => pathAfterRepo.startsWith(path)); + if (!hasValidPath) { + throw new Error(`Invalid GitHub repository URL: ${url}`); + } + } + + // Extract branch information + let branch = parsed.branch; + + // Handle special cases for branch extraction + if (branch) { + // For archive URLs, remove .zip or .tar.gz extensions + branch = branch.replace(/\.(zip|tar\.gz|tar)$/, ''); + + // For blob URLs, the branch might be followed by a path, take only the first part + branch = branch.split('/')[0]; + + // For commit/PR/issue URLs, don't treat the ID as a branch + if (normalizedUrl.includes('/commit/') || normalizedUrl.includes('/pull/') || normalizedUrl.includes('/issue')) { + branch = 'main'; // Default to main for these cases + } + } + + // Validate owner and repo names (GitHub naming rules) + if (!/^[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$/.test(parsed.owner)) { + throw new Error(`Invalid GitHub owner name: ${parsed.owner}`); + } + + if (!/^[a-zA-Z0-9]([a-zA-Z0-9\-\._]*[a-zA-Z0-9])?$/.test(parsed.name)) { + throw new Error(`Invalid GitHub repository name: ${parsed.name}`); } return { - owner: match[1], - repo: match[2].replace('.git', ''), - branch: match[3] || 'main' + owner: parsed.owner, + repo: parsed.name, + branch: branch || 'main' }; } @@ -60,6 +130,77 @@ class GitHubIntegrationService { error: 'Repository not found or requires authentication' }; } + + // Handle authentication errors + if (error.status === 401 || error.message.includes('token has expired') || error.message.includes('authenticate with GitHub')) { + return { + exists: null, + isPrivate: null, + hasAccess: false, + requiresAuth: true, + error: 'GitHub authentication required or token expired', + authError: true + }; + } + + throw error; + } + } + + // Check repository access with user-specific tokens + async checkRepositoryAccessWithUser(owner, repo, userId) { + try { + // First try to find a token that can access this repository + const token = await this.oauthService.findTokenForRepository(userId, owner, repo); + + if (token) { + // We found a token that can access this repository + const octokit = new Octokit({ auth: token.access_token }); + const { data } = await octokit.repos.get({ owner, repo }); + + return { + exists: true, + isPrivate: data.private, + hasAccess: true, + requiresAuth: data.private, + github_username: token.github_username, + token_id: token.id + }; + } + + // No token found that can access this repository + return { + exists: null, + isPrivate: null, + hasAccess: false, + requiresAuth: true, + error: 'Repository not found or requires authentication', + authError: false + }; + + } catch (error) { + if (error.status === 404) { + return { + exists: false, + isPrivate: null, + hasAccess: false, + requiresAuth: true, + error: 'Repository not found or requires authentication' + }; + } + + // Handle authentication errors + if (error.status === 401 || error.message.includes('token has expired') || error.message.includes('authenticate with GitHub')) { + return { + exists: null, + isPrivate: null, + hasAccess: false, + requiresAuth: true, + error: 'GitHub authentication required or token expired', + authError: true + }; + } + throw error; } } @@ -195,15 +336,253 @@ class GitHubIntegrationService { return result.rows[0] || null; } + // Ensure a GitHub webhook exists for the repository (uses OAuth token) + async ensureRepositoryWebhook(owner, repo, callbackUrl) { + try { + const secret = process.env.GITHUB_WEBHOOK_SECRET; + if (!callbackUrl) { + console.warn('Webhook callbackUrl not provided; skipping webhook creation'); + return { created: false, reason: 'missing_callback_url' }; + } + + const octokit = await this.getAuthenticatedOctokit(); + + // List existing hooks to avoid duplicates + const { data: hooks } = await octokit.request('GET /repos/{owner}/{repo}/hooks', { + owner, + repo + }); + + const existing = hooks.find(h => h.config && h.config.url === callbackUrl); + if (existing) { + // Optionally ensure events include push + if (!existing.events || !existing.events.includes('push')) { + try { + await octokit.request('PATCH /repos/{owner}/{repo}/hooks/{hook_id}', { + owner, + repo, + hook_id: existing.id, + events: Array.from(new Set([...(existing.events || []), 'push'])) + }); + } catch (e) { + console.warn('Failed to update existing webhook events:', e.message); + } + } + return { created: false, reason: 'exists', hook_id: existing.id }; + } + + // Create new webhook + const createResp = await octokit.request('POST /repos/{owner}/{repo}/hooks', { + owner, + repo, + config: { + url: callbackUrl, + content_type: 'json', + secret: secret || undefined, + insecure_ssl: '0' + }, + events: ['push'], + active: true + }); + + return { created: true, hook_id: createResp.data.id }; + } catch (error) { + // Common cases: insufficient permissions, private repo without correct scope + console.warn('ensureRepositoryWebhook failed:', error.status, error.message); + return { created: false, error: error.message }; + } + } + + // Git-based: clone or update local repo and re-index into DB + async syncRepositoryWithGit(owner, repo, branch, repositoryId) { + const database = require('../config/database'); + const localPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + let storageRecord = null; + + try { + await this.gitRepoService.ensureDirectory(path.dirname(localPath)); + + // Initialize storage record as downloading + storageRecord = await this.fileStorageService.initializeRepositoryStorage( + repositoryId, + localPath + ); + + // Clone if missing (prefer authenticated HTTPS with OAuth token), otherwise fetch & fast-forward + let repoPath = null; + try { + const tokenRecord = await this.oauthService.getToken(); + if (tokenRecord?.access_token) { + repoPath = await this.gitRepoService.cloneIfMissingWithAuth( + owner, + repo, + branch, + 'github.com', + tokenRecord.access_token, + 'oauth2' + ); + } + } catch (_) {} + if (!repoPath) { + repoPath = await this.gitRepoService.cloneIfMissing(owner, repo, branch); + } + const beforeSha = await this.gitRepoService.getHeadSha(repoPath); + const { afterSha } = await this.gitRepoService.fetchAndFastForward(repoPath, branch); + + // Index filesystem into DB + await this.fileStorageService.processDirectoryStructure( + storageRecord.id, + repositoryId, + repoPath + ); + + const finalStorage = await this.fileStorageService.completeRepositoryStorage(storageRecord.id); + + // Persist last synced commit + try { + await database.query( + 'UPDATE github_repositories SET last_synced_commit_sha = $1, last_synced_at = NOW(), updated_at = NOW() WHERE id = $2', + [afterSha || beforeSha || null, repositoryId] + ); + } catch (_) {} + + return { + success: true, + targetDir: repoPath, + beforeSha, + afterSha: afterSha || beforeSha, + storage: finalStorage + }; + } catch (error) { + if (storageRecord) { + await this.fileStorageService.markStorageFailed(storageRecord.id, error.message); + } + return { success: false, error: error.message }; + } + } + + // Git-based: get unified diff between two SHAs in local repo + async getRepositoryDiff(owner, repo, branch, fromSha, toSha) { + // Ensure local repo exists and is up to date; handle main/master mismatch gracefully + const preferredBranch = branch || 'main'; + const alternateBranch = preferredBranch === 'main' ? 'master' : 'main'; + + let repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, preferredBranch); + try { + // Try to ensure repo exists for the preferred branch + try { + const tokenRecord = await this.oauthService.getToken().catch(() => null); + if (tokenRecord?.access_token) { + repoPath = await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, preferredBranch, 'github.com', tokenRecord.access_token, 'oauth2'); + } else { + repoPath = await this.gitRepoService.cloneIfMissing(owner, repo, preferredBranch); + } + } catch (cloneErr) { + // If the branch doesn't exist (e.g., refs/heads not found), try the alternate branch + try { + const tokenRecordAlt = await this.oauthService.getToken().catch(() => null); + repoPath = tokenRecordAlt?.access_token + ? await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, alternateBranch, 'github.com', tokenRecordAlt.access_token, 'oauth2') + : await this.gitRepoService.cloneIfMissing(owner, repo, alternateBranch); + } catch (_) { + // Fall through; we'll try to use any existing local copy next + } + } + + // If a local repo exists for alternate branch, prefer that to avoid failures + const fs = require('fs'); + const altPath = this.gitRepoService.getLocalRepoPath(owner, repo, alternateBranch); + if ((!fs.existsSync(repoPath) || !fs.existsSync(require('path').join(repoPath, '.git'))) && fs.existsSync(altPath)) { + repoPath = altPath; + } + + // Update and checkout target ref if possible + try { + await this.gitRepoService.fetchAndFastForward(repoPath, preferredBranch); + } catch (_) { + // If checkout fails for preferred branch, attempt alternate + try { await this.gitRepoService.fetchAndFastForward(repoPath, alternateBranch); } catch (_) {} + } + + const patch = await this.gitRepoService.getDiff(repoPath, fromSha || null, toSha || 'HEAD', { patch: true }); + return patch; + } catch (error) { + // Surface a clearer error including both attempted paths + const attempted = [this.gitRepoService.getLocalRepoPath(owner, repo, preferredBranch), this.gitRepoService.getLocalRepoPath(owner, repo, alternateBranch)].join(' | '); + throw new Error(`${error.message} (attempted paths: ${attempted})`); + } + } + + // Git-based: list changed files since a SHA + async getRepositoryChangesSince(owner, repo, branch, sinceSha) { + const preferredBranch = branch || 'main'; + const alternateBranch = preferredBranch === 'main' ? 'master' : 'main'; + + let repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, preferredBranch); + try { + // Ensure repo exists similarly to diff flow + try { + const tokenRecord = await this.oauthService.getToken().catch(() => null); + if (tokenRecord?.access_token) { + repoPath = await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, preferredBranch, 'github.com', tokenRecord.access_token, 'oauth2'); + } else { + repoPath = await this.gitRepoService.cloneIfMissing(owner, repo, preferredBranch); + } + } catch (_) { + try { + const tokenRecordAlt = await this.oauthService.getToken().catch(() => null); + repoPath = tokenRecordAlt?.access_token + ? await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, alternateBranch, 'github.com', tokenRecordAlt.access_token, 'oauth2') + : await this.gitRepoService.cloneIfMissing(owner, repo, alternateBranch); + } catch (_) {} + } + + const fs = require('fs'); + const altPath = this.gitRepoService.getLocalRepoPath(owner, repo, alternateBranch); + if ((!fs.existsSync(repoPath) || !fs.existsSync(require('path').join(repoPath, '.git'))) && fs.existsSync(altPath)) { + repoPath = altPath; + } + + try { + await this.gitRepoService.fetchAndFastForward(repoPath, preferredBranch); + } catch (_) { + try { await this.gitRepoService.fetchAndFastForward(repoPath, alternateBranch); } catch (_) {} + } + + const files = await this.gitRepoService.getChangedFilesSince(repoPath, sinceSha); + return files; + } catch (error) { + const attempted = [this.gitRepoService.getLocalRepoPath(owner, repo, preferredBranch), this.gitRepoService.getLocalRepoPath(owner, repo, alternateBranch)].join(' | '); + throw new Error(`${error.message} (attempted paths: ${attempted})`); + } + } + // Clean up repository storage async cleanupRepositoryStorage(repositoryId) { return await this.fileStorageService.cleanupRepositoryStorage(repositoryId); } + // Try git-based sync first, fall back to GitHub API download on failure + async syncRepositoryWithFallback(owner, repo, branch, repositoryId) { + // First attempt: full git clone/fetch and index + const gitResult = await this.syncRepositoryWithGit(owner, repo, branch, repositoryId); + if (gitResult && gitResult.success) { + return { method: 'git', ...gitResult }; + } + + // Fallback: API-based download and storage + const apiResult = await this.downloadRepositoryWithStorage(owner, repo, branch, repositoryId); + if (apiResult && apiResult.success) { + return { method: 'api', ...apiResult, git_error: gitResult?.error }; + } + + return { success: false, error: apiResult?.error || gitResult?.error || 'Unknown sync failure' }; + } + // Download repository files locally and store in database async downloadRepositoryWithStorage(owner, repo, branch, repositoryId) { const targetDir = path.join( - process.env.ATTACHED_REPOS_DIR || '/tmp/attached-repos', + process.env.ATTACHED_REPOS_DIR, `${owner}__${repo}__${branch}` ); @@ -320,7 +699,7 @@ class GitHubIntegrationService { // Legacy method - download repository files locally (backwards compatibility) async downloadRepository(owner, repo, branch) { const targetDir = path.join( - process.env.ATTACHED_REPOS_DIR || '/tmp/attached-repos', + process.env.ATTACHED_REPOS_DIR, `${owner}__${repo}__${branch}` ); @@ -399,4 +778,4 @@ class GitHubIntegrationService { } } -module.exports = GitHubIntegrationService; +module.exports = GitHubIntegrationService; \ No newline at end of file diff --git a/services/git-integration/src/services/github-oauth.js b/services/git-integration/src/services/github-oauth.js index f5f464e..b2b0ff0 100644 --- a/services/git-integration/src/services/github-oauth.js +++ b/services/git-integration/src/services/github-oauth.js @@ -14,16 +14,26 @@ class GitHubOAuthService { } // Generate GitHub OAuth URL - getAuthUrl(state) { + getAuthUrl(state, userId = null) { if (!this.clientId) { throw new Error('GitHub OAuth not configured'); } + // If a userId is provided, append it to the redirect_uri so the callback can link token to that user + let redirectUri = this.redirectUri; + if (userId) { + const hasQuery = redirectUri.includes('?'); + redirectUri = `${redirectUri}${hasQuery ? '&' : '?'}user_id=${encodeURIComponent(userId)}`; + } + + // Also embed userId into the OAuth state for fallback extraction in callback + const stateWithUser = userId ? `${state}|uid=${userId}` : state; + const params = new URLSearchParams({ client_id: this.clientId, - redirect_uri: this.redirectUri, + redirect_uri: redirectUri, scope: 'repo,user:email', - state: state, + state: stateWithUser, allow_signup: 'false' }); @@ -61,48 +71,116 @@ class GitHubOAuthService { return user; } - // Store GitHub token (no user ID) - async storeToken(accessToken, githubUser) { + // Store GitHub token with user ID + async storeToken(accessToken, githubUser, userId = null) { const query = ` - INSERT INTO github_user_tokens (access_token, github_username, github_user_id, scopes, expires_at) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (id) + INSERT INTO github_user_tokens (access_token, github_username, github_user_id, scopes, expires_at, user_id, is_primary) + VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (user_id, github_username) DO UPDATE SET access_token = $1, - github_username = $2, github_user_id = $3, scopes = $4, expires_at = $5, + is_primary = $7, updated_at = NOW() RETURNING * `; + // If this is the first GitHub account for the user, make it primary + const isPrimary = userId ? await this.isFirstGitHubAccountForUser(userId) : false; + const result = await database.query(query, [ accessToken, githubUser.login, githubUser.id, JSON.stringify(['repo', 'user:email']), - null + null, + userId, + isPrimary ]); return result.rows[0]; } - // Get stored token + // Check if this is the first GitHub account for a user + async isFirstGitHubAccountForUser(userId) { + const result = await database.query( + 'SELECT COUNT(*) as count FROM github_user_tokens WHERE user_id = $1', + [userId] + ); + return parseInt(result.rows[0].count) === 0; + } + + // Get stored token (legacy method - gets any token) async getToken() { const query = 'SELECT * FROM github_user_tokens ORDER BY created_at DESC LIMIT 1'; const result = await database.query(query); return result.rows[0]; } + // Get all tokens for a specific user + async getUserTokens(userId) { + const query = 'SELECT * FROM github_user_tokens WHERE user_id = $1 ORDER BY is_primary DESC, created_at DESC'; + const result = await database.query(query, [userId]); + return result.rows; + } + + // Get primary token for a user + async getUserPrimaryToken(userId) { + const query = 'SELECT * FROM github_user_tokens WHERE user_id = $1 AND is_primary = true LIMIT 1'; + const result = await database.query(query, [userId]); + return result.rows[0] || null; + } + + // Find the right token for accessing a specific repository + async findTokenForRepository(userId, owner, repo) { + const tokens = await this.getUserTokens(userId); + + for (const token of tokens) { + try { + const octokit = new Octokit({ auth: token.access_token }); + // Try to access the repository with this token + await octokit.repos.get({ owner, repo }); + console.log(`✅ Found token for ${owner}/${repo}: ${token.github_username}`); + return token; + } catch (error) { + console.log(`❌ Token ${token.github_username} cannot access ${owner}/${repo}: ${error.message}`); + continue; + } + } + + return null; // No token found that can access this repository + } + + // Validate if a token is still valid + async validateToken(accessToken) { + try { + const octokit = new Octokit({ auth: accessToken }); + await octokit.users.getAuthenticated(); + return true; + } catch (error) { + if (error.status === 401) { + return false; + } + throw error; + } + } + // Create authenticated Octokit instance async getAuthenticatedOctokit() { const tokenRecord = await this.getToken(); if (!tokenRecord) { - return new Octokit({ - userAgent: 'CodeNuk-GitIntegration/1.0.0', - }); + throw new Error('No GitHub token found. Please authenticate with GitHub first.'); + } + + // Validate token before using it + const isValid = await this.validateToken(tokenRecord.access_token); + if (!isValid) { + console.warn('GitHub token is invalid or expired, removing from database'); + await this.removeInvalidToken(tokenRecord.id); + throw new Error('GitHub token has expired. Please re-authenticate with GitHub.'); } return new Octokit({ @@ -125,6 +203,51 @@ class GitHubOAuthService { } } + // Remove invalid token from database + async removeInvalidToken(tokenId) { + try { + await database.query('DELETE FROM github_user_tokens WHERE id = $1', [tokenId]); + } catch (error) { + console.error('Error removing invalid token:', error); + } + } + + // Check authentication status + async getAuthStatus() { + const tokenRecord = await this.getToken(); + + if (!tokenRecord) { + return { + connected: false, + requires_auth: true, + auth_url: this.getAuthUrl(Math.random().toString(36).substring(7)) + }; + } + + // Validate token by making a test API call + try { + const octokit = new Octokit({ auth: tokenRecord.access_token }); + await octokit.users.getAuthenticated(); + + return { + connected: true, + github_username: tokenRecord.github_username, + github_user_id: tokenRecord.github_user_id, + scopes: tokenRecord.scopes, + created_at: tokenRecord.created_at + }; + } catch (error) { + console.warn('GitHub token validation failed:', error.message); + // Remove invalid token + await this.removeInvalidToken(tokenRecord.id); + return { + connected: false, + requires_auth: true, + auth_url: this.getAuthUrl(Math.random().toString(36).substring(7)) + }; + } + } + // Revoke token async revokeToken() { const tokenRecord = await this.getToken(); diff --git a/services/git-integration/src/services/gitlab-oauth.js b/services/git-integration/src/services/gitlab-oauth.js new file mode 100644 index 0000000..762c997 --- /dev/null +++ b/services/git-integration/src/services/gitlab-oauth.js @@ -0,0 +1,70 @@ +// services/gitlab-oauth.js +const database = require('../config/database'); + +class GitLabOAuthService { + constructor() { + this.clientId = process.env.GITLAB_CLIENT_ID; + this.clientSecret = process.env.GITLAB_CLIENT_SECRET; + this.baseUrl = (process.env.GITLAB_BASE_URL || 'https://gitlab.com').replace(/\/$/, ''); + this.redirectUri = process.env.GITLAB_REDIRECT_URI || 'http://localhost:8012/api/vcs/gitlab/auth/callback'; + } + + getAuthUrl(state) { + if (!this.clientId) throw new Error('GitLab OAuth not configured'); + const authUrl = `${this.baseUrl}/oauth/authorize`; + const params = new URLSearchParams({ + client_id: this.clientId, + redirect_uri: this.redirectUri, + response_type: 'code', + scope: 'read_api api read_user', + state + }); + return `${authUrl}?${params.toString()}`; + } + + async exchangeCodeForToken(code) { + const tokenUrl = `${this.baseUrl}/oauth/token`; + const resp = await fetch(tokenUrl, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + client_id: this.clientId, + client_secret: this.clientSecret, + code, + grant_type: 'authorization_code', + redirect_uri: this.redirectUri + }) + }); + const data = await resp.json(); + if (!resp.ok || data.error) throw new Error(data.error_description || 'GitLab token exchange failed'); + return data.access_token; + } + + async getUserInfo(accessToken) { + const resp = await fetch(`${this.baseUrl}/api/v4/user`, { + headers: { Authorization: `Bearer ${accessToken}` } + }); + if (!resp.ok) throw new Error('Failed to fetch GitLab user'); + return await resp.json(); + } + + async storeToken(accessToken, user) { + const result = await database.query( + `INSERT INTO gitlab_user_tokens (access_token, gitlab_username, gitlab_user_id, scopes, expires_at) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (id) DO UPDATE SET access_token = EXCLUDED.access_token, gitlab_username = EXCLUDED.gitlab_username, gitlab_user_id = EXCLUDED.gitlab_user_id, scopes = EXCLUDED.scopes, expires_at = EXCLUDED.expires_at, updated_at = NOW() + RETURNING *`, + [accessToken, user.username, user.id, JSON.stringify(['read_api','api','read_user']), null] + ); + return result.rows[0]; + } + + async getToken() { + const r = await database.query('SELECT * FROM gitlab_user_tokens ORDER BY created_at DESC LIMIT 1'); + return r.rows[0]; + } +} + +module.exports = GitLabOAuthService; + + diff --git a/services/git-integration/src/services/provider-registry.js b/services/git-integration/src/services/provider-registry.js new file mode 100644 index 0000000..7cc8610 --- /dev/null +++ b/services/git-integration/src/services/provider-registry.js @@ -0,0 +1,84 @@ +// services/provider-registry.js +// Simple provider registry/factory to resolve adapters by provider key. + +const GithubIntegrationService = require('./github-integration.service'); +const GitlabAdapter = require('./providers/gitlab.adapter'); +const BitbucketAdapter = require('./providers/bitbucket.adapter'); +const GiteaAdapter = require('./providers/gitea.adapter'); + +class GithubAdapter { + constructor() { + this.impl = new GithubIntegrationService(); + } + + parseRepoUrl(url) { + return this.impl.parseGitHubUrl(url); + } + + async checkRepositoryAccess(owner, repo) { + return await this.impl.checkRepositoryAccess(owner, repo); + } + + async fetchRepositoryMetadata(owner, repo) { + return await this.impl.fetchRepositoryMetadata(owner, repo); + } + + async analyzeCodebase(owner, repo, branch) { + return await this.impl.analyzeCodebase(owner, repo, branch); + } + + async ensureRepositoryWebhook(owner, repo, callbackUrl) { + return await this.impl.ensureRepositoryWebhook(owner, repo, callbackUrl); + } + + async syncRepositoryWithGit(owner, repo, branch, repositoryId) { + return await this.impl.syncRepositoryWithGit(owner, repo, branch, repositoryId); + } + + async downloadRepositoryWithStorage(owner, repo, branch, repositoryId) { + return await this.impl.downloadRepositoryWithStorage(owner, repo, branch, repositoryId); + } + + async syncRepositoryWithFallback(owner, repo, branch, repositoryId) { + return await this.impl.syncRepositoryWithFallback(owner, repo, branch, repositoryId); + } + + async getRepositoryDiff(owner, repo, branch, fromSha, toSha) { + return await this.impl.getRepositoryDiff(owner, repo, branch, fromSha, toSha); + } + + async getRepositoryChangesSince(owner, repo, branch, sinceSha) { + return await this.impl.getRepositoryChangesSince(owner, repo, branch, sinceSha); + } + + async cleanupRepositoryStorage(repositoryId) { + return await this.impl.cleanupRepositoryStorage(repositoryId); + } +} + +class ProviderRegistry { + constructor() { + this.providers = new Map(); + // Register GitHub by default + this.providers.set('github', () => new GithubAdapter()); + this.providers.set('gitlab', () => new GitlabAdapter()); + this.providers.set('bitbucket', () => new BitbucketAdapter()); + this.providers.set('gitea', () => new GiteaAdapter()); + } + + register(providerKey, factoryFn) { + this.providers.set(providerKey, factoryFn); + } + + resolve(providerKey) { + const factory = this.providers.get((providerKey || '').toLowerCase()); + if (!factory) { + throw new Error(`Unsupported provider: ${providerKey}`); + } + return factory(); + } +} + +module.exports = new ProviderRegistry(); + + diff --git a/services/git-integration/src/services/providers/bitbucket.adapter.js b/services/git-integration/src/services/providers/bitbucket.adapter.js new file mode 100644 index 0000000..f9d8d81 --- /dev/null +++ b/services/git-integration/src/services/providers/bitbucket.adapter.js @@ -0,0 +1,166 @@ +// services/providers/bitbucket.adapter.js +const VcsProviderInterface = require('../vcs-provider.interface'); +const FileStorageService = require('../file-storage.service'); +const GitRepoService = require('../git-repo.service'); +const BitbucketOAuthService = require('../bitbucket-oauth'); + +class BitbucketAdapter extends VcsProviderInterface { + constructor() { + super(); + this.fileStorageService = new FileStorageService(); + this.gitRepoService = new GitRepoService(); + this.host = process.env.BITBUCKET_BASE_URL || 'bitbucket.org'; + this.oauth = new BitbucketOAuthService(); + } + + parseRepoUrl(url) { + if (!url || typeof url !== 'string') throw new Error('URL must be a non-empty string'); + let normalized = url.trim(); + if (!normalized.startsWith('http')) normalized = 'https://' + normalized; + const host = normalized.replace(/^https?:\/\//, '').split('/')[0]; + if (!host.includes('bitbucket')) throw new Error(`Invalid Bitbucket repository URL: ${url}`); + const parts = normalized.split(host)[1].replace(/^\//, '').split('#')[0].split('?')[0].split('/'); + const owner = parts[0]; + const repo = (parts[1] || '').replace(/\.git$/, ''); + if (!owner || !repo) throw new Error(`Invalid Bitbucket repository URL: ${url}`); + let branch = 'main'; + // Bitbucket uses /branch/ sometimes in URLs + const branchIdx = parts.findIndex(p => p === 'branch'); + if (branchIdx >= 0 && parts[branchIdx + 1]) branch = parts[branchIdx + 1]; + return { owner, repo, branch }; + } + + async checkRepositoryAccess(owner, repo) { + const token = await this.oauth.getToken(); + + try { + // Always try with authentication first (like GitHub behavior) + if (token?.access_token) { + const resp = await fetch(`https://api.bitbucket.org/2.0/repositories/${owner}/${repo}`, { headers: { Authorization: `Bearer ${token.access_token}` } }); + if (resp.status === 200) { + const d = await resp.json(); + const isPrivate = !!d.is_private; + return { exists: true, isPrivate, hasAccess: true, requiresAuth: isPrivate }; + } + } + + // No token or token failed: try without authentication + const resp = await fetch(`https://api.bitbucket.org/2.0/repositories/${owner}/${repo}`); + if (resp.status === 200) { + const d = await resp.json(); + const isPrivate = !!d.is_private; + return { exists: true, isPrivate, hasAccess: true, requiresAuth: false }; + } + if (resp.status === 404 || resp.status === 403) { + // Repository exists but requires authentication (like GitHub behavior) + return { exists: resp.status !== 404 ? true : false, isPrivate: true, hasAccess: false, requiresAuth: true }; + } + } catch (error) { + // If any error occurs, assume repository requires authentication + return { exists: false, isPrivate: null, hasAccess: false, requiresAuth: true, error: 'Repository not found or requires authentication' }; + } + + return { exists: false, isPrivate: null, hasAccess: false, requiresAuth: true, error: 'Repository not found or requires authentication' }; + } + + async fetchRepositoryMetadata(owner, repo) { + const token = await this.oauth.getToken(); + if (token?.access_token) { + try { + const resp = await fetch(`https://api.bitbucket.org/2.0/repositories/${owner}/${repo}`, { headers: { Authorization: `Bearer ${token.access_token}` } }); + if (resp.ok) { + const d = await resp.json(); + // Bitbucket default branch is in mainbranch.name + return { full_name: d.full_name, visibility: d.is_private ? 'private' : 'public', default_branch: d.mainbranch?.name || 'main', updated_at: d.updated_on }; + } + } catch (_) {} + } + return { full_name: `${owner}/${repo}`, visibility: 'public', default_branch: 'main', updated_at: new Date().toISOString() }; + } + + async analyzeCodebase(owner, repo, branch) { + return { total_files: 0, total_size: 0, directories: [], branch }; + } + + async ensureRepositoryWebhook(owner, repo, callbackUrl) { + try { + if (!callbackUrl) return { created: false, reason: 'missing_callback_url' }; + const token = await this.oauth.getToken(); + if (!token?.access_token) return { created: false, reason: 'missing_token' }; + // Bitbucket Cloud webhooks don't support shared secret directly; create basic push webhook + const resp = await fetch(`https://api.bitbucket.org/2.0/repositories/${owner}/${repo}/hooks`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token.access_token}` }, + body: JSON.stringify({ description: 'CodeNuk Git Integration', url: callbackUrl, active: true, events: ['repo:push'] }) + }); + if (resp.ok) { const d = await resp.json(); return { created: true, hook_id: d.uuid || d.id }; } + return { created: false, reason: `status_${resp.status}` }; + } catch (e) { + return { created: false, error: e.message }; + } + } + + async syncRepositoryWithGit(owner, repo, branch, repositoryId) { + const database = require('../../config/database'); + let storageRecord = null; + try { + const token = await this.oauth.getToken(); + let repoPath = null; + if (token?.access_token) { + repoPath = await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, branch, this.host, token.access_token, 'bearer'); + } else { + repoPath = await this.gitRepoService.cloneIfMissingWithHost(owner, repo, branch, this.host); + } + storageRecord = await this.fileStorageService.initializeRepositoryStorage(repositoryId, repoPath); + await this.fileStorageService.processDirectoryStructure(storageRecord.id, repositoryId, repoPath); + const finalStorage = await this.fileStorageService.completeRepositoryStorage(storageRecord.id); + + // Get the current HEAD commit SHA and update the repository record + try { + const headSha = await this.gitRepoService.getHeadSha(repoPath); + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), last_synced_commit_sha = $1, updated_at = NOW() WHERE id = $2', + [headSha, repositoryId] + ); + } catch (e) { + // If we can't get the SHA, still update the sync time + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), updated_at = NOW() WHERE id = $1', + [repositoryId] + ); + } + return { success: true, method: 'git', targetDir: repoPath, storage: finalStorage }; + } catch (e) { + if (storageRecord) await this.fileStorageService.markStorageFailed(storageRecord.id, e.message); + return { success: false, error: e.message }; + } + } + + async downloadRepositoryWithStorage() { + return { success: false, error: 'api_download_not_implemented' }; + } + + async syncRepositoryWithFallback(owner, repo, branch, repositoryId) { + const git = await this.syncRepositoryWithGit(owner, repo, branch, repositoryId); + if (git.success) return git; + return { success: false, error: git.error }; + } + + async getRepositoryDiff(owner, repo, branch, fromSha, toSha) { + const repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + return await this.gitRepoService.getDiff(repoPath, fromSha || null, toSha || 'HEAD', { patch: true }); + } + + async getRepositoryChangesSince(owner, repo, branch, sinceSha) { + const repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + return await this.gitRepoService.getChangedFilesSince(repoPath, sinceSha); + } + + async cleanupRepositoryStorage(repositoryId) { + return await this.fileStorageService.cleanupRepositoryStorage(repositoryId); + } +} + +module.exports = BitbucketAdapter; + + diff --git a/services/git-integration/src/services/providers/gitea.adapter.js b/services/git-integration/src/services/providers/gitea.adapter.js new file mode 100644 index 0000000..fccdf72 --- /dev/null +++ b/services/git-integration/src/services/providers/gitea.adapter.js @@ -0,0 +1,166 @@ +// services/providers/gitea.adapter.js +const VcsProviderInterface = require('../vcs-provider.interface'); +const FileStorageService = require('../file-storage.service'); +const GitRepoService = require('../git-repo.service'); +const GiteaOAuthService = require('../gitea-oauth'); + +class GiteaAdapter extends VcsProviderInterface { + constructor() { + super(); + this.fileStorageService = new FileStorageService(); + this.gitRepoService = new GitRepoService(); + this.host = process.env.GITEA_BASE_URL || 'gitea.com'; + this.oauth = new GiteaOAuthService(); + } + + parseRepoUrl(url) { + if (!url || typeof url !== 'string') throw new Error('URL must be a non-empty string'); + let normalized = url.trim(); + if (!normalized.startsWith('http')) normalized = 'https://' + normalized; + const host = normalized.replace(/^https?:\/\//, '').split('/')[0]; + // Gitea can be self-hosted; accept any host when explicitly using /api/vcs/gitea + const parts = normalized.split(host)[1].replace(/^\//, '').split('#')[0].split('?')[0].split('/'); + const owner = parts[0]; + const repo = (parts[1] || '').replace(/\.git$/, ''); + if (!owner || !repo) throw new Error(`Invalid Gitea repository URL: ${url}`); + let branch = 'main'; + const treeIdx = parts.findIndex(p => p === 'tree'); + if (treeIdx >= 0 && parts[treeIdx + 1]) branch = parts[treeIdx + 1]; + return { owner, repo, branch }; + } + + async checkRepositoryAccess(owner, repo) { + const token = await this.oauth.getToken(); + const base = (process.env.GITEA_BASE_URL || 'https://gitea.com').replace(/\/$/, ''); + + try { + // Always try with authentication first (like GitHub behavior) + if (token?.access_token) { + const resp = await fetch(`${base}/api/v1/repos/${owner}/${repo}`, { headers: { Authorization: `Bearer ${token.access_token}` } }); + if (resp.status === 200) { + const d = await resp.json(); + const isPrivate = !!d.private; + return { exists: true, isPrivate, hasAccess: true, requiresAuth: isPrivate }; + } + } + + // No token or token failed: try without authentication + const resp = await fetch(`${base}/api/v1/repos/${owner}/${repo}`); + if (resp.status === 200) { + const d = await resp.json(); + return { exists: true, isPrivate: !!d.private, hasAccess: true, requiresAuth: false }; + } + if (resp.status === 404 || resp.status === 403) { + // Repository exists but requires authentication (like GitHub behavior) + return { exists: resp.status !== 404 ? true : false, isPrivate: true, hasAccess: false, requiresAuth: true }; + } + } catch (error) { + // If any error occurs, assume repository requires authentication + return { exists: false, isPrivate: null, hasAccess: false, requiresAuth: true, error: 'Repository not found or requires authentication' }; + } + + return { exists: false, isPrivate: null, hasAccess: false, requiresAuth: true, error: 'Repository not found or requires authentication' }; + } + + async fetchRepositoryMetadata(owner, repo) { + const token = await this.oauth.getToken(); + const base = (process.env.GITEA_BASE_URL || 'https://gitea.com').replace(/\/$/, ''); + if (token?.access_token) { + try { + const resp = await fetch(`${base}/api/v1/repos/${owner}/${repo}`, { headers: { Authorization: `Bearer ${token.access_token}` } }); + if (resp.ok) { + const d = await resp.json(); + return { full_name: d.full_name || `${owner}/${repo}`, visibility: d.private ? 'private' : 'public', default_branch: d.default_branch || 'main', updated_at: d.updated_at }; + } + } catch (_) {} + } + return { full_name: `${owner}/${repo}`, visibility: 'public', default_branch: 'main', updated_at: new Date().toISOString() }; + } + + async analyzeCodebase(owner, repo, branch) { + return { total_files: 0, total_size: 0, directories: [], branch }; + } + + async ensureRepositoryWebhook(owner, repo, callbackUrl) { + try { + if (!callbackUrl) return { created: false, reason: 'missing_callback_url' }; + const token = await this.oauth.getToken(); + if (!token?.access_token) return { created: false, reason: 'missing_token' }; + const base = (process.env.GITEA_BASE_URL || 'https://gitea.com').replace(/\/$/, ''); + const secret = process.env.GITEA_WEBHOOK_SECRET || ''; + const resp = await fetch(`${base}/api/v1/repos/${owner}/${repo}/hooks`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token.access_token}` }, + body: JSON.stringify({ type: 'gitea', config: { url: callbackUrl, content_type: 'json', secret: secret || undefined }, events: ['push'], active: true }) + }); + if (resp.ok) { const d = await resp.json(); return { created: true, hook_id: d.id }; } + return { created: false, reason: `status_${resp.status}` }; + } catch (e) { + return { created: false, error: e.message }; + } + } + + async syncRepositoryWithGit(owner, repo, branch, repositoryId) { + const database = require('../../config/database'); + let storageRecord = null; + try { + const token = await this.oauth.getToken(); + let repoPath = null; + if (token?.access_token) { + repoPath = await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, branch, this.host, token.access_token, 'oauth2'); + } else { + repoPath = await this.gitRepoService.cloneIfMissingWithHost(owner, repo, branch, this.host); + } + storageRecord = await this.fileStorageService.initializeRepositoryStorage(repositoryId, repoPath); + await this.fileStorageService.processDirectoryStructure(storageRecord.id, repositoryId, repoPath); + const finalStorage = await this.fileStorageService.completeRepositoryStorage(storageRecord.id); + + // Get the current HEAD commit SHA and update the repository record + try { + const headSha = await this.gitRepoService.getHeadSha(repoPath); + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), last_synced_commit_sha = $1, updated_at = NOW() WHERE id = $2', + [headSha, repositoryId] + ); + } catch (e) { + // If we can't get the SHA, still update the sync time + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), updated_at = NOW() WHERE id = $1', + [repositoryId] + ); + } + return { success: true, method: 'git', targetDir: repoPath, storage: finalStorage }; + } catch (e) { + if (storageRecord) await this.fileStorageService.markStorageFailed(storageRecord.id, e.message); + return { success: false, error: e.message }; + } + } + + async downloadRepositoryWithStorage() { + return { success: false, error: 'api_download_not_implemented' }; + } + + async syncRepositoryWithFallback(owner, repo, branch, repositoryId) { + const git = await this.syncRepositoryWithGit(owner, repo, branch, repositoryId); + if (git.success) return git; + return { success: false, error: git.error }; + } + + async getRepositoryDiff(owner, repo, branch, fromSha, toSha) { + const repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + return await this.gitRepoService.getDiff(repoPath, fromSha || null, toSha || 'HEAD', { patch: true }); + } + + async getRepositoryChangesSince(owner, repo, branch, sinceSha) { + const repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + return await this.gitRepoService.getChangedFilesSince(repoPath, sinceSha); + } + + async cleanupRepositoryStorage(repositoryId) { + return await this.fileStorageService.cleanupRepositoryStorage(repositoryId); + } +} + +module.exports = GiteaAdapter; + + diff --git a/services/git-integration/src/services/providers/gitlab.adapter.js b/services/git-integration/src/services/providers/gitlab.adapter.js new file mode 100644 index 0000000..91b5b3b --- /dev/null +++ b/services/git-integration/src/services/providers/gitlab.adapter.js @@ -0,0 +1,177 @@ +// services/providers/gitlab.adapter.js +const VcsProviderInterface = require('../vcs-provider.interface'); +const FileStorageService = require('../file-storage.service'); +const GitRepoService = require('../git-repo.service'); +const GitLabOAuthService = require('../gitlab-oauth'); + +class GitlabAdapter extends VcsProviderInterface { + constructor() { + super(); + this.fileStorageService = new FileStorageService(); + this.gitRepoService = new GitRepoService(); + this.host = process.env.GITLAB_BASE_URL || 'gitlab.com'; + this.oauth = new GitLabOAuthService(); + } + + parseRepoUrl(url) { + if (!url || typeof url !== 'string') throw new Error('URL must be a non-empty string'); + let normalized = url.trim(); + if (!normalized.startsWith('http')) normalized = 'https://' + normalized; + const host = normalized.replace(/^https?:\/\//, '').split('/')[0]; + if (!host.includes('gitlab')) throw new Error(`Invalid GitLab repository URL: ${url}`); + const parts = normalized.split(host)[1].replace(/^\//, '').split('#')[0].split('?')[0].split('/'); + const owner = parts[0]; + const repo = (parts[1] || '').replace(/\.git$/, ''); + if (!owner || !repo) throw new Error(`Invalid GitLab repository URL: ${url}`); + let branch = 'main'; + const treeIdx = parts.findIndex(p => p === 'tree'); + if (treeIdx >= 0 && parts[treeIdx + 1]) branch = parts[treeIdx + 1]; + return { owner, repo, branch }; + } + + async checkRepositoryAccess(owner, repo) { + const token = await this.oauth.getToken(); + const base = (process.env.GITLAB_BASE_URL || 'https://gitlab.com').replace(/\/$/, ''); + + try { + // Always try with authentication first (like GitHub behavior) + if (token?.access_token) { + const resp = await fetch(`${base}/api/v4/projects/${encodeURIComponent(`${owner}/${repo}`)}`, { headers: { Authorization: `Bearer ${token.access_token}` } }); + if (resp.status === 200) { + const data = await resp.json(); + return { exists: true, isPrivate: data.visibility !== 'public', hasAccess: true, requiresAuth: data.visibility !== 'public' }; + } + } + + // No token or token failed: try without authentication + const resp = await fetch(`${base}/api/v4/projects/${encodeURIComponent(`${owner}/${repo}`)}`); + if (resp.status === 200) { + const data = await resp.json(); + return { exists: true, isPrivate: data.visibility !== 'public', hasAccess: true, requiresAuth: false }; + } + if (resp.status === 404 || resp.status === 403) { + // Repository exists but requires authentication (like GitHub behavior) + return { exists: resp.status !== 404 ? true : false, isPrivate: true, hasAccess: false, requiresAuth: true }; + } + } catch (error) { + // If any error occurs, assume repository requires authentication + return { exists: false, isPrivate: null, hasAccess: false, requiresAuth: true, error: 'Repository not found or requires authentication' }; + } + + return { exists: false, isPrivate: null, hasAccess: false, requiresAuth: true, error: 'Repository not found or requires authentication' }; + } + + async fetchRepositoryMetadata(owner, repo) { + const token = await this.oauth.getToken(); + const base = (process.env.GITLAB_BASE_URL || 'https://gitlab.com').replace(/\/$/, ''); + if (token?.access_token) { + try { + const resp = await fetch(`${base}/api/v4/projects/${encodeURIComponent(`${owner}/${repo}`)}`, { headers: { Authorization: `Bearer ${token.access_token}` } }); + if (resp.ok) { + const d = await resp.json(); + return { full_name: d.path_with_namespace, visibility: d.visibility === 'public' ? 'public' : 'private', default_branch: d.default_branch || 'main', updated_at: d.last_activity_at }; + } + } catch (_) {} + } + return { full_name: `${owner}/${repo}`, visibility: 'public', default_branch: 'main', updated_at: new Date().toISOString() }; + } + + async analyzeCodebase(owner, repo, branch) { + // Not using API; actual analysis happens after sync in storage + return { total_files: 0, total_size: 0, directories: [], branch }; + } + + async ensureRepositoryWebhook(owner, repo, callbackUrl) { + try { + if (!callbackUrl) return { created: false, reason: 'missing_callback_url' }; + const token = await this.oauth.getToken(); + if (!token?.access_token) return { created: false, reason: 'missing_token' }; + const base = (process.env.GITLAB_BASE_URL || 'https://gitlab.com').replace(/\/$/, ''); + const secret = process.env.GITLAB_WEBHOOK_SECRET || ''; + const resp = await fetch(`${base}/api/v4/projects/${encodeURIComponent(`${owner}/${repo}`)}/hooks`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token.access_token}` }, + body: JSON.stringify({ url: callbackUrl, push_events: true, token: secret || undefined, enable_ssl_verification: true }) + }); + if (resp.ok) { const data = await resp.json(); return { created: true, hook_id: data.id }; } + return { created: false, reason: `status_${resp.status}` }; + } catch (e) { + return { created: false, error: e.message }; + } + } + + async syncRepositoryWithGit(owner, repo, branch, repositoryId) { + const database = require('../../config/database'); + let storageRecord = null; + try { + const token = await this.oauth.getToken(); + let repoPath = null; + + // Always try with authentication first for GitLab, even for public repos + // because GitLab often requires auth for git operations + if (token?.access_token) { + repoPath = await this.gitRepoService.cloneIfMissingWithAuth(owner, repo, branch, this.host, token.access_token, 'oauth2'); + } else { + // If no token, try without auth first, but if it fails, require authentication + try { + repoPath = await this.gitRepoService.cloneIfMissingWithHost(owner, repo, branch, this.host); + } catch (cloneError) { + // If clone fails without auth, this means the repo requires authentication + throw new Error(`GitLab repository requires authentication: ${cloneError.message}`); + } + } + + storageRecord = await this.fileStorageService.initializeRepositoryStorage(repositoryId, repoPath); + await this.fileStorageService.processDirectoryStructure(storageRecord.id, repositoryId, repoPath); + const finalStorage = await this.fileStorageService.completeRepositoryStorage(storageRecord.id); + + // Get the current HEAD commit SHA and update the repository record + try { + const headSha = await this.gitRepoService.getHeadSha(repoPath); + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), last_synced_commit_sha = $1, updated_at = NOW() WHERE id = $2', + [headSha, repositoryId] + ); + } catch (e) { + // If we can't get the SHA, still update the sync time + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), updated_at = NOW() WHERE id = $1', + [repositoryId] + ); + } + return { success: true, method: 'git', targetDir: repoPath, storage: finalStorage }; + } catch (e) { + if (storageRecord) await this.fileStorageService.markStorageFailed(storageRecord.id, e.message); + return { success: false, error: e.message }; + } + } + + async downloadRepositoryWithStorage(owner, repo, branch, repositoryId) { + // Not implemented for GitLab without API token; fallback to git + return { success: false, error: 'api_download_not_implemented' }; + } + + async syncRepositoryWithFallback(owner, repo, branch, repositoryId) { + const git = await this.syncRepositoryWithGit(owner, repo, branch, repositoryId); + if (git.success) return git; + return { success: false, error: git.error }; + } + + async getRepositoryDiff(owner, repo, branch, fromSha, toSha) { + const repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + return await this.gitRepoService.getDiff(repoPath, fromSha || null, toSha || 'HEAD', { patch: true }); + } + + async getRepositoryChangesSince(owner, repo, branch, sinceSha) { + const repoPath = this.gitRepoService.getLocalRepoPath(owner, repo, branch); + return await this.gitRepoService.getChangedFilesSince(repoPath, sinceSha); + } + + async cleanupRepositoryStorage(repositoryId) { + return await this.fileStorageService.cleanupRepositoryStorage(repositoryId); + } +} + +module.exports = GitlabAdapter; + + diff --git a/services/git-integration/src/services/vcs-provider.interface.js b/services/git-integration/src/services/vcs-provider.interface.js new file mode 100644 index 0000000..99a51a5 --- /dev/null +++ b/services/git-integration/src/services/vcs-provider.interface.js @@ -0,0 +1,62 @@ +// services/vcs-provider.interface.js +// Provider-agnostic interface (shape) for VCS adapters. + +class VcsProviderInterface { + // Parse a repository URL and return { owner, repo, branch } + parseRepoUrl(url) { + throw new Error('parseRepoUrl not implemented'); + } + + // Access check for repository + async checkRepositoryAccess(owner, repo) { + throw new Error('checkRepositoryAccess not implemented'); + } + + // Fetch repository metadata + async fetchRepositoryMetadata(owner, repo) { + throw new Error('fetchRepositoryMetadata not implemented'); + } + + // Analyze codebase (lightweight tree analysis) + async analyzeCodebase(owner, repo, branch) { + throw new Error('analyzeCodebase not implemented'); + } + + // Ensure a webhook exists for this repository + async ensureRepositoryWebhook(owner, repo, callbackUrl) { + throw new Error('ensureRepositoryWebhook not implemented'); + } + + // Sync using git; index to storage/DB via file storage service + async syncRepositoryWithGit(owner, repo, branch, repositoryId) { + throw new Error('syncRepositoryWithGit not implemented'); + } + + // Fallback: API download + storage + async downloadRepositoryWithStorage(owner, repo, branch, repositoryId) { + throw new Error('downloadRepositoryWithStorage not implemented'); + } + + // Try git first then API + async syncRepositoryWithFallback(owner, repo, branch, repositoryId) { + throw new Error('syncRepositoryWithFallback not implemented'); + } + + // Get diff and change lists + async getRepositoryDiff(owner, repo, branch, fromSha, toSha) { + throw new Error('getRepositoryDiff not implemented'); + } + + async getRepositoryChangesSince(owner, repo, branch, sinceSha) { + throw new Error('getRepositoryChangesSince not implemented'); + } + + // Cleanup local storage/DB artifacts + async cleanupRepositoryStorage(repositoryId) { + throw new Error('cleanupRepositoryStorage not implemented'); + } +} + +module.exports = VcsProviderInterface; + + diff --git a/services/git-integration/src/services/vcs-webhook.service.js b/services/git-integration/src/services/vcs-webhook.service.js new file mode 100644 index 0000000..8eab88f --- /dev/null +++ b/services/git-integration/src/services/vcs-webhook.service.js @@ -0,0 +1,456 @@ +// services/vcs-webhook.service.js +const database = require('../config/database'); +const providerRegistry = require('./provider-registry'); + +class VcsWebhookService { + constructor() { + this._schemaChecked = false; + this._webhookEventColumns = new Map(); + } + + // Process webhook events for any VCS provider + async processWebhookEvent(providerKey, eventType, payload) { + console.log(`Processing ${providerKey} webhook event: ${eventType}`); + + try { + switch (eventType) { + case 'push': + await this.handlePushEvent(providerKey, payload); + break; + case 'pull_request': + case 'merge_request': + await this.handlePullRequestEvent(providerKey, payload); + break; + case 'repository': + await this.handleRepositoryEvent(providerKey, payload); + break; + case 'ping': + await this.handlePingEvent(providerKey, payload); + break; + default: + console.log(`Unhandled webhook event type: ${eventType} for provider: ${providerKey}`); + } + } catch (error) { + console.error(`Error processing ${providerKey} webhook event ${eventType}:`, error); + throw error; + } + } + + // Handle push events for any provider + async handlePushEvent(providerKey, payload) { + const { repository, project, ref, commits, pusher, user } = payload; + + // Build a provider-normalized repo object for extraction + let repoForExtraction = repository || {}; + if (providerKey === 'gitlab') { + // GitLab push payload includes repository (limited fields) and project (full namespace) + // Prefer project.path_with_namespace when available + repoForExtraction = { + path_with_namespace: project?.path_with_namespace || repository?.path_with_namespace, + full_name: project?.path_with_namespace || repository?.name, + default_branch: project?.default_branch || repository?.default_branch + }; + } + + // Extract provider-specific data + const repoData = this.extractRepositoryData(providerKey, repoForExtraction); + const commitData = this.extractCommitData(providerKey, commits); + const branchFromRef = this.extractBranchFromRef(providerKey, ref, repoForExtraction); + + console.log(`Push event received for ${repoData.full_name} on ${branchFromRef}`); + console.log(`Pusher: ${pusher?.name || user?.name || 'Unknown'}, Commits: ${commitData.length}`); + + // Persist raw webhook and commit SHAs + try { + // Find repository_id in our DB if attached + const repoLookup = await database.query( + 'SELECT id FROM github_repositories WHERE owner_name = $1 AND repository_name = $2 ORDER BY created_at DESC LIMIT 1', + [repoData.owner, repoData.name] + ); + const repoId = repoLookup.rows[0]?.id || null; + + // Insert into provider-specific webhooks table + await this.insertWebhookEvent(providerKey, { + delivery_id: payload.delivery_id || payload.object_attributes?.id || null, + event_type: 'push', + action: null, + owner_name: repoData.owner, + repository_name: repoData.name, + repository_id: repoId, + ref: ref, + before_sha: payload.before || null, + after_sha: payload.after || null, + commit_count: commitData.length, + payload: JSON.stringify(payload) + }); + + if (repoId) { + // Insert into repository_commit_events + await database.query( + `INSERT INTO repository_commit_events (repository_id, ref, before_sha, after_sha, commit_count) + VALUES ($1, $2, $3, $4, $5)`, + [repoId, ref, payload.before || null, payload.after || null, commitData.length] + ); + + // Persist per-commit details and file paths + if (commitData.length > 0) { + for (const commit of commitData) { + try { + const commitInsert = await database.query( + `INSERT INTO repository_commit_details (repository_id, commit_sha, author_name, author_email, message, url) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (repository_id, commit_sha) DO UPDATE SET + author_name = EXCLUDED.author_name, + author_email = EXCLUDED.author_email, + message = EXCLUDED.message, + url = EXCLUDED.url + RETURNING id`, + [ + repoId, + commit.id, + commit.author?.name || null, + commit.author?.email || null, + commit.message || null, + commit.url || null + ] + ); + + const commitId = commitInsert.rows[0].id; + + // Insert file changes + const addFiles = (paths = [], changeType) => paths.forEach(async (p) => { + try { + await database.query( + `INSERT INTO repository_commit_files (commit_id, change_type, file_path) + VALUES ($1, $2, $3)`, + [commitId, changeType, p] + ); + } catch (_) {} + }); + + addFiles(commit.added || [], 'added'); + addFiles(commit.modified || [], 'modified'); + addFiles(commit.removed || [], 'removed'); + } catch (commitErr) { + console.warn('Failed to persist commit details:', commitErr.message); + } + } + } + + // Kick off background re-sync + setImmediate(async () => { + try { + const provider = providerRegistry.resolve(providerKey); + + // Mark syncing + await database.query( + 'UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', + ['syncing', repoId] + ); + + // Clean existing storage then git-sync and re-index + await provider.cleanupRepositoryStorage(repoId); + const downloadResult = await provider.syncRepositoryWithFallback( + repoData.owner, + repoData.name, + branchFromRef, + repoId + ); + + await database.query( + 'UPDATE github_repositories SET sync_status = $1, last_synced_at = NOW(), updated_at = NOW() WHERE id = $2', + [downloadResult.success ? 'synced' : 'error', repoId] + ); + } catch (syncErr) { + console.warn('Auto-sync failed:', syncErr.message); + try { + await database.query( + 'UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', + ['error', repoId] + ); + } catch (_) {} + } + }); + } + } catch (e) { + console.warn('Failed to persist push webhook details:', e.message); + } + } + + // Extract repository data based on provider + extractRepositoryData(providerKey, repository) { + switch (providerKey) { + case 'github': + return { + owner: repository.owner.login, + name: repository.name, + full_name: repository.full_name + }; + case 'gitlab': + { + const ns = repository?.path_with_namespace || repository?.full_name || ''; + const parts = typeof ns === 'string' ? ns.split('/') : []; + return { + owner: parts[0] || null, + name: parts[1] || repository?.name || null, + full_name: ns || [parts[0], parts[1]].filter(Boolean).join('/') + }; + } + case 'bitbucket': + return { + owner: repository.full_name.split('/')[0], + name: repository.full_name.split('/')[1], + full_name: repository.full_name + }; + case 'gitea': + return { + owner: repository.full_name.split('/')[0], + name: repository.full_name.split('/')[1], + full_name: repository.full_name + }; + default: + return { owner: 'unknown', name: 'unknown', full_name: 'unknown/unknown' }; + } + } + + // Extract commit data based on provider + extractCommitData(providerKey, commits) { + if (!Array.isArray(commits)) return []; + + switch (providerKey) { + case 'github': + return commits.map(commit => ({ + id: commit.id, + author: commit.author, + message: commit.message, + url: commit.url, + added: commit.added || [], + modified: commit.modified || [], + removed: commit.removed || [] + })); + case 'gitlab': + return commits.map(commit => ({ + id: commit.id, + author: { + name: commit.author?.name, + email: commit.author?.email + }, + message: commit.message, + url: commit.url, + added: commit.added || [], + modified: commit.modified || [], + removed: commit.removed || [] + })); + case 'bitbucket': + return commits.map(commit => ({ + id: commit.hash, + author: { + name: commit.author?.user?.display_name, + email: commit.author?.user?.email_address + }, + message: commit.message, + url: commit.links?.html?.href, + added: commit.added || [], + modified: commit.modified || [], + removed: commit.removed || [] + })); + case 'gitea': + return commits.map(commit => ({ + id: commit.id, + author: { + name: commit.author?.name, + email: commit.author?.email + }, + message: commit.message, + url: commit.url, + added: commit.added || [], + modified: commit.modified || [], + removed: commit.removed || [] + })); + default: + return []; + } + } + + // Extract branch from ref based on provider + extractBranchFromRef(providerKey, ref, repository) { + if (!ref) return repository?.default_branch || 'main'; + + switch (providerKey) { + case 'github': + case 'gitlab': + case 'gitea': + return ref.startsWith('refs/heads/') ? ref.replace('refs/heads/', '') : ref; + case 'bitbucket': + return ref.startsWith('refs/heads/') ? ref.replace('refs/heads/', '') : ref; + default: + return 'main'; + } + } + + // Insert webhook event into provider-specific table + async insertWebhookEvent(providerKey, eventData) { + const tableName = `${providerKey}_webhooks`; + const query = ` + INSERT INTO ${tableName} (delivery_id, event_type, action, owner_name, repository_name, repository_id, ref, before_sha, after_sha, commit_count, payload) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) + `; + + await database.query(query, [ + eventData.delivery_id, + eventData.event_type, + eventData.action, + eventData.owner_name, + eventData.repository_name, + eventData.repository_id, + eventData.ref, + eventData.before_sha, + eventData.after_sha, + eventData.commit_count, + eventData.payload + ]); + } + + // Handle pull/merge request events + async handlePullRequestEvent(providerKey, payload) { + const { action, pull_request, merge_request } = payload; + const pr = pull_request || merge_request; + const repository = payload.repository; + + console.log(`Pull/Merge request ${action} for ${repository?.full_name || repository?.path_with_namespace}: #${pr?.number || pr?.iid}`); + + // Log PR events for potential future integration + await this.logWebhookEvent(providerKey, 'pull_request', action, repository?.full_name || repository?.path_with_namespace, { + pr_number: pr?.number || pr?.iid, + pr_title: pr?.title, + pr_state: pr?.state, + pr_url: pr?.html_url || pr?.web_url + }); + } + + // Handle repository events + async handleRepositoryEvent(providerKey, payload) { + const { action, repository } = payload; + + console.log(`Repository ${action} event for ${repository?.full_name || repository?.path_with_namespace}`); + + if (action === 'deleted') { + // Handle repository deletion + const repoData = this.extractRepositoryData(providerKey, repository); + const query = ` + SELECT gr.id, gr.template_id + FROM github_repositories gr + WHERE gr.owner_name = $1 AND gr.repository_name = $2 + `; + + const result = await database.query(query, [repoData.owner, repoData.name]); + + if (result.rows.length > 0) { + console.log(`Repository ${repoData.full_name} was deleted, marking as inactive`); + await database.query( + 'UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', + ['deleted', result.rows[0].id] + ); + } + } + } + + // Handle ping events + async handlePingEvent(providerKey, payload) { + console.log(`${providerKey} webhook ping received - webhook is working correctly`); + console.log(`Repository: ${payload.repository?.full_name || payload.repository?.path_with_namespace || 'Unknown'}`); + } + + // Log webhook events for debugging and analytics + async logWebhookEvent(providerKey, eventType, action, repositoryFullName, metadata = {}, deliveryId = null, fullPayload = null) { + try { + await this._ensureWebhookEventsSchemaCached(); + + // Build a flexible INSERT based on existing columns + const columns = []; + const placeholders = []; + const values = []; + let i = 1; + + columns.push('event_type'); + placeholders.push(`$${i++}`); + values.push(eventType); + + if (this._webhookEventColumns.has('action')) { + columns.push('action'); + placeholders.push(`$${i++}`); + values.push(action || null); + } + if (this._webhookEventColumns.has('repository_full_name')) { + columns.push('repository_full_name'); + placeholders.push(`$${i++}`); + values.push(repositoryFullName || null); + } + if (this._webhookEventColumns.has('delivery_id')) { + columns.push('delivery_id'); + placeholders.push(`$${i++}`); + values.push(deliveryId || null); + } + if (this._webhookEventColumns.has('metadata')) { + columns.push('metadata'); + placeholders.push(`$${i++}`); + values.push(JSON.stringify({ ...metadata, provider: providerKey })); + } + if (this._webhookEventColumns.has('event_payload')) { + columns.push('event_payload'); + placeholders.push(`$${i++}`); + values.push(JSON.stringify(fullPayload || {})); + } + if (this._webhookEventColumns.has('received_at')) { + columns.push('received_at'); + placeholders.push(`$${i++}`); + values.push(new Date()); + } + if (this._webhookEventColumns.has('processing_status')) { + columns.push('processing_status'); + placeholders.push(`$${i++}`); + values.push('pending'); + } + + const query = `INSERT INTO webhook_events (${columns.join(', ')}) VALUES (${placeholders.join(', ')})`; + await database.query(query, values); + } catch (error) { + console.warn('Failed to log webhook event:', error.message); + } + } + + async _ensureWebhookEventsSchemaCached() { + if (this._schemaChecked) return; + try { + const result = await database.query( + "SELECT column_name, is_nullable FROM information_schema.columns WHERE table_schema='public' AND table_name='webhook_events'" + ); + for (const row of result.rows) { + this._webhookEventColumns.set(row.column_name, row.is_nullable); + } + } catch (e) { + console.warn('Could not introspect webhook_events schema:', e.message); + } finally { + this._schemaChecked = true; + } + } + + // Get recent webhook events + async getRecentWebhookEvents(limit = 50) { + try { + const query = ` + SELECT * FROM webhook_events + ORDER BY received_at DESC + LIMIT $1 + `; + + const result = await database.query(query, [limit]); + return result.rows; + } catch (error) { + console.error('Failed to get webhook events:', error.message); + return []; + } + } +} + +module.exports = VcsWebhookService; diff --git a/services/git-integration/src/services/webhook.service.js b/services/git-integration/src/services/webhook.service.js new file mode 100644 index 0000000..2fbd1ca --- /dev/null +++ b/services/git-integration/src/services/webhook.service.js @@ -0,0 +1,361 @@ +// services/webhook.service.js +const crypto = require('crypto'); +const database = require('../config/database'); +const GitHubIntegrationService = require('./github-integration.service'); + +class WebhookService { + constructor() { + this.webhookSecret = process.env.GITHUB_WEBHOOK_SECRET || 'default-webhook-secret'; + this._schemaChecked = false; + this._webhookEventColumns = new Map(); + this.githubService = new GitHubIntegrationService(); + } + + // Verify GitHub webhook signature + verifySignature(payload, signature) { + if (!signature) { + return false; + } + + const expectedSignature = crypto + .createHmac('sha256', this.webhookSecret) + .update(payload) + .digest('hex'); + + const providedSignature = signature.replace('sha256=', ''); + + return crypto.timingSafeEqual( + Buffer.from(expectedSignature, 'hex'), + Buffer.from(providedSignature, 'hex') + ); + } + + // Process GitHub webhook events + async processWebhookEvent(eventType, payload) { + console.log(`Processing GitHub webhook event: ${eventType}`); + + try { + switch (eventType) { + case 'push': + await this.handlePushEvent(payload); + break; + case 'pull_request': + await this.handlePullRequestEvent(payload); + break; + case 'repository': + await this.handleRepositoryEvent(payload); + break; + case 'ping': + await this.handlePingEvent(payload); + break; + default: + console.log(`Unhandled webhook event type: ${eventType}`); + } + } catch (error) { + console.error(`Error processing webhook event ${eventType}:`, error); + throw error; + } + } + + // Handle push events + async handlePushEvent(payload) { + const { repository, ref, commits, pusher } = payload; + + console.log(`Push event received for ${repository.full_name} on ${ref}`); + console.log(`Pusher: ${pusher.name}, Commits: ${commits.length}`); + + // Persist raw webhook and commit SHAs + try { + const repoOwner = repository.owner.login; + const repoName = repository.name; + const branchFromRef = (ref || '').startsWith('refs/heads/') ? ref.replace('refs/heads/', '') : (repository.default_branch || 'main'); + + // Find repository_id in our DB if attached + const repoLookup = await database.query( + 'SELECT id FROM github_repositories WHERE owner_name = $1 AND repository_name = $2 ORDER BY created_at DESC LIMIT 1', + [repoOwner, repoName] + ); + const repoId = repoLookup.rows[0]?.id || null; + + // Insert into durable github_webhooks table + await database.query( + `INSERT INTO github_webhooks (delivery_id, event_type, action, owner_name, repository_name, repository_id, ref, before_sha, after_sha, commit_count, payload) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`, + [ + payload.delivery_id || null, // may be null if not provided by route; route passes header separately + 'push', + null, + repoOwner, + repoName, + repoId, + ref, + payload.before || null, + payload.after || null, + Array.isArray(commits) ? commits.length : 0, + JSON.stringify(payload) + ] + ); + + if (repoId) { + await database.query( + `INSERT INTO repository_commit_events (repository_id, ref, before_sha, after_sha, commit_count) + VALUES ($1, $2, $3, $4, $5)`, + [repoId, ref, payload.before || null, payload.after || null, Array.isArray(commits) ? commits.length : 0] + ); + + // Persist per-commit details and file paths (added/modified/removed) + if (Array.isArray(commits) && commits.length > 0) { + for (const commit of commits) { + try { + const commitInsert = await database.query( + `INSERT INTO repository_commit_details (repository_id, commit_sha, author_name, author_email, message, url) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (repository_id, commit_sha) DO UPDATE SET + author_name = EXCLUDED.author_name, + author_email = EXCLUDED.author_email, + message = EXCLUDED.message, + url = EXCLUDED.url + RETURNING id`, + [ + repoId, + commit.id, + commit.author?.name || null, + commit.author?.email || null, + commit.message || null, + commit.url || null + ] + ); + + const commitId = commitInsert.rows[0].id; + + const addFiles = (paths = [], changeType) => paths.forEach(async (p) => { + try { + await database.query( + `INSERT INTO repository_commit_files (commit_id, change_type, file_path) + VALUES ($1, $2, $3)`, + [commitId, changeType, p] + ); + } catch (_) {} + }); + + addFiles(commit.added || [], 'added'); + addFiles(commit.modified || [], 'modified'); + addFiles(commit.removed || [], 'removed'); + } catch (commitErr) { + console.warn('Failed to persist commit details:', commitErr.message); + } + } + } + + // Kick off background re-sync to refresh local files and DB (git-based) + setImmediate(async () => { + try { + // Mark syncing + await database.query( + 'UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', + ['syncing', repoId] + ); + + // Clean existing storage then git-sync and re-index + await this.githubService.cleanupRepositoryStorage(repoId); + const downloadResult = await this.githubService.syncRepositoryWithFallback( + repoOwner, + repoName, + branchFromRef, + repoId + ); + + await database.query( + 'UPDATE github_repositories SET sync_status = $1, last_synced_at = NOW(), updated_at = NOW() WHERE id = $2', + [downloadResult.success ? 'synced' : 'error', repoId] + ); + } catch (syncErr) { + console.warn('Auto-sync failed:', syncErr.message); + try { + await database.query( + 'UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', + ['error', repoId] + ); + } catch (_) {} + } + }); + } + } catch (e) { + console.warn('Failed to persist push webhook details:', e.message); + } + + // Find repositories in our database that match this GitHub repository + const query = ` + SELECT gr.*, rs.storage_status, rs.local_path + FROM github_repositories gr + LEFT JOIN repository_storage rs ON gr.id = rs.repository_id + WHERE gr.owner_name = $1 AND gr.repository_name = $2 + `; + + const result = await database.query(query, [repository.owner.login, repository.name]); + + if (result.rows.length > 0) { + console.log(`Found ${result.rows.length} matching repositories in database`); + + // Update last synced timestamp + for (const repo of result.rows) { + await database.query( + 'UPDATE github_repositories SET last_synced_at = NOW(), updated_at = NOW() WHERE id = $1', + [repo.id] + ); + + // If repository is synced, we could trigger a re-sync here + if (repo.storage_status === 'completed') { + console.log(`Repository ${repo.repository_name} is synced, could trigger re-sync`); + // You could add logic here to trigger a background sync + } + } + } else { + console.log(`No matching repositories found for ${repository.full_name}`); + } + } + + // Handle pull request events + async handlePullRequestEvent(payload) { + const { action, pull_request, repository } = payload; + + console.log(`Pull request ${action} for ${repository.full_name}: #${pull_request.number}`); + console.log(`PR Title: ${pull_request.title}`); + console.log(`PR State: ${pull_request.state}`); + + // Log PR events for potential future integration + await this.logWebhookEvent('pull_request', action, repository.full_name, { + pr_number: pull_request.number, + pr_title: pull_request.title, + pr_state: pull_request.state, + pr_url: pull_request.html_url + }); + } + + // Handle repository events + async handleRepositoryEvent(payload) { + const { action, repository } = payload; + + console.log(`Repository ${action} event for ${repository.full_name}`); + + if (action === 'deleted') { + // Handle repository deletion + const query = ` + SELECT gr.id, gr.template_id + FROM github_repositories gr + WHERE gr.owner_name = $1 AND gr.repository_name = $2 + `; + + const result = await database.query(query, [repository.owner.login, repository.name]); + + if (result.rows.length > 0) { + console.log(`Repository ${repository.full_name} was deleted, marking as inactive`); + await database.query( + 'UPDATE github_repositories SET sync_status = $1, updated_at = NOW() WHERE id = $2', + ['deleted', result.rows[0].id] + ); + } + } + } + + // Handle ping events (GitHub webhook test) + async handlePingEvent(payload) { + console.log('GitHub webhook ping received - webhook is working correctly'); + console.log(`Repository: ${payload.repository?.full_name || 'Unknown'}`); + console.log(`Zen: ${payload.zen || 'No zen message'}`); + } + + // Log webhook events for debugging and analytics + async _ensureWebhookEventsSchemaCached() { + if (this._schemaChecked) return; + try { + const result = await database.query( + "SELECT column_name, is_nullable FROM information_schema.columns WHERE table_schema='public' AND table_name='webhook_events'" + ); + for (const row of result.rows) { + this._webhookEventColumns.set(row.column_name, row.is_nullable); + } + } catch (e) { + // If schema check fails, proceed with best-effort insert + console.warn('Could not introspect webhook_events schema:', e.message); + } finally { + this._schemaChecked = true; + } + } + + async logWebhookEvent(eventType, action, repositoryFullName, metadata = {}, deliveryId = null, fullPayload = null) { + try { + await this._ensureWebhookEventsSchemaCached(); + + // Build a flexible INSERT based on existing columns + const columns = []; + const placeholders = []; + const values = []; + let i = 1; + + columns.push('event_type'); + placeholders.push(`$${i++}`); + values.push(eventType); + + if (this._webhookEventColumns.has('action')) { + columns.push('action'); + placeholders.push(`$${i++}`); + values.push(action || null); + } + if (this._webhookEventColumns.has('repository_full_name')) { + columns.push('repository_full_name'); + placeholders.push(`$${i++}`); + values.push(repositoryFullName || null); + } + if (this._webhookEventColumns.has('delivery_id')) { + columns.push('delivery_id'); + placeholders.push(`$${i++}`); + values.push(deliveryId || null); + } + if (this._webhookEventColumns.has('metadata')) { + columns.push('metadata'); + placeholders.push(`$${i++}`); + values.push(JSON.stringify(metadata || {})); + } + if (this._webhookEventColumns.has('event_payload')) { + columns.push('event_payload'); + placeholders.push(`$${i++}`); + values.push(JSON.stringify(fullPayload || {})); + } + if (this._webhookEventColumns.has('received_at')) { + columns.push('received_at'); + placeholders.push(`$${i++}`); + values.push(new Date()); + } + if (this._webhookEventColumns.has('processing_status')) { + columns.push('processing_status'); + placeholders.push(`$${i++}`); + values.push('pending'); + } + + const query = `INSERT INTO webhook_events (${columns.join(', ')}) VALUES (${placeholders.join(', ')})`; + await database.query(query, values); + } catch (error) { + console.warn('Failed to log webhook event:', error.message); + } + } + + // Get recent webhook events + async getRecentWebhookEvents(limit = 50) { + try { + const query = ` + SELECT * FROM webhook_events + ORDER BY received_at DESC + LIMIT $1 + `; + + const result = await database.query(query, [limit]); + return result.rows; + } catch (error) { + console.error('Failed to get webhook events:', error.message); + return []; + } + } +} + +module.exports = WebhookService; diff --git a/services/git-integration/test-webhook.js b/services/git-integration/test-webhook.js new file mode 100644 index 0000000..8cdcf8d --- /dev/null +++ b/services/git-integration/test-webhook.js @@ -0,0 +1,70 @@ +// test-webhook.js - Simple test script for webhook endpoint +const fetch = require('node-fetch'); + +const WEBHOOK_URL = 'http://localhost:8012/api/github/webhook'; + +// Test webhook with a sample GitHub push event +const testPayload = { + ref: 'refs/heads/main', + before: 'abc123', + after: 'def456', + repository: { + id: 123456, + name: 'test-repo', + full_name: 'testuser/test-repo', + owner: { + login: 'testuser', + id: 789 + } + }, + pusher: { + name: 'testuser', + email: 'test@example.com' + }, + commits: [ + { + id: 'def456', + message: 'Test commit', + author: { + name: 'Test User', + email: 'test@example.com' + } + } + ] +}; + +async function testWebhook() { + try { + console.log('🧪 Testing webhook endpoint...'); + console.log(`📡 Sending POST request to: ${WEBHOOK_URL}`); + + const response = await fetch(WEBHOOK_URL, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-GitHub-Event': 'push', + 'X-GitHub-Delivery': 'test-delivery-123' + }, + body: JSON.stringify(testPayload) + }); + + const result = await response.json(); + + console.log('📊 Response Status:', response.status); + console.log('📋 Response Body:', JSON.stringify(result, null, 2)); + + if (response.ok) { + console.log('✅ Webhook test successful!'); + } else { + console.log('❌ Webhook test failed!'); + } + + } catch (error) { + console.error('❌ Error testing webhook:', error.message); + } +} + +// Run the test +testWebhook(); + + diff --git a/services/tech-stack-selector/Dockerfile b/services/tech-stack-selector/Dockerfile index 7b30fb8..a3ff93e 100644 --- a/services/tech-stack-selector/Dockerfile +++ b/services/tech-stack-selector/Dockerfile @@ -1,37 +1,36 @@ -FROM python:3.12-slim - -# Install system dependencies needed for building Python packages -RUN apt-get update && apt-get install -y \ - curl \ - gcc \ - g++ \ - make \ - libc6-dev \ - libffi-dev \ - libssl-dev \ - build-essential \ - pkg-config \ - python3-dev \ - && rm -rf /var/lib/apt/lists/* +# Use official Python runtime as a parent image +FROM python:3.9-slim +# Set the working directory in the container WORKDIR /app -# Upgrade pip and install build tools -RUN pip install --no-cache-dir --upgrade pip setuptools wheel +# Set environment variables +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 -# Copy requirements and install Python dependencies +# Install system dependencies including PostgreSQL client and netcat +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + libpq-dev \ + postgresql-client \ + curl \ + netcat-openbsd \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt -# Copy application code +# Copy the current directory contents into the container at /app COPY . . -# Expose port +# Copy and set up startup scripts +COPY start.sh /app/start.sh +COPY docker-start.sh /app/docker-start.sh +RUN chmod +x /app/start.sh /app/docker-start.sh + +# Expose the port the app runs on EXPOSE 8002 -# Health check -HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:8002/health || exit 1 - -# Start the application -CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8002", "--reload"] +# Run Docker-optimized startup script +CMD ["/app/docker-start.sh"] \ No newline at end of file diff --git a/services/tech-stack-selector/Neo4j_From_Postgres.cql b/services/tech-stack-selector/Neo4j_From_Postgres.cql new file mode 100644 index 0000000..46bd1f1 --- /dev/null +++ b/services/tech-stack-selector/Neo4j_From_Postgres.cql @@ -0,0 +1,120 @@ +// ===================================================== +// NEO4J SCHEMA FROM POSTGRESQL DATA +// Price-focused migration from existing PostgreSQL database +// ===================================================== + +// Clear existing data +MATCH (n) DETACH DELETE n; + +// ===================================================== +// CREATE CONSTRAINTS AND INDEXES +// ===================================================== + +// Create uniqueness constraints +CREATE CONSTRAINT price_tier_name_unique IF NOT EXISTS FOR (p:PriceTier) REQUIRE p.tier_name IS UNIQUE; +CREATE CONSTRAINT technology_name_unique IF NOT EXISTS FOR (t:Technology) REQUIRE t.name IS UNIQUE; +CREATE CONSTRAINT tool_name_unique IF NOT EXISTS FOR (tool:Tool) REQUIRE tool.name IS UNIQUE; +CREATE CONSTRAINT stack_name_unique IF NOT EXISTS FOR (s:TechStack) REQUIRE s.name IS UNIQUE; + +// Create indexes for performance +CREATE INDEX price_tier_range_idx IF NOT EXISTS FOR (p:PriceTier) ON (p.min_price_usd, p.max_price_usd); +CREATE INDEX tech_category_idx IF NOT EXISTS FOR (t:Technology) ON (t.category); +CREATE INDEX tech_cost_idx IF NOT EXISTS FOR (t:Technology) ON (t.monthly_cost_usd); +CREATE INDEX tool_category_idx IF NOT EXISTS FOR (tool:Tool) ON (tool.category); +CREATE INDEX tool_cost_idx IF NOT EXISTS FOR (tool:Tool) ON (tool.monthly_cost_usd); + +// ===================================================== +// PRICE TIER NODES (from PostgreSQL price_tiers table) +// ===================================================== + +// These will be populated from PostgreSQL data +// Structure matches PostgreSQL price_tiers table: +// - id, tier_name, min_price_usd, max_price_usd, target_audience, typical_project_scale, description + +// ===================================================== +// TECHNOLOGY NODES (from PostgreSQL technology tables) +// ===================================================== + +// These will be populated from PostgreSQL data +// Categories: frontend_technologies, backend_technologies, database_technologies, +// cloud_technologies, testing_technologies, mobile_technologies, +// devops_technologies, ai_ml_technologies + +// ===================================================== +// TOOL NODES (from PostgreSQL tools table) +// ===================================================== + +// These will be populated from PostgreSQL data +// Structure matches PostgreSQL tools table with pricing: +// - id, name, category, description, monthly_cost_usd, setup_cost_usd, +// price_tier_id, total_cost_of_ownership_score, price_performance_ratio + +// ===================================================== +// TECH STACK NODES (will be generated from combinations) +// ===================================================== + +// These will be dynamically created based on: +// - Price tier constraints +// - Technology compatibility +// - Budget optimization +// - Domain requirements + +// ===================================================== +// RELATIONSHIP TYPES +// ===================================================== + +// Price-based relationships +// - [:BELONGS_TO_TIER] - Technology/Tool belongs to price tier +// - [:WITHIN_BUDGET] - Technology/Tool fits within budget range +// - [:COST_OPTIMIZED] - Optimal cost-performance ratio + +// Technology relationships +// - [:COMPATIBLE_WITH] - Technology compatibility +// - [:USES_FRONTEND] - Stack uses frontend technology +// - [:USES_BACKEND] - Stack uses backend technology +// - [:USES_DATABASE] - Stack uses database technology +// - [:USES_CLOUD] - Stack uses cloud technology +// - [:USES_TESTING] - Stack uses testing technology +// - [:USES_MOBILE] - Stack uses mobile technology +// - [:USES_DEVOPS] - Stack uses devops technology +// - [:USES_AI_ML] - Stack uses AI/ML technology + +// Tool relationships +// - [:RECOMMENDED_FOR] - Tool recommended for domain/use case +// - [:INTEGRATES_WITH] - Tool integrates with technology +// - [:SUITABLE_FOR] - Tool suitable for price tier + +// ===================================================== +// PRICE-BASED QUERIES (examples) +// ===================================================== + +// Query 1: Find technologies within budget +// MATCH (t:Technology)-[:BELONGS_TO_TIER]->(p:PriceTier) +// WHERE $budget >= p.min_price_usd AND $budget <= p.max_price_usd +// RETURN t, p ORDER BY t.total_cost_of_ownership_score DESC + +// Query 2: Find optimal tech stack for budget +// MATCH (frontend:Technology {category: "frontend"})-[:BELONGS_TO_TIER]->(p1:PriceTier) +// MATCH (backend:Technology {category: "backend"})-[:BELONGS_TO_TIER]->(p2:PriceTier) +// MATCH (database:Technology {category: "database"})-[:BELONGS_TO_TIER]->(p3:PriceTier) +// MATCH (cloud:Technology {category: "cloud"})-[:BELONGS_TO_TIER]->(p4:PriceTier) +// WHERE (frontend.monthly_cost_usd + backend.monthly_cost_usd + +// database.monthly_cost_usd + cloud.monthly_cost_usd) <= $budget +// RETURN frontend, backend, database, cloud, +// (frontend.monthly_cost_usd + backend.monthly_cost_usd + +// database.monthly_cost_usd + cloud.monthly_cost_usd) as total_cost +// ORDER BY total_cost ASC, +// (frontend.total_cost_of_ownership_score + backend.total_cost_of_ownership_score + +// database.total_cost_of_ownership_score + cloud.total_cost_of_ownership_score) DESC + +// Query 3: Find tools for specific price tier +// MATCH (tool:Tool)-[:BELONGS_TO_TIER]->(p:PriceTier {tier_name: $tier_name}) +// RETURN tool ORDER BY tool.price_performance_ratio DESC + +// ===================================================== +// COMPLETION STATUS +// ===================================================== + +RETURN "✅ Neo4j Schema Ready for PostgreSQL Migration!" as status, + "🎯 Focus: Price-based relationships from existing PostgreSQL data" as focus, + "📊 Ready for data migration and relationship creation" as ready_state; diff --git a/services/tech-stack-selector/Readme.md b/services/tech-stack-selector/Readme.md index 84fc622..73e07db 100644 --- a/services/tech-stack-selector/Readme.md +++ b/services/tech-stack-selector/Readme.md @@ -502,4 +502,16 @@ if domain == 'gaming': **Last Updated**: July 3, 2025 **Version**: 4.0.0 **Maintainer**: AI Development Pipeline Team -**Status**: Production Ready ✅ \ No newline at end of file +**Status**: Production Ready ✅ + +# Normal startup (auto-detects if migration needed) +./start_migrated.sh + +# Force re-migration (useful when you add new data) +./start_migrated.sh --force-migration + +# Show help +./start_migrated.sh --help + + +healthcare, finance, gaming, education, media, iot, social, elearning, realestate, travel, manufacturing, ecommerce, saas \ No newline at end of file diff --git a/services/tech-stack-selector/TechStackSelector_Complete_README.md b/services/tech-stack-selector/TechStackSelector_Complete_README.md new file mode 100644 index 0000000..fc29b11 --- /dev/null +++ b/services/tech-stack-selector/TechStackSelector_Complete_README.md @@ -0,0 +1,189 @@ +# Tech Stack Selector -- Postgres + Neo4j Knowledge Graph + +This project provides a **price-focused technology stack selector**.\ +It uses a **Postgres relational database** for storing technologies and +pricing, and builds a **Neo4j knowledge graph** to support advanced +queries like: + +> *"Show me all backend, frontend, and cloud technologies that fit a +> \$10-\$50 budget."* + +------------------------------------------------------------------------ + +## 📌 1. Database Schema (Postgres) + +The schema is designed to ensure **data integrity** and +**price-tier-driven recommendations**. + +### Core Tables + +- **`price_tiers`** -- Foundation table for price categories (tiers + like *Free*, *Low*, *Medium*, *Enterprise*). +- **Category-Specific Tables** -- Each technology domain has its own + table: + - `frontend_technologies` + - `backend_technologies` + - `cloud_technologies` + - `database_technologies` + - `testing_technologies` + - `mobile_technologies` + - `devops_technologies` + - `ai_ml_technologies` +- **`tools`** -- Central table for business/productivity tools with: + - `name`, `category`, `description` + - `primary_use_cases` + - `popularity_score` + - Pricing fields: `monthly_cost_usd`, `setup_cost_usd`, + `license_cost_usd`, `training_cost_usd`, + `total_cost_of_ownership_score` + - Foreign key to `price_tiers` + +All category tables reference `price_tiers(id)` ensuring **referential +integrity**. + +------------------------------------------------------------------------ + +## 🧱 2. Migration Files + +Your migrations are structured as follows: + +1. **`001_schema.sql`** -- Creates all tables, constraints, indexes. +2. **`002_tools_migration.sql`** -- Adds `tools` table and full-text + search indexes. +3. **`003_tools_pricing_migration.sql`** -- Adds cost-related fields to + `tools` and links to `price_tiers`. + +Run them in order: + +``` bash +psql -U -d -f sql/001_schema.sql +psql -U -d -f sql/002_tools_migration.sql +psql -U -d -f sql/003_tools_pricing_migration.sql +``` + +------------------------------------------------------------------------ + +## 🕸️ 3. Neo4j Knowledge Graph Design + +We map relational data into a graph for semantic querying. + +### Node Types + +- **Technology** → `{name, category, description, popularity_score}` +- **Category** → `{name}` +- **PriceTier** → `{tier_name, min_price, max_price}` + +### Relationships + +- `(Technology)-[:BELONGS_TO]->(Category)` +- `(Technology)-[:HAS_PRICE_TIER]->(PriceTier)` + +Example graph: + + (:Technology {name:"NodeJS"})-[:BELONGS_TO]->(:Category {name:"Backend"}) + (:Technology {name:"NodeJS"})-[:HAS_PRICE_TIER]->(:PriceTier {tier_name:"Medium"}) + +------------------------------------------------------------------------ + +## 🔄 4. ETL (Extract → Transform → Load) + +Use a Python ETL script to pull from Postgres and load into Neo4j. + +### Example Script + +``` python +from neo4j import GraphDatabase +import psycopg2 + +pg_conn = psycopg2.connect(host="localhost", database="techstack", user="user", password="pass") +pg_cur = pg_conn.cursor() + +driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password")) + +def insert_data(tx, tech_name, category, price_tier): + tx.run(""" + MERGE (c:Category {name: $category}) + MERGE (t:Technology {name: $tech}) + ON CREATE SET t.category = $category + MERGE (p:PriceTier {tier_name: $price_tier}) + MERGE (t)-[:BELONGS_TO]->(c) + MERGE (t)-[:HAS_PRICE_TIER]->(p) + """, tech=tech_name, category=category, price_tier=price_tier) + +pg_cur.execute("SELECT name, category, tier_name FROM tools JOIN price_tiers ON price_tiers.id = tools.price_tier_id") +rows = pg_cur.fetchall() + +with driver.session() as session: + for name, category, tier in rows: + session.write_transaction(insert_data, name, category, tier) + +pg_conn.close() +driver.close() +``` + +------------------------------------------------------------------------ + +## 🔍 5. Querying the Knowledge Graph + +### Find technologies in a price range: + +``` cypher +MATCH (t:Technology)-[:HAS_PRICE_TIER]->(p:PriceTier) +WHERE p.min_price >= 10 AND p.max_price <= 50 +RETURN t.name, p.tier_name +ORDER BY p.min_price ASC +``` + +### Find technologies for a specific domain: + +``` cypher +MATCH (t:Technology)-[:BELONGS_TO]->(c:Category) +WHERE c.name = "Backend" +RETURN t.name, t.popularity_score +ORDER BY t.popularity_score DESC +``` + +------------------------------------------------------------------------ + +## 🗂️ 6. Suggested Project Structure + + techstack-selector/ + ├── sql/ + │ ├── 001_schema.sql + │ ├── 002_tools_migration.sql + │ └── 003_tools_pricing_migration.sql + ├── etl/ + │ └── postgres_to_neo4j.py + ├── api/ + │ └── app.py (Flask/FastAPI server for exposing queries) + ├── docs/ + │ └── README.md + +------------------------------------------------------------------------ + +## 🚀 7. API Layer (Optional) + +You can wrap Neo4j queries inside a REST/GraphQL API. + +Example response: + +``` json +{ + "price_range": [10, 50], + "technologies": [ + {"name": "NodeJS", "category": "Backend", "tier": "Medium"}, + {"name": "React", "category": "Frontend", "tier": "Medium"} + ] +} +``` + +------------------------------------------------------------------------ + +## ✅ Summary + +This README covers: - Postgres schema with pricing and foreign keys - +Migration execution steps - Neo4j graph model - Python ETL script - +Example Cypher queries - Suggested folder structure + +This setup enables **price-driven technology recommendations** with a +clear path for building APIs and AI-powered analytics. diff --git a/services/tech-stack-selector/db/001_schema.sql b/services/tech-stack-selector/db/001_schema.sql new file mode 100644 index 0000000..369f32d --- /dev/null +++ b/services/tech-stack-selector/db/001_schema.sql @@ -0,0 +1,7769 @@ +-- ===================================================== +-- Enhanced Database Setup for Tech Stack Selector +-- Price-focused design with category-specific tables +-- Prepared for Neo4j migration with knowledge graphs +-- ===================================================== + +-- Drop all existing tables +DROP TABLE IF EXISTS frontend_technologies CASCADE; +DROP TABLE IF EXISTS backend_technologies CASCADE; +DROP TABLE IF EXISTS database_technologies CASCADE; +DROP TABLE IF EXISTS cloud_technologies CASCADE; +DROP TABLE IF EXISTS testing_technologies CASCADE; +DROP TABLE IF EXISTS mobile_technologies CASCADE; +DROP TABLE IF EXISTS devops_technologies CASCADE; +DROP TABLE IF EXISTS ai_ml_technologies CASCADE; +DROP TABLE IF EXISTS price_tiers CASCADE; +DROP TABLE IF EXISTS tech_pricing CASCADE; +DROP TABLE IF EXISTS price_based_stacks CASCADE; +DROP TABLE IF EXISTS stack_recommendations CASCADE; +DROP TABLE IF EXISTS tools CASCADE; + +-- ===================================================== +-- PRICE TIER FOUNDATION +-- ===================================================== + +-- Create price tiers table (foundation for all pricing) +CREATE TABLE price_tiers ( + id SERIAL PRIMARY KEY, + tier_name VARCHAR(50) NOT NULL UNIQUE, + min_price_usd DECIMAL(10,2) NOT NULL, + max_price_usd DECIMAL(10,2) NOT NULL, + target_audience VARCHAR(100), + typical_project_scale VARCHAR(50), + description TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT valid_price_range CHECK (min_price_usd <= max_price_usd) +); + +-- ===================================================== +-- TECHNOLOGY CATEGORY TABLES +-- ===================================================== + +-- Frontend Technologies +CREATE TABLE frontend_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + framework_type VARCHAR(50), -- react-based, vue-based, angular-based, vanilla, etc. + maturity_score INTEGER CHECK (maturity_score >= 1 AND maturity_score <= 100), + learning_curve VARCHAR(20) CHECK (learning_curve IN ('easy', 'medium', 'hard', 'very hard', 'expert', 'beginner', 'advanced')), + performance_rating INTEGER CHECK (performance_rating >= 1 AND performance_rating <= 100), + community_size VARCHAR(20), + bundle_size_kb INTEGER, + mobile_friendly BOOLEAN DEFAULT false, + ssr_support BOOLEAN DEFAULT false, + typescript_support BOOLEAN DEFAULT false, + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Backend Technologies +CREATE TABLE backend_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + language_base VARCHAR(50), -- javascript, python, java, go, etc. + architecture_type VARCHAR(50), -- monolithic, microservices, serverless + maturity_score INTEGER CHECK (maturity_score >= 1 AND maturity_score <= 100), + learning_curve VARCHAR(20) CHECK (learning_curve IN ('easy', 'medium', 'hard', 'very hard', 'expert', 'beginner', 'advanced')), + performance_rating INTEGER CHECK (performance_rating >= 1 AND performance_rating <= 100), + scalability_rating INTEGER CHECK (scalability_rating >= 1 AND scalability_rating <= 100), + memory_efficiency INTEGER CHECK (memory_efficiency >= 1 AND memory_efficiency <= 100), + concurrent_handling VARCHAR(50), -- excellent, good, fair, poor + api_capabilities TEXT[], + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Database Technologies +CREATE TABLE database_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + database_type VARCHAR(50), -- relational, nosql, graph, key-value, document + acid_compliance BOOLEAN DEFAULT false, + horizontal_scaling BOOLEAN DEFAULT false, + vertical_scaling BOOLEAN DEFAULT true, + maturity_score INTEGER CHECK (maturity_score >= 1 AND maturity_score <= 100), + performance_rating INTEGER CHECK (performance_rating >= 1 AND performance_rating <= 100), + consistency_model VARCHAR(50), -- strong, eventual, weak + query_language VARCHAR(50), -- sql, mongodb-query, cypher, etc. + max_storage_capacity VARCHAR(50), + backup_features TEXT[], + security_features TEXT[], + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +-- Cloud Technologies +CREATE TABLE cloud_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + provider VARCHAR(50), -- aws, azure, gcp, digitalocean, etc. + service_type VARCHAR(50), -- iaas, paas, saas, serverless, container + global_availability INTEGER, -- number of regions + uptime_sla DECIMAL(5,3), -- 99.999 + auto_scaling BOOLEAN DEFAULT false, + serverless_support BOOLEAN DEFAULT false, + container_support BOOLEAN DEFAULT false, + managed_services TEXT[], + security_certifications TEXT[], + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + free_tier_available BOOLEAN DEFAULT false, + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Testing Technologies +CREATE TABLE testing_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + testing_type VARCHAR(50), -- unit, integration, e2e, performance, security + framework_support TEXT[], -- jest, mocha, cypress, selenium + automation_level VARCHAR(20), -- full, partial, manual + ci_cd_integration BOOLEAN DEFAULT false, + browser_support TEXT[], + mobile_testing BOOLEAN DEFAULT false, + api_testing BOOLEAN DEFAULT false, + performance_testing BOOLEAN DEFAULT false, + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Mobile Technologies +CREATE TABLE mobile_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + platform_support TEXT[], -- ios, android, web, desktop + development_approach VARCHAR(50), -- native, hybrid, cross-platform + language_base VARCHAR(50), + performance_rating INTEGER CHECK (performance_rating >= 1 AND performance_rating <= 100), + learning_curve VARCHAR(20) CHECK (learning_curve IN ('easy', 'medium', 'hard', 'very hard', 'expert', 'beginner', 'advanced')), + ui_native_feel INTEGER CHECK (ui_native_feel >= 1 AND ui_native_feel <= 100), + code_sharing_percentage INTEGER CHECK (code_sharing_percentage >= 0 AND code_sharing_percentage <= 100), + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- DevOps Technologies +CREATE TABLE devops_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + category VARCHAR(50), -- ci-cd, containerization, orchestration, monitoring, infrastructure + complexity_level VARCHAR(20) CHECK (complexity_level IN ('easy', 'medium', 'hard')), + scalability_support VARCHAR(20), -- excellent, good, fair + cloud_native BOOLEAN DEFAULT false, + enterprise_ready BOOLEAN DEFAULT false, + automation_capabilities TEXT[], + integration_options TEXT[], + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- AI/ML Technologies +CREATE TABLE ai_ml_technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL UNIQUE, + ml_type VARCHAR(50), -- deep-learning, machine-learning, nlp, computer-vision + language_support TEXT[], -- python, r, javascript, etc. + gpu_acceleration BOOLEAN DEFAULT false, + cloud_integration BOOLEAN DEFAULT false, + pretrained_models BOOLEAN DEFAULT false, + ease_of_deployment INTEGER CHECK (ease_of_deployment >= 1 AND ease_of_deployment <= 100), + model_accuracy_potential INTEGER CHECK (model_accuracy_potential >= 1 AND model_accuracy_potential <= 100), + primary_use_cases TEXT[], + strengths TEXT[], + weaknesses TEXT[], + license_type VARCHAR(50), + domain TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +-- ===================================================== +-- PRICING TABLES +-- ===================================================== + +-- Universal tech pricing table +CREATE TABLE tech_pricing ( + id SERIAL PRIMARY KEY, + tech_name VARCHAR(100) NOT NULL, + tech_category VARCHAR(50) NOT NULL, -- frontend, backend, database, etc. + price_tier_id INTEGER REFERENCES price_tiers(id), + + -- Cost breakdown + development_cost_usd DECIMAL(10,2) DEFAULT 0, -- One-time setup cost + monthly_operational_cost_usd DECIMAL(10,2) DEFAULT 0, -- Monthly running cost + license_cost_usd DECIMAL(10,2) DEFAULT 0, -- License fees + training_cost_usd DECIMAL(10,2) DEFAULT 0, -- Team training cost + maintenance_cost_percentage DECIMAL(5,2) DEFAULT 0, -- % of dev cost annually + + -- Scaling cost factors + cost_per_user_usd DECIMAL(8,4) DEFAULT 0, + cost_per_request_usd DECIMAL(8,6) DEFAULT 0, + storage_cost_per_gb_usd DECIMAL(6,4) DEFAULT 0, + bandwidth_cost_per_gb_usd DECIMAL(6,4) DEFAULT 0, + + -- Resource requirements (affects hosting costs) + min_cpu_cores DECIMAL(3,1) DEFAULT 0.5, + min_ram_gb DECIMAL(5,1) DEFAULT 0.5, + min_storage_gb DECIMAL(8,1) DEFAULT 1, + bandwidth_gb_month DECIMAL(10,2) DEFAULT 10, + + -- Cost efficiency metrics + total_cost_of_ownership_score INTEGER CHECK (total_cost_of_ownership_score >= 1 AND total_cost_of_ownership_score <= 100), + price_performance_ratio INTEGER CHECK (price_performance_ratio >= 1 AND price_performance_ratio <= 100), + + notes TEXT, + last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + UNIQUE(tech_name, tech_category) +); + +-- Price-based tech stack combinations +CREATE TABLE price_based_stacks ( + id SERIAL PRIMARY KEY, + stack_name VARCHAR(100) NOT NULL, + price_tier_id INTEGER REFERENCES price_tiers(id), + total_monthly_cost_usd DECIMAL(10,2), + total_setup_cost_usd DECIMAL(10,2), + + -- Tech stack composition + frontend_tech VARCHAR(100), + backend_tech VARCHAR(100), + database_tech VARCHAR(100), + cloud_tech VARCHAR(100), + testing_tech VARCHAR(100), + mobile_tech VARCHAR(100), + devops_tech VARCHAR(100), + ai_ml_tech VARCHAR(100), + + -- Stack characteristics + suitable_project_scales TEXT[], + team_size_range VARCHAR(20), -- 1-2, 3-5, 6-10, 10+ + development_time_months INTEGER, + maintenance_complexity VARCHAR(20), -- low, medium, high + scalability_ceiling VARCHAR(50), -- small, medium, large, enterprise + + -- Business metrics + recommended_domains TEXT[], + success_rate_percentage INTEGER, + user_satisfaction_score INTEGER CHECK (user_satisfaction_score >= 1 AND user_satisfaction_score <= 100), + + description TEXT, + pros TEXT[], + cons TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Stack recommendations based on price and requirements +CREATE TABLE stack_recommendations ( + id SERIAL PRIMARY KEY, + price_tier_id INTEGER REFERENCES price_tiers(id), + business_domain VARCHAR(50), + project_scale VARCHAR(20), + team_experience_level VARCHAR(20), -- beginner, intermediate, expert + + recommended_stack_id INTEGER REFERENCES price_based_stacks(id), + confidence_score INTEGER CHECK (confidence_score >= 1 AND confidence_score <= 100), + recommendation_reasons TEXT[], + potential_risks TEXT[], + alternative_stacks INTEGER[], -- array of stack IDs + + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- ===================================================== +-- DATA INSERTION - PRICE TIERS +-- ===================================================== + +INSERT INTO price_tiers (tier_name, min_price_usd, max_price_usd, target_audience, typical_project_scale, description) VALUES +('Micro Budget', 5.00, 25.00, 'Solo developers, students, hobby projects', 'Personal/Learning', 'Ultra-low cost solutions using free tiers and minimal paid services'), +('Startup Budget', 25.01, 100.00, 'Early startups, small teams, MVPs', 'Small', 'Cost-effective solutions for getting started with some paid services'), +('Small Business', 100.01, 300.00, 'Small businesses, established startups', 'Small to Medium', 'Balanced cost and functionality for growing businesses'), +('Growth Stage', 300.01, 600.00, 'Growing companies, mid-size teams', 'Medium', 'Scalable solutions with good performance and reliability'), +('Scale-Up', 600.01, 1000.00, 'Scale-up companies, larger teams', 'Medium to Large', 'High-performance solutions with advanced features and scaling capabilities'); + +-- ===================================================== +-- DATA INSERTION - FRONTEND TECHNOLOGIES +-- ===================================================== + +INSERT INTO frontend_technologies ( + name, framework_type, maturity_score, learning_curve, performance_rating, + community_size, bundle_size_kb, mobile_friendly, ssr_support, typescript_support, + primary_use_cases, strengths, weaknesses, license_type, domain +) VALUES +-- React Ecosystem +('React', 'react-based', 95, 'medium', 88, 'large', 42, true, true, true, + ARRAY['Single Page Applications', 'Component-based UI', 'Large-scale web apps', 'Progressive Web Apps'], + ARRAY['Huge ecosystem', 'Component reusability', 'Virtual DOM efficiency', 'Strong community support', 'Excellent tooling'], + ARRAY['Steep learning curve', 'Rapid ecosystem changes', 'JSX syntax barrier', 'SEO challenges without SSR'], + 'MIT', + ARRAY['E-commerce', 'Social Media', 'Enterprise Web Apps', 'Progressive Web Apps', 'SaaS Platforms']), + +('Next.js', 'react-based', 93, 'medium', 90, 'large', 68, true, true, true, + ARRAY['Full-stack React apps', 'Static site generation', 'Server-side rendering', 'E-commerce platforms'], + ARRAY['Excellent SSR/SSG', 'Full-stack capabilities', 'Great performance', 'Vercel integration', 'File-based routing'], + ARRAY['Vercel vendor lock-in potential', 'Complex configuration', 'Learning curve for full-stack features'], + 'MIT', + ARRAY['E-commerce', 'Static Websites', 'Content Management Systems', 'SaaS Platforms', 'Full-stack Applications']), + +('Gatsby', 'react-based', 85, 'hard', 87, 'medium', 95, false, true, true, + ARRAY['Static sites', 'JAMstack applications', 'Performance-focused sites', 'Content-driven websites'], + ARRAY['Excellent static generation', 'GraphQL integration', 'Plugin ecosystem', 'Great performance'], + ARRAY['Complex build process', 'Long build times', 'GraphQL learning curve', 'Over-engineering for simple sites'], + 'MIT', + ARRAY['Blogs', 'Documentation Sites', 'Marketing Websites', 'Portfolio Sites', 'E-commerce']), + +('React Native', 'react-based', 90, 'medium', 82, 'large', 0, true, false, true, + ARRAY['Cross-platform mobile apps', 'Native mobile development', 'Hybrid applications'], + ARRAY['Code reusability', 'Native performance', 'Large ecosystem', 'Hot reloading'], + ARRAY['Platform-specific bugs', 'Bridge performance issues', 'Native module complexity'], + 'MIT', + ARRAY['Mobile Apps', 'Cross-platform Development', 'Startups', 'Enterprise Mobile']), + +('Create React App', 'react-based', 80, 'easy', 75, 'large', 45, true, false, true, + ARRAY['Quick React setup', 'Prototyping', 'Learning React', 'Simple SPAs'], + ARRAY['Zero configuration', 'Quick setup', 'Great for beginners', 'Webpack abstraction'], + ARRAY['Limited customization', 'Ejecting complexity', 'Not suitable for complex apps', 'Bundle size issues'], + 'MIT', + ARRAY['Prototyping', 'Learning Projects', 'Simple Web Apps', 'MVPs']), + +-- Vue.js Ecosystem +('Vue.js', 'vue-based', 90, 'easy', 85, 'large', 34, true, true, true, + ARRAY['Progressive Web Apps', 'Single Page Applications', 'Component-based UI', 'Small to medium projects'], + ARRAY['Gentle learning curve', 'Excellent documentation', 'Flexible architecture', 'Good performance'], + ARRAY['Smaller job market', 'Less mature ecosystem compared to React', 'Fewer third-party libraries'], + 'MIT', + ARRAY['E-commerce', 'Small Business Websites', 'Prototyping', 'SaaS Platforms', 'Content Management Systems']), + +('Nuxt.js', 'vue-based', 88, 'medium', 88, 'medium', 72, true, true, true, + ARRAY['Vue.js applications', 'Server-side rendering', 'Static site generation', 'Universal apps'], + ARRAY['Auto-routing', 'SSR/SSG support', 'Module ecosystem', 'Convention over configuration'], + ARRAY['Convention limitations', 'Learning curve', 'Less flexible than custom setup'], + 'MIT', + ARRAY['E-commerce', 'Content Websites', 'SaaS Applications', 'Static Sites']), + +('Quasar', 'vue-based', 82, 'medium', 83, 'small', 89, true, true, true, + ARRAY['Cross-platform apps', 'Material Design apps', 'Desktop applications', 'Mobile development'], + ARRAY['Material Design components', 'Cross-platform', 'CLI tools', 'Comprehensive framework'], + ARRAY['Learning curve', 'Smaller community', 'Opinionated structure'], + 'MIT', + ARRAY['Enterprise Apps', 'Cross-platform Development', 'Admin Dashboards', 'Mobile Apps']), + +('Gridsome', 'vue-based', 75, 'medium', 85, 'small', 78, false, true, true, + ARRAY['Static site generation', 'JAMstack sites', 'Vue-based static sites'], + ARRAY['GraphQL data layer', 'Static generation', 'Vue.js integration', 'Performance focused'], + ARRAY['Small community', 'Limited plugins', 'GraphQL complexity'], + 'MIT', + ARRAY['Blogs', 'Documentation', 'Marketing Sites', 'Portfolio Sites']), + +-- Angular Ecosystem +('Angular', 'angular-based', 92, 'hard', 90, 'large', 128, true, true, true, + ARRAY['Enterprise applications', 'Large-scale SPAs', 'Complex business applications', 'Progressive Web Apps'], + ARRAY['Full-featured framework', 'Built-in TypeScript', 'Robust architecture', 'Excellent tooling', 'Strong opinions'], + ARRAY['Steep learning curve', 'Heavy bundle size', 'Complex for simple projects', 'Frequent breaking changes'], + 'MIT', + ARRAY['Enterprise Web Apps', 'Financial Services', 'Healthcare Systems', 'Large-scale SaaS', 'Business Intelligence']), + +('AngularJS', 'angular-based', 60, 'medium', 65, 'medium', 55, true, false, false, + ARRAY['Legacy applications', 'Simple web apps', 'Two-way data binding apps'], + ARRAY['Two-way data binding', 'Dependency injection', 'MVC architecture'], + ARRAY['End of life', 'Performance issues', 'Digest cycle problems', 'Legacy technology'], + 'MIT', + ARRAY['Legacy Systems', 'Maintenance Projects', 'Simple Web Apps']), + +('Ionic', 'angular-based', 85, 'medium', 78, 'medium', 145, true, false, true, + ARRAY['Hybrid mobile apps', 'Cross-platform development', 'Progressive Web Apps'], + ARRAY['Cross-platform', 'Native UI components', 'Angular integration', 'Capacitor platform'], + ARRAY['Performance limitations', 'Webview dependency', 'Native feel challenges'], + 'MIT', + ARRAY['Mobile Apps', 'Hybrid Development', 'PWAs', 'Cross-platform Apps']), + +-- Svelte Ecosystem +('Svelte', 'svelte-based', 85, 'medium', 92, 'medium', 8, true, true, true, + ARRAY['Fast web applications', 'Small bundle requirements', 'Interactive dashboards', 'Performance-critical apps'], + ARRAY['Smallest bundle size', 'No virtual DOM overhead', 'Easy to learn', 'Great performance'], + ARRAY['Smaller ecosystem', 'Limited job opportunities', 'Fewer learning resources', 'Less mature tooling'], + 'MIT', + ARRAY['Startups', 'Interactive Dashboards', 'Performance-critical Apps', 'Progressive Web Apps', 'Prototyping']), + +('SvelteKit', 'svelte-based', 80, 'medium', 90, 'small', 15, true, true, true, + ARRAY['Full-stack Svelte apps', 'Server-side rendering', 'Static site generation'], + ARRAY['Full-stack capabilities', 'File-based routing', 'Excellent performance', 'Modern architecture'], + ARRAY['Young framework', 'Smaller community', 'Limited ecosystem'], + 'MIT', + ARRAY['Full-stack Apps', 'Static Sites', 'SaaS Applications', 'Performance Apps']), + +('Sapper', 'svelte-based', 70, 'medium', 88, 'small', 12, true, true, false, + ARRAY['Svelte applications', 'Server-side rendering', 'Static exports'], + ARRAY['SSR support', 'Small bundle size', 'File-based routing'], + ARRAY['Deprecated in favor of SvelteKit', 'Limited features', 'Small community'], + 'MIT', + ARRAY['Legacy Svelte Apps', 'Simple SSR Apps']), + +-- Vanilla JavaScript & Utilities +('Vanilla JavaScript', 'vanilla', 100, 'hard', 95, 'large', 0, true, false, false, + ARRAY['All web applications', 'Performance-critical apps', 'Learning fundamentals'], + ARRAY['No framework overhead', 'Maximum performance', 'Full control', 'Universal compatibility'], + ARRAY['More boilerplate', 'Manual DOM manipulation', 'No built-in structure'], + 'Public Domain', + ARRAY['All Domains', 'Performance Apps', 'Legacy Systems', 'Embedded Systems']), + +('jQuery', 'library', 75, 'easy', 70, 'large', 32, true, false, false, + ARRAY['DOM manipulation', 'Legacy applications', 'Simple interactions'], + ARRAY['Simple API', 'Cross-browser compatibility', 'Large plugin ecosystem', 'Easy to learn'], + ARRAY['Performance overhead', 'Legacy approach', 'Not suitable for complex apps'], + 'MIT', + ARRAY['Legacy Systems', 'Simple Websites', 'WordPress Themes', 'Quick Prototypes']), + +('Lodash', 'utility', 95, 'easy', 85, 'large', 24, true, true, true, + ARRAY['Utility functions', 'Data manipulation', 'Functional programming'], + ARRAY['Comprehensive utilities', 'Consistent API', 'Performance optimized', 'Modular'], + ARRAY['Bundle size if not tree-shaken', 'Some functions obsolete with modern JS'], + 'MIT', + ARRAY['All JavaScript Projects', 'Data Processing', 'Utility Functions']), + +('Moment.js', 'utility', 85, 'easy', 75, 'large', 67, true, true, false, + ARRAY['Date manipulation', 'Time formatting', 'Date parsing'], + ARRAY['Comprehensive date handling', 'Locale support', 'Easy API'], + ARRAY['Large bundle size', 'Mutable API', 'Performance issues', 'Maintenance mode'], + 'MIT', + ARRAY['Legacy Projects', 'Date-heavy Applications', 'International Apps']), + +('Day.js', 'utility', 88, 'easy', 90, 'medium', 2, true, true, true, + ARRAY['Date manipulation', 'Moment.js replacement', 'Lightweight date handling'], + ARRAY['Tiny size', 'Moment.js compatible API', 'Immutable', 'Tree-shakable'], + ARRAY['Smaller feature set', 'Fewer plugins than Moment'], + 'MIT', + ARRAY['Modern Web Apps', 'Performance-critical Apps', 'Mobile Applications']), + +-- Build Tools & Bundlers +('Webpack', 'build-tool', 92, 'hard', 85, 'large', 0, true, true, true, + ARRAY['Module bundling', 'Asset processing', 'Code splitting'], + ARRAY['Powerful configuration', 'Plugin ecosystem', 'Code splitting', 'Hot reloading'], + ARRAY['Complex configuration', 'Steep learning curve', 'Slow build times'], + 'MIT', + ARRAY['Complex Applications', 'Enterprise Projects', 'Custom Build Processes']), + +('Vite', 'build-tool', 90, 'medium', 95, 'large', 0, true, true, true, + ARRAY['Fast development', 'Modern bundling', 'ES modules'], + ARRAY['Lightning fast HMR', 'ES modules native', 'Simple configuration', 'Framework agnostic'], + ARRAY['Node.js focused', 'Newer ecosystem', 'Limited IE support'], + 'MIT', + ARRAY['Modern Web Apps', 'Vue Applications', 'React Applications', 'Development Tools']), + +('Parcel', 'build-tool', 82, 'easy', 88, 'medium', 0, true, true, true, + ARRAY['Zero-config bundling', 'Quick prototyping', 'Simple projects'], + ARRAY['Zero configuration', 'Fast builds', 'Built-in support for many formats'], + ARRAY['Limited customization', 'Smaller ecosystem', 'Less control'], + 'MIT', + ARRAY['Prototyping', 'Simple Applications', 'Learning Projects']), + +('Rollup', 'build-tool', 88, 'medium', 92, 'medium', 0, true, true, true, + ARRAY['Library bundling', 'ES modules', 'Tree shaking'], + ARRAY['Excellent tree shaking', 'ES modules focus', 'Small bundles', 'Plugin architecture'], + ARRAY['Complex for applications', 'Smaller ecosystem than Webpack'], + 'MIT', + ARRAY['Library Development', 'Component Libraries', 'Modern Applications']), + +('esbuild', 'build-tool', 85, 'medium', 98, 'medium', 0, true, true, true, + ARRAY['Ultra-fast bundling', 'TypeScript compilation', 'Minification'], + ARRAY['Extremely fast', 'TypeScript support', 'Tree shaking', 'Minimal configuration'], + ARRAY['Go-based (different ecosystem)', 'Limited plugins', 'Newer tool'], + 'MIT', + ARRAY['Performance-critical Builds', 'TypeScript Projects', 'Fast Development']), + +-- CSS Frameworks & Preprocessors +('Tailwind CSS', 'css-framework', 92, 'medium', 90, 'large', 0, true, true, true, + ARRAY['Utility-first styling', 'Rapid UI development', 'Component styling'], + ARRAY['Utility-first approach', 'Highly customizable', 'Small production builds', 'Design system'], + ARRAY['Learning curve', 'HTML verbosity', 'Initial setup complexity'], + 'MIT', + ARRAY['Modern Web Apps', 'Component Libraries', 'Rapid Prototyping', 'Design Systems']), + +('Bootstrap', 'css-framework', 88, 'easy', 78, 'large', 58, true, false, false, + ARRAY['Responsive websites', 'Quick prototyping', 'Admin dashboards'], + ARRAY['Comprehensive components', 'Responsive grid', 'Large community', 'Easy to learn'], + ARRAY['Generic look', 'Heavy if not customized', 'jQuery dependency (v4 removed)'], + 'MIT', + ARRAY['Business Websites', 'Admin Dashboards', 'Prototyping', 'Legacy Projects']), + +('Bulma', 'css-framework', 82, 'easy', 85, 'medium', 48, true, false, false, + ARRAY['Modern CSS framework', 'Flexbox-based layouts', 'Component styling'], + ARRAY['Modern Flexbox approach', 'No JavaScript', 'Clean syntax', 'Modular'], + ARRAY['Smaller community', 'Less customizable than Tailwind', 'Fewer components'], + 'MIT', + ARRAY['Modern Websites', 'Clean Designs', 'Flexbox Layouts']), + +('Sass/SCSS', 'css-preprocessor', 90, 'medium', 88, 'large', 0, true, true, true, + ARRAY['CSS preprocessing', 'Style organization', 'Design systems'], + ARRAY['Variables and mixins', 'Nested syntax', 'Mature ecosystem', 'Powerful functions'], + ARRAY['Compilation step needed', 'Learning curve', 'Can become complex'], + 'MIT', + ARRAY['Large Projects', 'Design Systems', 'Component Libraries', 'Enterprise Apps']), + +('Less', 'css-preprocessor', 85, 'easy', 82, 'medium', 0, true, false, false, + ARRAY['CSS preprocessing', 'Dynamic stylesheets', 'Style enhancement'], + ARRAY['JavaScript-like syntax', 'Client-side compilation', 'Easy to learn'], + ARRAY['Less powerful than Sass', 'Smaller community', 'Performance concerns'], + 'Apache 2.0', + ARRAY['Bootstrap Projects', 'JavaScript-heavy Apps', 'Simple Preprocessing']), + +('Styled Components', 'css-in-js', 87, 'medium', 83, 'medium', 12, true, true, true, + ARRAY['CSS-in-JS', 'Component styling', 'Dynamic styling'], + ARRAY['Component-scoped styles', 'Dynamic styling', 'JavaScript integration', 'No class name conflicts'], + ARRAY['Runtime overhead', 'Learning curve', 'Bundle size increase'], + 'MIT', + ARRAY['React Applications', 'Component Libraries', 'Dynamic Styling']), + +('Emotion', 'css-in-js', 85, 'medium', 85, 'medium', 8, true, true, true, + ARRAY['CSS-in-JS', 'Performance-focused styling', 'Component styling'], + ARRAY['Performance focused', 'Flexible API', 'Small bundle', 'Framework agnostic'], + ARRAY['Runtime overhead', 'Learning curve', 'Complex setup options'], + 'MIT', + ARRAY['React Applications', 'Performance Apps', 'Component Libraries']), + +-- State Management +('Redux', 'state-management', 90, 'hard', 85, 'large', 6, true, true, true, + ARRAY['Application state management', 'Complex state logic', 'Time travel debugging'], + ARRAY['Predictable state', 'DevTools', 'Middleware ecosystem', 'Time travel debugging'], + ARRAY['Boilerplate heavy', 'Learning curve', 'Overkill for simple apps'], + 'MIT', + ARRAY['Large Applications', 'Complex State', 'Enterprise Apps', 'Redux-heavy Ecosystems']), + +('MobX', 'state-management', 85, 'medium', 88, 'medium', 16, true, true, true, + ARRAY['Reactive state management', 'Object-oriented state', 'Simple state updates'], + ARRAY['Less boilerplate', 'Reactive updates', 'Object-oriented', 'Easy to learn'], + ARRAY['Magic behavior', 'Debugging challenges', 'Less predictable'], + 'MIT', + ARRAY['React Applications', 'Rapid Development', 'Object-oriented Apps']), + +('Zustand', 'state-management', 82, 'easy', 92, 'small', 1, true, true, true, + ARRAY['Lightweight state management', 'Simple state logic', 'Hooks-based state'], + ARRAY['Minimal boilerplate', 'TypeScript friendly', 'Tiny size', 'Simple API'], + ARRAY['Smaller ecosystem', 'Less mature', 'Fewer learning resources'], + 'MIT', + ARRAY['Modern React Apps', 'Small to Medium Projects', 'Lightweight State']), + +('Recoil', 'state-management', 75, 'medium', 85, 'small', 22, true, true, true, + ARRAY['Experimental React state', 'Atomic state management', 'Complex state graphs'], + ARRAY['Atomic state model', 'React integration', 'Async state handling'], + ARRAY['Experimental status', 'Facebook dependency', 'Complex concepts'], + 'MIT', + ARRAY['Experimental Projects', 'Complex State Graphs', 'Facebook Ecosystem']), + +('Valtio', 'state-management', 78, 'easy', 90, 'small', 3, true, true, true, + ARRAY['Proxy-based state', 'Mutable state management', 'React state'], + ARRAY['Mutable API', 'Proxy-based', 'Small size', 'Simple usage'], + ARRAY['Newer library', 'Proxy limitations', 'Smaller community'], + 'MIT', + ARRAY['Modern React Apps', 'Simple State Management', 'Prototype Projects']), + +-- Testing Frameworks +('Jest', 'testing', 92, 'medium', 88, 'large', 0, true, true, true, + ARRAY['Unit testing', 'Integration testing', 'Snapshot testing'], + ARRAY['Zero config', 'Snapshot testing', 'Mocking capabilities', 'Watch mode'], + ARRAY['Slow for large codebases', 'Memory usage', 'Complex configuration'], + 'MIT', + ARRAY['JavaScript Testing', 'React Testing', 'Node.js Testing', 'Frontend Testing']), + +('Cypress', 'testing', 88, 'medium', 85, 'medium', 0, false, false, true, + ARRAY['End-to-end testing', 'Integration testing', 'Browser testing'], + ARRAY['Real browser testing', 'Time travel debugging', 'Easy setup', 'Visual testing'], + ARRAY['Only Chromium-based browsers', 'Slower than unit tests', 'Flaky tests'], + 'MIT', + ARRAY['E2E Testing', 'Integration Testing', 'Web Application Testing']), + +('Playwright', 'testing', 85, 'medium', 90, 'medium', 0, false, false, true, + ARRAY['Cross-browser testing', 'End-to-end testing', 'Automation'], + ARRAY['Multi-browser support', 'Fast execution', 'Mobile testing', 'Microsoft backing'], + ARRAY['Newer tool', 'Learning curve', 'Less mature ecosystem'], + 'Apache 2.0', + ARRAY['Cross-browser Testing', 'E2E Testing', 'Automation', 'Enterprise Testing']), + +('Testing Library', 'testing', 90, 'easy', 90, 'large', 0, true, true, true, + ARRAY['Component testing', 'User-centric testing', 'Accessibility testing'], + ARRAY['User-focused testing', 'Framework agnostic', 'Accessibility emphasis', 'Simple API'], + ARRAY['Opinionated approach', 'Limited for complex interactions'], + 'MIT', + ARRAY['React Testing', 'Component Testing', 'Accessibility Testing', 'User-focused Testing']), + +('Vitest', 'testing', 82, 'easy', 95, 'medium', 0, true, true, true, + ARRAY['Vite-powered testing', 'Unit testing', 'Fast testing'], + ARRAY['Vite integration', 'Fast execution', 'Jest compatibility', 'Modern features'], + ARRAY['Newer tool', 'Vite dependency', 'Smaller ecosystem'], + 'MIT', + ARRAY['Vite Projects', 'Modern Testing', 'Fast Unit Tests', 'Vue Testing']), + +-- UI Component Libraries +('Material-UI (MUI)', 'component-library', 90, 'medium', 85, 'large', 89, true, true, true, + ARRAY['Material Design apps', 'React components', 'Design systems'], + ARRAY['Material Design', 'Comprehensive components', 'Theming system', 'TypeScript support'], + ARRAY['Bundle size', 'Design limitations', 'Learning curve'], + 'MIT', + ARRAY['Enterprise Apps', 'Admin Dashboards', 'Material Design Apps', 'React Projects']), + +('Ant Design', 'component-library', 88, 'medium', 83, 'large', 120, true, true, true, + ARRAY['Enterprise applications', 'Admin interfaces', 'Data-heavy apps'], + ARRAY['Enterprise focus', 'Rich components', 'Comprehensive', 'Good documentation'], + ARRAY['Large bundle size', 'Chinese design language', 'Less customizable'], + 'MIT', + ARRAY['Enterprise Apps', 'Admin Dashboards', 'Data Management', 'Business Applications']), + +('Chakra UI', 'component-library', 85, 'easy', 88, 'medium', 45, true, true, true, + ARRAY['Modern React apps', 'Accessibility-focused', 'Component systems'], + ARRAY['Accessibility first', 'Modular design', 'Easy customization', 'TypeScript support'], + ARRAY['React only', 'Smaller component set', 'Newer library'], + 'MIT', + ARRAY['Accessible Apps', 'Modern React Apps', 'Custom Design Systems']), + +('React Bootstrap', 'component-library', 82, 'easy', 80, 'medium', 65, true, false, true, + ARRAY['Bootstrap + React', 'Familiar Bootstrap styling', 'Legacy applications'], + ARRAY['Bootstrap familiarity', 'Easy migration', 'Comprehensive components'], + ARRAY['Bootstrap limitations', 'Less modern approach', 'jQuery legacy issues'], + 'MIT', + ARRAY['Bootstrap Migration', 'Legacy Projects', 'Familiar UI Patterns']), + +('Semantic UI React', 'component-library', 78, 'medium', 82, 'medium', 78, true, false, true, + ARRAY['Semantic HTML', 'Natural language API', 'jQuery-free React'], + ARRAY['Natural language classes', 'Semantic HTML', 'Good theming'], + ARRAY['Large bundle', 'Development stalled', 'Complex CSS'], + 'MIT', + ARRAY['Semantic Web', 'Natural Language APIs', 'Legacy Semantic UI']), + +-- Mobile & Desktop Frameworks +('Electron', 'desktop', 85, 'medium', 75, 'large', 150000, false, false, true, + ARRAY['Desktop applications', 'Cross-platform desktop', 'Web to desktop'], + ARRAY['Web technologies', 'Cross-platform', 'Rapid development', 'Large ecosystem'], + ARRAY['Resource heavy', 'Security concerns', 'Large app size'], + 'MIT', + ARRAY['Desktop Apps', 'Cross-platform Desktop', 'Web-based Desktop']), + +('Tauri', 'desktop', 80, 'hard', 92, 'small', 10000, false, false, true, + ARRAY['Lightweight desktop apps', 'Rust-powered desktop', 'Secure desktop apps'], + ARRAY['Small bundle size', 'Security focused', 'Performance', 'Rust backend'], + ARRAY['Rust learning curve', 'Newer ecosystem', 'Complex setup'], + 'Apache 2.0', + ARRAY['Secure Desktop Apps', 'Performance Desktop', 'Rust Ecosystem']), + +('Flutter Web', 'cross-platform', 78, 'hard', 85, 'medium', 0, true, false, false, + ARRAY['Cross-platform web', 'Mobile to web', 'Dart applications'], + ARRAY['Cross-platform consistency', 'High performance', 'Single codebase'], + ARRAY['Large bundle size', 'SEO challenges', 'Dart language barrier'], + 'BSD-3-Clause', + ARRAY['Cross-platform Apps', 'Mobile-first Web', 'Dart Ecosystem']), + +('Capacitor', 'mobile', 82, 'medium', 80, 'medium', 0, true, false, true, + ARRAY['Hybrid mobile apps', 'Web to mobile', 'Progressive Web Apps'], + ARRAY['Web technologies', 'Plugin ecosystem', 'Modern approach', 'PWA integration'], + ARRAY['Performance vs native', 'Platform limitations', 'WebView dependency'], + 'MIT', + ARRAY['Hybrid Mobile', 'PWA to Mobile', 'Cross-platform Mobile']), + +('PhoneGap/Cordova', 'mobile', 70, 'medium', 70, 'medium', 0, true, false, false, + ARRAY['Legacy mobile apps', 'Hybrid applications', 'Cross-platform mobile'], + ARRAY['Mature platform', 'Plugin ecosystem', 'Cross-platform'], + ARRAY['Performance issues', 'Declining popularity', 'WebView limitations'], + 'Apache 2.0', + ARRAY['Legacy Mobile Apps', 'Cross-platform Mobile', 'Hybrid Development']), + +-- Animation & Graphics +('Three.js', 'graphics', 92, 'hard', 95, 'large', 580, true, false, false, + ARRAY['3D graphics', 'WebGL applications', 'Interactive visualizations'], + ARRAY['Powerful 3D capabilities', 'WebGL abstraction', 'Large community', 'Extensive features'], + ARRAY['Steep learning curve', 'Large bundle', 'Complex for simple use'], + 'MIT', + ARRAY['3D Visualization', 'Games', 'Interactive Art', 'Data Visualization']), + +('Framer Motion', 'animation', 88, 'medium', 90, 'medium', 32, true, true, true, + ARRAY['React animations', 'Page transitions', 'Interactive animations'], + ARRAY['React integration', 'Declarative animations', 'Gesture support', 'Layout animations'], + ARRAY['React only', 'Bundle size', 'Performance with complex animations'], + 'MIT', + ARRAY['React Animations', 'Interactive UIs', 'Page Transitions', 'Micro-interactions']), + +('GSAP', 'animation', 95, 'medium', 98, 'large', 165, true, false, false, + ARRAY['Complex animations', 'Timeline animations', 'Performance animations'], + ARRAY['Industry standard', 'Excellent performance', 'Timeline control', 'Cross-browser'], + ARRAY['Commercial license for some features', 'Learning curve', 'Bundle size'], + 'Custom', + ARRAY['Animation-heavy Sites', 'Interactive Media', 'Advertising', 'Creative Agencies']), + +('Lottie Web', 'animation', 85, 'easy', 88, 'medium', 145, true, false, false, + ARRAY['After Effects animations', 'SVG animations', 'Icon animations'], + ARRAY['After Effects integration', 'Vector animations', 'Small file sizes', 'Interactive animations'], + ARRAY['After Effects dependency', 'Limited to vector', 'Complexity for simple animations'], + 'MIT', + ARRAY['Icon Animations', 'Micro-interactions', 'Loading Animations', 'Brand Animations']), + +('Anime.js', 'animation', 80, 'easy', 85, 'medium', 14, true, false, false, + ARRAY['Lightweight animations', 'CSS animations', 'DOM animations'], + ARRAY['Lightweight', 'Simple API', 'CSS and JS animations', 'Timeline support'], + ARRAY['Less features than GSAP', 'Smaller community'], + 'MIT', + ARRAY['Simple Animations', 'Lightweight Projects', 'CSS Animations']), + +-- Data Visualization +('D3.js', 'visualization', 95, 'hard', 95, 'large', 250, true, false, false, + ARRAY['Data visualization', 'Custom charts', 'Interactive graphics'], + ARRAY['Unlimited customization', 'Data binding', 'SVG manipulation', 'Powerful selections'], + ARRAY['Steep learning curve', 'Verbose syntax', 'Time-consuming development'], + 'BSD-3-Clause', + ARRAY['Data Visualization', 'Interactive Charts', 'Scientific Visualization', 'Business Intelligence']), + +('Chart.js', 'visualization', 88, 'easy', 85, 'large', 65, true, false, false, + ARRAY['Simple charts', 'Dashboard charts', 'Responsive charts'], + ARRAY['Easy to use', 'Responsive', 'Good documentation', 'Plugin ecosystem'], + ARRAY['Limited customization', 'Performance with large datasets', 'Canvas-based only'], + 'MIT', + ARRAY['Dashboards', 'Simple Analytics', 'Business Reports', 'Admin Panels']), + +('Plotly.js', 'visualization', 90, 'medium', 88, 'medium', 3400, true, false, false, + ARRAY['Scientific visualization', 'Interactive plots', 'Statistical charts'], + ARRAY['Scientific focus', 'Interactive charts', 'Statistical functions', '3D plotting'], + ARRAY['Large bundle size', 'Complex for simple charts', 'Commercial licensing'], + 'MIT', + ARRAY['Scientific Applications', 'Data Analysis', 'Research', 'Interactive Dashboards']), + +('Recharts', 'visualization', 85, 'easy', 83, 'medium', 95, true, true, true, + ARRAY['React charts', 'Dashboard components', 'Responsive charts'], + ARRAY['React integration', 'Declarative', 'Responsive', 'Composable'], + ARRAY['React only', 'Limited chart types', 'SVG performance'], + 'MIT', + ARRAY['React Applications', 'Dashboards', 'Analytics', 'Business Intelligence']), + +('Victory', 'visualization', 82, 'medium', 85, 'medium', 180, true, true, true, + ARRAY['React/React Native charts', 'Mobile charts', 'Animated charts'], + ARRAY['React/RN support', 'Animation support', 'Modular', 'Themeable'], + ARRAY['Large bundle', 'Complex API', 'Performance concerns'], + 'MIT', + ARRAY['React Applications', 'Mobile Charts', 'Animated Visualizations']), + +-- Web Components & Micro Frontends +('Lit', 'web-components', 85, 'medium', 90, 'medium', 15, true, true, true, + ARRAY['Web components', 'Custom elements', 'Reusable components'], + ARRAY['Standards-based', 'Lightweight', 'Framework agnostic', 'TypeScript support'], + ARRAY['Browser support limitations', 'Smaller ecosystem', 'Learning curve'], + 'BSD-3-Clause', + ARRAY['Component Libraries', 'Design Systems', 'Cross-framework Components']), + +('Stencil', 'web-components', 83, 'medium', 88, 'medium', 0, true, true, true, + ARRAY['Web components compiler', 'Design systems', 'Component libraries'], + ARRAY['Compiler approach', 'Framework agnostic output', 'TypeScript built-in', 'Small runtime'], + ARRAY['Ionic dependency', 'Compilation complexity', 'Smaller community'], + 'MIT', + ARRAY['Design Systems', 'Component Libraries', 'Cross-framework Solutions']), + +('Single SPA', 'micro-frontend', 80, 'hard', 85, 'small', 25, true, true, true, + ARRAY['Micro frontends', 'Application orchestration', 'Legacy integration'], + ARRAY['Framework agnostic', 'Legacy integration', 'Independent deployments', 'Team scalability'], + ARRAY['Complex setup', 'Debugging challenges', 'Performance overhead'], + 'MIT', + ARRAY['Large Organizations', 'Legacy Integration', 'Multi-team Development']), + +('Module Federation', 'micro-frontend', 78, 'hard', 88, 'small', 0, true, true, true, + ARRAY['Webpack micro frontends', 'Runtime module sharing', 'Distributed applications'], + ARRAY['Runtime sharing', 'Webpack integration', 'Dynamic imports', 'Team independence'], + ARRAY['Webpack 5 requirement', 'Complex configuration', 'Debugging complexity'], + 'MIT', + ARRAY['Enterprise Applications', 'Distributed Teams', 'Micro Frontend Architecture']), + +-- Static Site Generators +('Hugo', 'static-generator', 90, 'medium', 95, 'large', 0, false, false, false, + ARRAY['Static sites', 'Documentation', 'Blogs'], + ARRAY['Extremely fast builds', 'No runtime dependencies', 'Flexible templating', 'Large theme ecosystem'], + ARRAY['Go templating syntax', 'Limited dynamic features', 'Learning curve'], + 'Apache 2.0', + ARRAY['Static Sites', 'Documentation', 'Blogs', 'Marketing Sites']), + +('Jekyll', 'static-generator', 85, 'medium', 85, 'large', 0, false, false, false, + ARRAY['GitHub Pages', 'Blogs', 'Documentation'], + ARRAY['GitHub integration', 'Ruby ecosystem', 'Liquid templating', 'Plugin system'], + ARRAY['Ruby dependency', 'Slower builds', 'GitHub Pages limitations'], + 'MIT', + ARRAY['GitHub Pages', 'Blogs', 'Personal Sites', 'Documentation']), + +('Eleventy', 'static-generator', 82, 'easy', 90, 'medium', 0, false, false, true, + ARRAY['Static sites', 'JAMstack', 'Flexible templating'], + ARRAY['Template engine flexibility', 'JavaScript-based', 'Zero config', 'Fast builds'], + ARRAY['Smaller ecosystem', 'Less opinionated', 'Fewer themes'], + 'MIT', + ARRAY['JAMstack Sites', 'Flexible Templates', 'Developer-focused Sites']), + +('Astro', 'static-generator', 88, 'medium', 92, 'medium', 0, false, true, true, + ARRAY['Component islands', 'Multi-framework sites', 'Performance-focused sites'], + ARRAY['Component islands architecture', 'Multi-framework support', 'Excellent performance', 'Modern approach'], + ARRAY['Newer framework', 'Learning curve', 'Smaller ecosystem'], + 'MIT', + ARRAY['Performance Sites', 'Multi-framework Projects', 'Content Sites']), + +-- CMS & Headless Solutions +('Strapi', 'headless-cms', 85, 'medium', 83, 'medium', 0, false, true, true, + ARRAY['Headless CMS', 'API-first content', 'Custom admin panels'], + ARRAY['Open source', 'Customizable', 'REST and GraphQL APIs', 'Plugin ecosystem'], + ARRAY['Self-hosted complexity', 'Performance at scale', 'Security responsibilities'], + 'MIT', + ARRAY['Content Management', 'API-first Sites', 'Custom Admin Panels']), + +('Contentful', 'headless-cms', 88, 'easy', 90, 'large', 0, false, true, true, + ARRAY['Headless CMS', 'Content delivery', 'Multi-platform content'], + ARRAY['Powerful API', 'CDN delivery', 'Multi-platform', 'Developer-friendly'], + ARRAY['Pricing model', 'Vendor lock-in', 'Complex content modeling'], + 'Proprietary', + ARRAY['Content-heavy Sites', 'Multi-platform Content', 'Enterprise CMS']), + +('Sanity', 'headless-cms', 87, 'medium', 88, 'medium', 0, false, true, true, + ARRAY['Structured content', 'Real-time collaboration', 'Custom editing'], + ARRAY['Real-time collaboration', 'Flexible content modeling', 'Custom studio', 'GROQ query language'], + ARRAY['Learning curve', 'Pricing model', 'Complex for simple sites'], + 'MIT', + ARRAY['Collaborative Content', 'Custom Editorial', 'Real-time Applications']), + +-- PWA & Service Workers +('Workbox', 'pwa', 88, 'medium', 90, 'large', 0, true, false, true, + ARRAY['Service workers', 'PWA features', 'Offline functionality'], + ARRAY['Google backing', 'Comprehensive PWA tools', 'Flexible caching', 'Build tool integration'], + ARRAY['Complex configuration', 'Learning curve', 'Google dependency'], + 'MIT', + ARRAY['Progressive Web Apps', 'Offline Applications', 'Service Worker Management']), + +('PWA Builder', 'pwa', 82, 'easy', 85, 'medium', 0, true, false, false, + ARRAY['PWA conversion', 'App store publishing', 'PWA validation'], + ARRAY['Easy PWA creation', 'App store integration', 'Microsoft backing', 'Validation tools'], + ARRAY['Limited customization', 'Microsoft ecosystem focus'], + 'MIT', + ARRAY['PWA Development', 'App Store Publishing', 'PWA Validation']), + +-- E-commerce Solutions +('Shopify Storefront API', 'e-commerce', 85, 'medium', 88, 'large', 0, true, true, true, + ARRAY['Custom storefronts', 'Headless e-commerce', 'E-commerce integration'], + ARRAY['Shopify integration', 'GraphQL API', 'Payment processing', 'Inventory management'], + ARRAY['Shopify dependency', 'Pricing model', 'Limited customization'], + 'Proprietary', + ARRAY['E-commerce', 'Custom Storefronts', 'Headless Commerce']), + +('WooCommerce REST API', 'e-commerce', 82, 'medium', 80, 'large', 0, true, true, false, + ARRAY['WordPress e-commerce', 'Custom shop fronts', 'E-commerce integration'], + ARRAY['WordPress integration', 'Extensive plugins', 'Open source', 'REST API'], + ARRAY['WordPress dependency', 'Performance limitations', 'Security concerns'], + 'GPL', + ARRAY['WordPress E-commerce', 'Small Business', 'Content + Commerce']), + +('Saleor', 'e-commerce', 80, 'hard', 85, 'small', 0, false, true, true, + ARRAY['Headless e-commerce', 'GraphQL commerce', 'Custom e-commerce'], + ARRAY['GraphQL API', 'Headless architecture', 'Modern tech stack', 'Customizable'], + ARRAY['Self-hosted complexity', 'Smaller ecosystem', 'Learning curve'], + 'BSD-3-Clause', + ARRAY['Custom E-commerce', 'Headless Commerce', 'GraphQL Applications']), + +-- Real-time & Communication +('Socket.IO', 'real-time', 90, 'medium', 88, 'large', 65, true, false, false, + ARRAY['Real-time communication', 'WebSocket abstraction', 'Chat applications'], + ARRAY['Fallback mechanisms', 'Easy to use', 'Room support', 'Cross-platform'], + ARRAY['Bundle size', 'Server dependency', 'Overhead for simple use'], + 'MIT', + ARRAY['Real-time Apps', 'Chat Applications', 'Collaborative Tools', 'Live Updates']), + +('WebRTC', 'real-time', 85, 'hard', 92, 'medium', 0, true, false, false, + ARRAY['Peer-to-peer communication', 'Video calling', 'File sharing'], + ARRAY['Direct peer connection', 'Low latency', 'Browser native', 'Secure'], + ARRAY['Complex implementation', 'Browser compatibility', 'NAT traversal'], + 'W3C Standard', + ARRAY['Video Calling', 'Peer-to-peer Apps', 'File Sharing', 'Gaming']), + +('PeerJS', 'real-time', 80, 'medium', 85, 'small', 85, true, false, false, + ARRAY['Simple WebRTC', 'Peer-to-peer apps', 'Video chat'], + ARRAY['WebRTC abstraction', 'Simple API', 'Broker service', 'Easy setup'], + ARRAY['Service dependency', 'Limited features', 'Scaling challenges'], + 'MIT', + ARRAY['Simple P2P Apps', 'Video Chat', 'File Sharing', 'WebRTC Learning']), + +-- Authentication & Security +('Auth0', 'authentication', 88, 'easy', 90, 'large', 0, true, true, true, + ARRAY['User authentication', 'SSO solutions', 'Identity management'], + ARRAY['Comprehensive auth', 'Social logins', 'Enterprise features', 'SDKs for all platforms'], + ARRAY['Pricing model', 'Vendor lock-in', 'Complex for simple needs'], + 'Proprietary', + ARRAY['Enterprise Apps', 'SaaS Platforms', 'Authentication Services']), + +('Firebase Auth', 'authentication', 85, 'easy', 88, 'large', 0, true, false, false, + ARRAY['Google authentication', 'Social logins', 'Mobile authentication'], + ARRAY['Google integration', 'Multiple providers', 'Real-time', 'Mobile SDKs'], + ARRAY['Google dependency', 'Pricing model', 'Limited customization'], + 'Proprietary', + ARRAY['Google Ecosystem', 'Mobile Apps', 'Quick Authentication']), + +('NextAuth.js', 'authentication', 82, 'medium', 85, 'medium', 45, true, true, true, + ARRAY['Next.js authentication', 'OAuth integration', 'Session management'], + ARRAY['Next.js integration', 'Multiple providers', 'TypeScript support', 'Flexible'], + ARRAY['Next.js dependency', 'Configuration complexity', 'Documentation gaps'], + 'ISC', + ARRAY['Next.js Apps', 'OAuth Integration', 'Full-stack Authentication']), + +-- Performance & Monitoring +('Lighthouse', 'performance', 95, 'easy', 95, 'large', 0, true, false, false, + ARRAY['Performance auditing', 'SEO analysis', 'Accessibility testing'], + ARRAY['Comprehensive audits', 'Google backing', 'CI integration', 'Best practices'], + ARRAY['Google-focused metrics', 'Limited real-user data'], + 'Apache 2.0', + ARRAY['Performance Optimization', 'SEO Auditing', 'Accessibility Testing']), + +('Web Vitals', 'performance', 88, 'easy', 92, 'large', 3, true, false, false, + ARRAY['Core Web Vitals', 'Performance monitoring', 'UX metrics'], + ARRAY['Google recommended', 'Real user metrics', 'Small library', 'SEO impact'], + ARRAY['Google dependency', 'Limited metrics', 'Browser support'], + 'Apache 2.0', + ARRAY['Performance Monitoring', 'SEO Optimization', 'UX Measurement']), + +('Sentry', 'monitoring', 90, 'easy', 90, 'large', 0, true, true, true, + ARRAY['Error tracking', 'Performance monitoring', 'Application monitoring'], + ARRAY['Comprehensive monitoring', 'Error tracking', 'Performance insights', 'Alerting'], + ARRAY['Pricing model', 'Data privacy concerns', 'Overhead'], + 'Proprietary', + ARRAY['Production Monitoring', 'Error Tracking', 'Performance Monitoring']), + +-- API & Data Fetching +('Axios', 'http-client', 92, 'easy', 85, 'large', 32, true, true, false, + ARRAY['HTTP requests', 'API communication', 'Request/response handling'], + ARRAY['Promise-based', 'Request/response interceptors', 'Browser/Node support', 'Easy to use'], + ARRAY['Bundle size', 'Fetch API alternative exists', 'Configuration complexity'], + 'MIT', + ARRAY['API Communication', 'HTTP Requests', 'Legacy Browser Support']), + +('Fetch API', 'http-client', 95, 'easy', 92, 'large', 0, true, false, false, + ARRAY['Native HTTP requests', 'Modern API calls', 'Browser-native requests'], + ARRAY['Native browser API', 'Promise-based', 'Streaming support', 'No dependencies'], + ARRAY['Limited browser support', 'No request/response interceptors', 'Verbose error handling'], + 'Web Standard', + ARRAY['Modern Web Apps', 'Native API Calls', 'Lightweight Requests']), + +('Apollo Client', 'graphql', 88, 'hard', 85, 'large', 95, true, true, true, + ARRAY['GraphQL client', 'State management', 'Caching layer'], + ARRAY['Comprehensive GraphQL', 'Intelligent caching', 'Developer tools', 'Framework integrations'], + ARRAY['GraphQL complexity', 'Bundle size', 'Learning curve'], + 'MIT', + ARRAY['GraphQL Applications', 'Complex State Management', 'Data-heavy Apps']), + +('React Query/TanStack Query', 'data-fetching', 90, 'medium', 92, 'large', 35, true, true, true, + ARRAY['Server state management', 'Data fetching', 'Caching'], + ARRAY['Excellent caching', 'Background updates', 'Framework agnostic', 'DevTools'], + ARRAY['Learning curve', 'Opinionated approach', 'Complex for simple use'], + 'MIT', + ARRAY['Data-heavy Apps', 'Server State', 'API Integration', 'Caching Solutions']), + +('SWR', 'data-fetching', 85, 'easy', 90, 'medium', 25, true, true, true, + ARRAY['Data fetching', 'Cache management', 'Revalidation'], + ARRAY['Simple API', 'Automatic revalidation', 'TypeScript support', 'Small size'], + ARRAY['Less features than React Query', 'React-focused'], + 'MIT', + ARRAY['Simple Data Fetching', 'React Applications', 'Cache Management']), + +-- Utility Libraries +('Ramda', 'utility', 85, 'hard', 88, 'medium', 156, true, true, false, + ARRAY['Functional programming', 'Data transformation', 'Immutable operations'], + ARRAY['Functional programming', 'Currying support', 'Immutable', 'Pure functions'], + ARRAY['Bundle size', 'Functional paradigm barrier', 'Performance overhead'], + 'MIT', + ARRAY['Functional Programming', 'Data Transformation', 'Immutable Operations']), + +('RxJS', 'reactive', 90, 'hard', 88, 'large', 165, true, true, true, + ARRAY['Reactive programming', 'Event handling', 'Async operations'], + ARRAY['Powerful reactive model', 'Comprehensive operators', 'Angular integration', 'Complex event handling'], + ARRAY['Steep learning curve', 'Bundle size', 'Overkill for simple use'], + 'Apache 2.0', + ARRAY['Reactive Programming', 'Complex Event Handling', 'Angular Applications']), + +('Immutable.js', 'utility', 80, 'medium', 85, 'medium', 65, true, true, false, + ARRAY['Immutable data structures', 'State management', 'Performance optimization'], + ARRAY['Persistent data structures', 'Performance benefits', 'Immutability guarantee'], + ARRAY['Bundle size', 'API learning curve', 'JavaScript interop'], + 'MIT', + ARRAY['Immutable State', 'Performance Optimization', 'Complex State Management']), + +('Immer', 'utility', 88, 'easy', 90, 'large', 12, true, true, true, + ARRAY['Immutable updates', 'State mutations', 'Redux integration'], + ARRAY['Simple API', 'Mutable-style updates', 'Small size', 'Redux integration'], + ARRAY['Proxy limitations', 'Performance overhead', 'Magic behavior'], + 'MIT', + ARRAY['Immutable Updates', 'Redux Applications', 'State Management']), + +-- Form Libraries +('Formik', 'forms', 85, 'medium', 80, 'large', 45, true, true, true, + ARRAY['React forms', 'Form validation', 'Form state management'], + ARRAY['Comprehensive form handling', 'Validation integration', 'Field-level validation'], + ARRAY['Bundle size', 'Performance with large forms', 'Complex API'], + 'Apache 2.0', + ARRAY['React Forms', 'Complex Forms', 'Validation-heavy Forms']), + +('React Hook Form', 'forms', 90, 'easy', 92, 'large', 25, true, true, true, + ARRAY['Performant forms', 'Minimal re-renders', 'Form validation'], + ARRAY['Excellent performance', 'Minimal re-renders', 'TypeScript support', 'Small bundle'], + ARRAY['React only', 'Different mental model', 'Less mature ecosystem'], + 'MIT', + ARRAY['Performance Forms', 'React Applications', 'TypeScript Forms']), + +('Final Form', 'forms', 80, 'medium', 85, 'medium', 18, true, true, true, + ARRAY['Framework-agnostic forms', 'Subscription-based forms', 'High-performance forms'], + ARRAY['Framework agnostic', 'Subscription model', 'Performance focused'], + ARRAY['Complex API', 'Smaller ecosystem', 'Learning curve'], + 'MIT', + ARRAY['Framework-agnostic Forms', 'Performance Forms', 'Complex Form Logic']), + +-- Routing +('React Router', 'routing', 92, 'medium', 85, 'large', 25, true, true, true, + ARRAY['React routing', 'SPA navigation', 'Dynamic routing'], + ARRAY['Comprehensive routing', 'Dynamic routes', 'Nested routing', 'History management'], + ARRAY['Complex for simple needs', 'Breaking changes', 'Bundle size'], + 'MIT', + ARRAY['React SPAs', 'Complex Navigation', 'Dynamic Routing']), + +('Reach Router', 'routing', 75, 'easy', 80, 'medium', 12, true, true, true, + ARRAY['React routing', 'Accessible routing', 'Simple navigation'], + ARRAY['Accessibility focused', 'Simple API', 'Small size'], + ARRAY['Merged into React Router', 'Limited features', 'Discontinued'], + 'MIT', + ARRAY['Legacy React Apps', 'Simple Routing', 'Accessibility-focused']), + +('Vue Router', 'routing', 88, 'easy', 88, 'large', 22, true, true, true, + ARRAY['Vue.js routing', 'SPA navigation', 'Vue applications'], + ARRAY['Vue integration', 'Simple API', 'Nested routes', 'Guards'], + ARRAY['Vue dependency', 'Less flexible than React Router'], + 'MIT', + ARRAY['Vue Applications', 'Vue SPAs', 'Vue Navigation']), + +('Angular Router', 'routing', 90, 'medium', 88, 'large', 0, true, true, true, + ARRAY['Angular routing', 'Enterprise routing', 'Feature modules'], + ARRAY['Enterprise features', 'Guards and resolvers', 'Lazy loading', 'Angular integration'], + ARRAY['Angular dependency', 'Complex for simple needs', 'Learning curve'], + 'MIT', + ARRAY['Angular Applications', 'Enterprise Routing', 'Feature Modules']), + +-- Date & Time +('date-fns', 'utility', 90, 'easy', 92, 'large', 78, true, true, true, + ARRAY['Date manipulation', 'Functional date utils', 'Immutable dates'], + ARRAY['Functional approach', 'Tree-shakable', 'Immutable', 'TypeScript support'], + ARRAY['Large full bundle', 'Function naming', 'Different API paradigm'], + 'MIT', + ARRAY['Modern Date Handling', 'Functional Programming', 'Tree-shaking Projects']), + +('Luxon', 'utility', 85, 'medium', 88, 'medium', 65, true, true, true, + ARRAY['DateTime manipulation', 'Timezone handling', 'Internationalization'], + ARRAY['Modern API', 'Timezone support', 'Immutable', 'Successor to Moment'], + ARRAY['Bundle size', 'Learning curve', 'Smaller ecosystem'], + 'MIT', + ARRAY['Timezone-heavy Apps', 'International Applications', 'Modern Date Handling']), + +-- Internationalization +('React Intl', 'i18n', 88, 'medium', 85, 'large', 145, true, true, true, + ARRAY['React internationalization', 'Localization', 'Message formatting'], + ARRAY['Comprehensive i18n', 'ICU message format', 'React integration', 'Pluralization'], + ARRAY['Bundle size', 'Complex setup', 'React dependency'], + 'BSD-3-Clause', + ARRAY['International React Apps', 'Localization', 'Multi-language Apps']), + +('i18next', 'i18n', 90, 'medium', 88, 'large', 45, true, true, true, + ARRAY['Internationalization framework', 'Translation management', 'Dynamic translations'], + ARRAY['Framework agnostic', 'Plugin ecosystem', 'Dynamic loading', 'Namespace support'], + ARRAY['Complex configuration', 'Learning curve', 'Plugin dependencies'], + 'MIT', + ARRAY['Multi-language Apps', 'Translation Management', 'International Applications']), + +('React i18next', 'i18n', 87, 'medium', 88, 'large', 15, true, true, true, + ARRAY['React i18n integration', 'Translation hooks', 'Component translation'], + ARRAY['React hooks', 'i18next integration', 'Suspense support', 'TypeScript support'], + ARRAY['i18next dependency', 'React dependency', 'Configuration complexity'], + 'MIT', + ARRAY['React i18n', 'Hook-based Translation', 'Modern React Apps']), + +-- Code Quality & Linting +('ESLint', 'linting', 95, 'medium', 90, 'large', 0, true, true, true, + ARRAY['JavaScript linting', 'Code quality', 'Style enforcement'], + ARRAY['Highly configurable', 'Plugin ecosystem', 'IDE integration', 'Custom rules'], + ARRAY['Configuration complexity', 'Performance with large codebases', 'Rule conflicts'], + 'MIT', + ARRAY['All JavaScript Projects', 'Code Quality', 'Team Standards']), + +('Prettier', 'formatting', 92, 'easy', 95, 'large', 0, true, true, true, + ARRAY['Code formatting', 'Style consistency', 'Automatic formatting'], + ARRAY['Opinionated formatting', 'Language support', 'IDE integration', 'Consistent output'], + ARRAY['Limited customization', 'Formatting conflicts', 'Opinionated decisions'], + 'MIT', + ARRAY['All Projects', 'Code Consistency', 'Team Standards', 'Automated Formatting']), + +('Husky', 'git-hooks', 88, 'easy', 90, 'large', 0, false, false, false, + ARRAY['Git hooks', 'Pre-commit validation', 'Code quality gates'], + ARRAY['Easy git hooks', 'npm integration', 'Team enforcement', 'Simple setup'], + ARRAY['Git dependency', 'Team coordination needed', 'Bypass possibilities'], + 'MIT', + ARRAY['Code Quality', 'Team Development', 'Git Workflows', 'Pre-commit Validation']), + +('lint-staged', 'git-hooks', 85, 'easy', 88, 'large', 0, false, false, false, + ARRAY['Staged file linting', 'Pre-commit optimization', 'Incremental linting'], + ARRAY['Performance optimization', 'Staged files only', 'Tool integration', 'Faster commits'], + ARRAY['Git dependency', 'Configuration needed', 'Limited scope'], + 'MIT', + ARRAY['Performance Linting', 'Large Codebases', 'Team Development']); + + -- Backend Technologies Database - 200 Unique Records +INSERT INTO backend_technologies +(name, language_base, architecture_type, maturity_score, learning_curve, performance_rating, scalability_rating, memory_efficiency, concurrent_handling, api_capabilities, primary_use_cases, strengths, weaknesses, license_type, domain) +VALUES + +-- Python Frameworks & Tools +('Django', 'python', 'monolithic', 95, 'medium', 82, 88, 75, 'excellent', + ARRAY['RESTful APIs','Admin interface','ORM','Authentication'], + ARRAY['Web applications','Content management','E-commerce','Social platforms'], + ARRAY['Batteries included','Secure by default','Excellent documentation','Large community'], + ARRAY['Heavy for simple apps','Monolithic structure','Learning curve'], + 'BSD', + ARRAY['Content Management','E-commerce','Social Media','Education']), + +('FastAPI', 'python', 'microservices', 92, 'easy', 91, 89, 84, 'excellent', + ARRAY['OpenAPI','Async APIs','WebSocket','Type validation'], + ARRAY['APIs','Microservices','Real-time apps','Data science APIs'], + ARRAY['High performance','Auto documentation','Type hints','Modern Python'], + ARRAY['Relatively new','Limited ecosystem','async complexity'], + 'MIT', + ARRAY['APIs','Data Science','Startups','Real-time Systems']), + +('Tornado', 'python', 'microservices', 85, 'medium', 86, 82, 78, 'excellent', + ARRAY['WebSocket','Long polling','Async I/O','Real-time'], + ARRAY['Real-time apps','Chat systems','Gaming backends','IoT'], + ARRAY['Async networking','Scalable','WebSocket support','Lightweight'], + ARRAY['Complex async code','Smaller community','Limited features'], + 'Apache 2.0', + ARRAY['Real-time','Gaming','IoT','Chat Systems']), + +('Bottle', 'python', 'microservices', 78, 'easy', 75, 65, 88, 'good', + ARRAY['RESTful APIs','Template engine','Basic routing'], + ARRAY['Prototypes','Small APIs','Learning projects','Embedded systems'], + ARRAY['Single file','No dependencies','Simple','Fast start'], + ARRAY['Limited features','Not production ready','Small ecosystem'], + 'MIT', + ARRAY['Prototyping','Education','Embedded','Personal Projects']), + +('Pyramid', 'python', 'flexible', 88, 'hard', 80, 85, 76, 'good', + ARRAY['RESTful APIs','Flexible routing','Security','Traversal'], + ARRAY['Complex web apps','Enterprise systems','Custom solutions'], + ARRAY['Highly flexible','Good security','Traversal routing','Scalable'], + ARRAY['Complex configuration','Steep learning curve','Verbose'], + 'BSD', + ARRAY['Enterprise','Complex Applications','Custom Solutions']), + +-- JavaScript/Node.js Frameworks +('Express.js', 'javascript', 'microservices', 96, 'easy', 83, 86, 80, 'excellent', + ARRAY['RESTful APIs','Middleware','Template engines','Static serving'], + ARRAY['Web APIs','Single page apps','Microservices','Real-time apps'], + ARRAY['Minimalist','Flexible','Large ecosystem','Fast development'], + ARRAY['Callback hell','Security concerns','Performance limits'], + 'MIT', + ARRAY['Web Development','APIs','Startups','Real-time']), + +('Koa.js', 'javascript', 'microservices', 82, 'medium', 85, 84, 82, 'excellent', + ARRAY['Async/await','Middleware','Context object','Error handling'], + ARRAY['Modern APIs','Async applications','Lightweight services'], + ARRAY['Modern async/await','Lightweight','Better error handling','Context'], + ARRAY['Smaller ecosystem','Learning curve','Less middleware'], + 'MIT', + ARRAY['Modern APIs','Lightweight Services','Async Applications']), + +('Nest.js', 'typescript', 'microservices', 89, 'hard', 88, 90, 79, 'excellent', + ARRAY['GraphQL','REST APIs','WebSocket','Microservices'], + ARRAY['Enterprise apps','Scalable backends','TypeScript projects'], + ARRAY['TypeScript native','Decorators','Modular','Angular-like'], + ARRAY['Complex structure','Learning curve','Overhead'], + 'MIT', + ARRAY['Enterprise','TypeScript Projects','Scalable Systems']), + +('Fastify', 'javascript', 'microservices', 84, 'medium', 92, 87, 85, 'excellent', + ARRAY['JSON Schema','Plugins','Logging','Validation'], + ARRAY['High-performance APIs','Microservices','JSON APIs'], + ARRAY['Very fast','Low overhead','Schema validation','Plugin ecosystem'], + ARRAY['Newer framework','Smaller community','Learning curve'], + 'MIT', + ARRAY['High Performance','APIs','Microservices']), + +('Hapi.js', 'javascript', 'monolithic', 86, 'medium', 81, 83, 77, 'good', + ARRAY['Configuration','Validation','Caching','Authentication'], + ARRAY['Enterprise APIs','Complex routing','Secure applications'], + ARRAY['Configuration-centric','Built-in features','Good security','Validation'], + ARRAY['Complex configuration','Heavy','Learning curve'], + 'BSD', + ARRAY['Enterprise','Secure Applications','Complex APIs']), + +-- Java Frameworks +('Spring Boot', 'java', 'microservices', 98, 'hard', 89, 95, 81, 'excellent', + ARRAY['RESTful APIs','Security','Data access','Microservices'], + ARRAY['Enterprise apps','Microservices','Banking','E-commerce'], + ARRAY['Comprehensive','Auto-configuration','Production-ready','Ecosystem'], + ARRAY['Complex','Memory heavy','Learning curve','Verbose'], + 'Apache 2.0', + ARRAY['Enterprise','Banking','Healthcare','E-commerce']), + +('Quarkus', 'java', 'microservices', 91, 'medium', 94, 92, 88, 'excellent', + ARRAY['Cloud-native','GraalVM','Reactive','Kubernetes'], + ARRAY['Cloud applications','Serverless','Container workloads'], + ARRAY['Fast startup','Low memory','Cloud-native','GraalVM support'], + ARRAY['Relatively new','Limited ecosystem','Learning curve'], + 'Apache 2.0', + ARRAY['Cloud Native','Serverless','Containers','Modern Enterprise']), + +('Micronaut', 'java', 'microservices', 87, 'medium', 91, 90, 87, 'excellent', + ARRAY['Dependency injection','AOP','Cloud-native','GraalVM'], + ARRAY['Microservices','Serverless','Cloud functions'], + ARRAY['Compile-time DI','Fast startup','Low memory','Reactive'], + ARRAY['Newer framework','Smaller community','Documentation'], + 'Apache 2.0', + ARRAY['Microservices','Serverless','Cloud Native']), + +('Vert.x', 'java', 'reactive', 85, 'hard', 93, 91, 84, 'excellent', + ARRAY['Event-driven','Reactive','Polyglot','High concurrency'], + ARRAY['Real-time systems','IoT','High-throughput apps'], + ARRAY['High performance','Reactive','Polyglot','Event-driven'], + ARRAY['Complex programming model','Learning curve','Debugging'], + 'Apache 2.0', + ARRAY['Real-time','IoT','High Performance','Reactive Systems']), + +('Dropwizard', 'java', 'monolithic', 83, 'medium', 84, 82, 78, 'good', + ARRAY['RESTful APIs','Metrics','Health checks','Configuration'], + ARRAY['RESTful services','APIs','Microservices'], + ARRAY['Production-ready','Metrics','Simple','Opinionated'], + ARRAY['Opinionated','Limited flexibility','Jetty dependency'], + 'Apache 2.0', + ARRAY['RESTful Services','APIs','Enterprise']), + +-- C# / .NET Frameworks +('Blazor Server', 'c#', 'server-side', 88, 'medium', 85, 87, 79, 'good', + ARRAY['Real-time UI','SignalR','Component-based','Server rendering'], + ARRAY['Interactive web apps','Real-time dashboards','Enterprise UIs'], + ARRAY['Real-time updates','C# for web','Component model','Blazor ecosystem'], + ARRAY['Server dependency','Latency issues','Limited offline'], + 'MIT', + ARRAY['Enterprise','Real-time Dashboards','Interactive Web']), + +('Nancy', 'c#', 'microservices', 79, 'easy', 78, 75, 82, 'good', + ARRAY['RESTful APIs','Lightweight','Convention-based'], + ARRAY['Lightweight APIs','Prototypes','Simple web services'], + ARRAY['Lightweight','Convention over configuration','Simple','Fast'], + ARRAY['Limited features','Small community','Less support'], + 'MIT', + ARRAY['Lightweight APIs','Prototypes','Simple Services']), + +('ServiceStack', 'c#', 'service-oriented', 86, 'medium', 87, 86, 80, 'excellent', + ARRAY['Code-first APIs','Multiple formats','Auto-documentation'], + ARRAY['Service APIs','Enterprise integration','Multi-format APIs'], + ARRAY['Code-first','Multiple formats','Fast','Good tooling'], + ARRAY['Commercial license','Learning curve','Complex features'], + 'Commercial/OSS', + ARRAY['Enterprise Integration','Service APIs','Multi-format']), + +-- Go Frameworks +('Gin', 'go', 'microservices', 91, 'easy', 94, 89, 91, 'excellent', + ARRAY['RESTful APIs','JSON binding','Middleware','Routing'], + ARRAY['APIs','Microservices','High-performance services'], + ARRAY['Very fast','Simple','Lightweight','Good performance'], + ARRAY['Limited features','Small ecosystem','Go-specific'], + 'MIT', + ARRAY['High Performance APIs','Microservices','Cloud Services']), + +('Echo', 'go', 'microservices', 88, 'easy', 92, 87, 89, 'excellent', + ARRAY['RESTful APIs','Middleware','WebSocket','Template rendering'], + ARRAY['Web APIs','Real-time apps','Microservices'], + ARRAY['High performance','Minimalist','Middleware support','Fast routing'], + ARRAY['Smaller community','Limited features','Documentation'], + 'MIT', + ARRAY['Web APIs','Real-time','Microservices']), + +('Fiber', 'go', 'microservices', 85, 'easy', 95, 88, 92, 'excellent', + ARRAY['Express-like API','Fast routing','WebSocket','Middleware'], + ARRAY['High-performance APIs','Real-time services','Express migrants'], + ARRAY['Extremely fast','Express-like','Low memory','Zero allocation'], + ARRAY['Newer framework','Breaking changes','Go learning curve'], + 'MIT', + ARRAY['Extreme Performance','Real-time','High Throughput']), + +('Beego', 'go', 'monolithic', 82, 'medium', 86, 84, 85, 'good', + ARRAY['MVC','ORM','Session','Cache'], + ARRAY['Web applications','APIs','Enterprise apps'], + ARRAY['Full-featured','MVC pattern','Built-in ORM','Chinese community'], + ARRAY['Heavy framework','Complex','Documentation language'], + 'Apache 2.0', + ARRAY['Web Applications','Enterprise','Full-stack']), + +('Buffalo', 'go', 'monolithic', 80, 'medium', 83, 81, 83, 'good', + ARRAY['Rapid development','Database migrations','Asset pipeline'], + ARRAY['Web applications','Rapid prototyping','Full-stack apps'], + ARRAY['Rapid development','Rails-like','Asset pipeline','Generators'], + ARRAY['Opinionated','Learning curve','Smaller community'], + 'MIT', + ARRAY['Rapid Development','Full-stack','Prototyping']), + +-- Rust Frameworks +('Actix Web', 'rust', 'microservices', 89, 'hard', 97, 92, 95, 'excellent', + ARRAY['High performance','Actor model','WebSocket','Streaming'], + ARRAY['High-performance APIs','System services','Real-time apps'], + ARRAY['Extremely fast','Memory safe','Actor model','High concurrency'], + ARRAY['Steep learning curve','Rust complexity','Smaller ecosystem'], + 'MIT', + ARRAY['High Performance','System Services','Memory Critical']), + +('Rocket', 'rust', 'monolithic', 86, 'hard', 93, 89, 94, 'good', + ARRAY['Type-safe routing','Request guards','Code generation'], + ARRAY['Web applications','Type-safe APIs','System services'], + ARRAY['Type safety','Code generation','Rust safety','Good ergonomics'], + ARRAY['Nightly Rust','Learning curve','Compilation time'], + 'MIT', + ARRAY['Type-safe APIs','System Services','Safety Critical']), + +('Warp', 'rust', 'microservices', 84, 'hard', 94, 90, 93, 'excellent', + ARRAY['Filter-based','Composable','High performance','Type-safe'], + ARRAY['High-performance APIs','Composable services','System APIs'], + ARRAY['Composable filters','High performance','Type safe','Functional'], + ARRAY['Complex filter composition','Learning curve','Documentation'], + 'MIT', + ARRAY['Composable APIs','High Performance','Functional Style']), + +('Axum', 'rust', 'microservices', 87, 'hard', 95, 91, 94, 'excellent', + ARRAY['Tower ecosystem','Type-safe extractors','Async','Modular'], + ARRAY['Modern web services','Type-safe APIs','Async applications'], + ARRAY['Tower integration','Type safety','Modern async','Ergonomic'], + ARRAY['New framework','Learning curve','Rust complexity'], + 'MIT', + ARRAY['Modern APIs','Type Safety','Async Services']), + +-- PHP Frameworks +('Laravel', 'php', 'monolithic', 96, 'medium', 79, 82, 71, 'good', + ARRAY['Eloquent ORM','Artisan CLI','Blade templates','Queue system'], + ARRAY['Web applications','APIs','E-commerce','Content management'], + ARRAY['Elegant syntax','Rich ecosystem','Excellent documentation','Rapid development'], + ARRAY['Performance overhead','Memory usage','Framework weight'], + 'MIT', + ARRAY['Web Development','E-commerce','Content Management','Startups']), + +('Symfony', 'php', 'component-based', 94, 'hard', 81, 85, 73, 'good', + ARRAY['Component system','Dependency injection','Flexible routing'], + ARRAY['Enterprise applications','Component libraries','Complex systems'], + ARRAY['Highly flexible','Component-based','Best practices','Long-term support'], + ARRAY['Complex configuration','Learning curve','Overhead'], + 'MIT', + ARRAY['Enterprise','Component Libraries','Complex Applications']), + +('CodeIgniter', 'php', 'monolithic', 87, 'easy', 76, 75, 78, 'good', + ARRAY['Simple MVC','Database abstraction','Form validation'], + ARRAY['Small applications','Learning projects','Rapid prototyping'], + ARRAY['Simple','Small footprint','Easy to learn','Good documentation'], + ARRAY['Limited features','Not modern','Smaller ecosystem'], + 'MIT', + ARRAY['Small Applications','Learning','Rapid Prototyping']), + +('Phalcon', 'php', 'monolithic', 83, 'medium', 91, 84, 87, 'good', + ARRAY['C extension','Full-stack','High performance','ORM'], + ARRAY['High-performance web apps','APIs','Full-stack applications'], + ARRAY['Very fast','C extension','Full-featured','Low resource usage'], + ARRAY['C extension dependency','Complex installation','Learning curve'], + 'BSD', + ARRAY['High Performance','Full-stack','Performance Critical']), + +('Slim Framework', 'php', 'microservices', 85, 'easy', 82, 78, 84, 'good', + ARRAY['RESTful APIs','Routing','Middleware','PSR standards'], + ARRAY['APIs','Microservices','Lightweight applications'], + ARRAY['Lightweight','PSR compliant','Simple','Fast routing'], + ARRAY['Limited features','Minimal ecosystem','Basic functionality'], + 'MIT', + ARRAY['APIs','Microservices','Lightweight Services']), + +-- Ruby Frameworks +('Sinatra', 'ruby', 'microservices', 89, 'easy', 78, 75, 76, 'good', + ARRAY['Simple routing','Template rendering','Lightweight'], + ARRAY['Small applications','APIs','Prototypes'], + ARRAY['Minimalist','Easy to learn','Flexible','Quick setup'], + ARRAY['Limited features','Not scalable','Basic functionality'], + 'MIT', + ARRAY['Small Applications','Prototypes','Simple APIs']), + +('Hanami', 'ruby', 'modular', 81, 'medium', 84, 83, 80, 'good', + ARRAY['Clean architecture','Modular','Functional programming'], + ARRAY['Clean applications','Modular systems','Alternative to Rails'], + ARRAY['Clean architecture','Thread-safe','Modular','Functional approach'], + ARRAY['Smaller community','Learning curve','Limited ecosystem'], + 'MIT', + ARRAY['Clean Architecture','Modular Systems','Alternative Framework']), + +('Grape', 'ruby', 'api-focused', 84, 'medium', 80, 79, 77, 'good', + ARRAY['RESTful APIs','Versioning','Documentation','Validation'], + ARRAY['REST APIs','API versioning','Microservices'], + ARRAY['API-focused','Built-in documentation','Versioning','DSL'], + ARRAY['API-only','Limited web features','DSL learning'], + 'MIT', + ARRAY['REST APIs','API Services','Microservices']), + +('Roda', 'ruby', 'tree-routing', 78, 'medium', 82, 78, 81, 'good', + ARRAY['Tree routing','Plugin system','Lightweight'], + ARRAY['Web applications','Flexible routing','Plugin-based apps'], + ARRAY['Tree routing','Plugin architecture','Lightweight','Flexible'], + ARRAY['Smaller community','Learning curve','Limited ecosystem'], + 'MIT', + ARRAY['Flexible Routing','Plugin-based','Lightweight Web']), + +-- Scala Frameworks +('Play Framework', 'scala', 'reactive', 90, 'hard', 88, 89, 82, 'excellent', + ARRAY['Reactive','Non-blocking I/O','Hot reloading','RESTful'], + ARRAY['Reactive applications','Real-time systems','Enterprise web apps'], + ARRAY['Reactive programming','Hot reloading','Scala/Java','Non-blocking'], + ARRAY['Complex','Learning curve','Memory usage','Compilation time'], + 'Apache 2.0', + ARRAY['Reactive Systems','Real-time','Enterprise']), + +('Akka HTTP', 'scala', 'reactive', 87, 'hard', 91, 92, 85, 'excellent', + ARRAY['Actor model','Streaming','High concurrency','Reactive'], + ARRAY['High-throughput APIs','Streaming services','Actor-based systems'], + ARRAY['Actor model','High performance','Streaming','Reactive'], + ARRAY['Complex programming model','Learning curve','Akka ecosystem'], + 'Apache 2.0', + ARRAY['High Throughput','Streaming','Actor Systems']), + +('Finatra', 'scala', 'microservices', 83, 'medium', 89, 87, 84, 'good', + ARRAY['Twitter-style APIs','Dependency injection','Fast','Testing'], + ARRAY['Microservices','Twitter-scale APIs','Fast services'], + ARRAY['High performance','Twitter proven','Good testing','DI'], + ARRAY['Twitter-specific','Learning curve','Limited documentation'], + 'Apache 2.0', + ARRAY['Microservices','High Scale','Fast APIs']), + +-- Kotlin Frameworks +('Ktor', 'kotlin', 'coroutine-based', 86, 'medium', 90, 88, 86, 'excellent', + ARRAY['Coroutines','Multiplatform','DSL','Lightweight'], + ARRAY['Multiplatform services','Async applications','Kotlin-first APIs'], + ARRAY['Coroutines','Kotlin DSL','Multiplatform','Lightweight'], + ARRAY['Kotlin-specific','Smaller ecosystem','New framework'], + 'Apache 2.0', + ARRAY['Multiplatform','Kotlin Projects','Async Services']), + +('Spring WebFlux', 'kotlin', 'reactive', 89, 'hard', 91, 93, 83, 'excellent', + ARRAY['Reactive streams','Non-blocking','Functional routing'], + ARRAY['Reactive applications','High-concurrency services','Non-blocking APIs'], + ARRAY['Reactive programming','Non-blocking','High concurrency','Spring ecosystem'], + ARRAY['Reactive complexity','Learning curve','Debugging difficulty'], + 'Apache 2.0', + ARRAY['Reactive Systems','High Concurrency','Non-blocking']), + +-- Clojure Frameworks +('Ring', 'clojure', 'functional', 82, 'hard', 85, 83, 88, 'good', + ARRAY['Functional middleware','HTTP abstraction','Composable'], + ARRAY['Functional web apps','Composable services','Clojure applications'], + ARRAY['Functional approach','Composable','Simple abstraction','Immutable'], + ARRAY['Functional paradigm','Learning curve','Smaller ecosystem'], + 'Eclipse Public', + ARRAY['Functional Programming','Composable Services','Clojure Apps']), + +('Luminus', 'clojure', 'template-based', 79, 'hard', 82, 80, 86, 'good', + ARRAY['Template generation','Full-stack','Clojure best practices'], + ARRAY['Full-stack Clojure apps','Web applications','Rapid development'], + ARRAY['Clojure best practices','Template-based','Full-stack','Batteries included'], + ARRAY['Opinionated','Clojure learning curve','Template dependency'], + 'Eclipse Public', + ARRAY['Full-stack Clojure','Web Applications','Rapid Development']), + +-- Erlang/Elixir Frameworks +('Phoenix', 'elixir', 'concurrent', 92, 'medium', 89, 94, 90, 'excellent', + ARRAY['LiveView','Channels','Fault-tolerant','Real-time'], + ARRAY['Real-time applications','Chat systems','IoT platforms','Distributed systems'], + ARRAY['Fault tolerance','Real-time features','Scalability','LiveView'], + ARRAY['Elixir learning curve','Smaller ecosystem','BEAM VM dependency'], + 'MIT', + ARRAY['Real-time','Chat Systems','IoT','Distributed Systems']), + +('Cowboy', 'erlang', 'concurrent', 85, 'hard', 88, 91, 89, 'excellent', + ARRAY['WebSocket','HTTP/2','Ranch connection pooling'], + ARRAY['High-concurrency servers','WebSocket services','Erlang applications'], + ARRAY['High concurrency','Fault tolerance','WebSocket','HTTP/2'], + ARRAY['Erlang learning curve','Limited web features','Low-level'], + 'ISC', + ARRAY['High Concurrency','WebSocket','Fault Tolerant']), + +-- Haskell Frameworks +('Servant', 'haskell', 'type-safe', 81, 'very hard', 86, 84, 91, 'good', + ARRAY['Type-safe APIs','Automatic documentation','Client generation'], + ARRAY['Type-safe APIs','Functional web services','Academic projects'], + ARRAY['Type safety','Automatic documentation','Functional','Composable'], + ARRAY['Very steep learning curve','Haskell complexity','Small ecosystem'], + 'BSD', + ARRAY['Type-safe APIs','Functional Programming','Academic']), + +('Yesod', 'haskell', 'type-safe', 78, 'very hard', 83, 82, 90, 'good', + ARRAY['Type-safe routing','Template system','Database integration'], + ARRAY['Type-safe web applications','Functional web development'], + ARRAY['Type safety','Compile-time guarantees','Functional','Performance'], + ARRAY['Extreme learning curve','Haskell expertise required','Complex'], + 'BSD', + ARRAY['Type-safe Web','Functional Development','Academic']), + +-- Crystal Frameworks +('Kemal', 'crystal', 'sinatra-like', 79, 'medium', 92, 85, 93, 'good', + ARRAY['Sinatra-like syntax','WebSocket','Middleware'], + ARRAY['High-performance web apps','APIs','Ruby-like syntax with speed'], + ARRAY['Ruby-like syntax','High performance','Low memory','Fast compilation'], + ARRAY['Small ecosystem','Crystal learning curve','Limited libraries'], + 'MIT', + ARRAY['High Performance','Ruby-like','Fast APIs']), + +('Lucky', 'crystal', 'type-safe', 76, 'medium', 90, 83, 92, 'good', + ARRAY['Type-safe queries','Compile-time checks','Action-based'], + ARRAY['Type-safe web applications','Database-heavy apps'], + ARRAY['Type safety','Compile-time checks','High performance','Crystal benefits'], + ARRAY['Very new','Small community','Crystal ecosystem'], + 'MIT', + ARRAY['Type-safe Web','High Performance','Database Apps']), + +-- Nim Frameworks +('Jester', 'nim', 'sinatra-like', 74, 'medium', 91, 82, 94, 'good', + ARRAY['Sinatra-like routing','Async support','Template engine'], + ARRAY['High-performance web services','Async applications'], + ARRAY['High performance','Low memory','Async support','Simple syntax'], + ARRAY['Small ecosystem','Nim learning curve','Limited community'], + 'MIT', + ARRAY['High Performance','Low Memory','Async Services']), + +-- Dart Frameworks +('Shelf', 'dart', 'middleware-based', 77, 'easy', 84, 81, 87, 'good', + ARRAY['Middleware composition','HTTP server','Request/response'], + ARRAY['Dart web services','Server-side Dart','Microservices'], + ARRAY['Middleware composition','Dart ecosystem','Simple','Composable'], + ARRAY['Dart learning curve','Smaller web ecosystem','Limited features'], + 'BSD', + ARRAY['Dart Services','Composable Middleware','Simple APIs']), + +('Angel3', 'dart', 'full-stack', 75, 'medium', 82, 79, 85, 'good', + ARRAY['ORM','Real-time','GraphQL','Authentication'], + ARRAY['Full-stack Dart applications','Real-time apps'], + ARRAY['Full-stack Dart','Real-time features','GraphQL','Modern features'], + ARRAY['Small community','Dart web ecosystem','Documentation'], + 'MIT', + ARRAY['Full-stack Dart','Real-time','Modern Web']), + +-- Swift Frameworks +('Vapor', 'swift', 'server-side', 83, 'medium', 87, 85, 88, 'good', + ARRAY['Swift on server','Non-blocking','Fluent ORM','WebSocket'], + ARRAY['iOS backend services','Swift-based APIs','Apple ecosystem'], + ARRAY['Swift language','Type safety','Performance','Apple ecosystem'], + ARRAY['Swift server adoption','Smaller ecosystem','Apple dependency'], + 'MIT', + ARRAY['iOS Backends','Swift Services','Apple Ecosystem']), + +('Perfect', 'swift', 'server-side', 76, 'medium', 84, 80, 86, 'good', + ARRAY['HTTP server','WebSocket','Database connectors'], + ARRAY['Swift server applications','iOS companion services'], + ARRAY['Swift performance','Cross-platform','HTTP/2 support'], + ARRAY['Limited community','Swift server market','Documentation'], + 'Apache 2.0', + ARRAY['Swift Server','Cross-platform','iOS Companion']), + +-- F# Frameworks +('Giraffe', 'f#', 'functional', 79, 'hard', 86, 84, 87, 'good', + ARRAY['Functional composition','ASP.NET Core','HTTP handlers'], + ARRAY['Functional web applications','F# web services'], + ARRAY['Functional programming','ASP.NET Core integration','Composable','Type safety'], + ARRAY['F# learning curve','Smaller ecosystem','Functional paradigm'], + 'MIT', + ARRAY['Functional Web','F# Services','Composable APIs']), + +('Saturn', 'f#', 'mvc-functional', 77, 'hard', 84, 82, 86, 'good', + ARRAY['MVC pattern','Functional approach','ASP.NET Core'], + ARRAY['F# web applications','Functional MVC apps'], + ARRAY['Functional MVC','F# benefits','Type safety','ASP.NET Core'], + ARRAY['F# learning curve','Small community','Functional complexity'], + 'MIT', + ARRAY['Functional MVC','F# Web','Type-safe Applications']), + +-- OCaml Frameworks +('Dream', 'ocaml', 'async', 76, 'hard', 88, 83, 89, 'good', + ARRAY['Async programming','Type safety','WebSocket','Sessions'], + ARRAY['Type-safe web applications','OCaml web services'], + ARRAY['Type safety','OCaml performance','Async programming','Memory safety'], + ARRAY['OCaml learning curve','Small ecosystem','Academic focus'], + 'MIT', + ARRAY['Type-safe Web','OCaml Services','Academic Projects']), + +-- Zig Frameworks +('zap', 'zig', 'low-level', 72, 'hard', 95, 87, 96, 'basic', + ARRAY['HTTP server','Low-level control','High performance'], + ARRAY['System-level web services','High-performance APIs'], + ARRAY['Extreme performance','Low memory','System control','No runtime'], + ARRAY['Very new','Zig learning curve','Minimal features','Small community'], + 'MIT', + ARRAY['System Level','Extreme Performance','Low-level Control']), + +-- C/C++ Frameworks +('Crow', 'cpp', 'header-only', 78, 'hard', 96, 88, 95, 'good', + ARRAY['Header-only','Fast routing','Middleware','WebSocket'], + ARRAY['High-performance services','System APIs','Embedded web servers'], + ARRAY['Header-only','Extremely fast','Low overhead','C++ performance'], + ARRAY['C++ complexity','Manual memory management','Limited features'], + 'BSD', + ARRAY['High Performance','System APIs','Embedded Systems']), + +('Drogon', 'cpp', 'async', 81, 'hard', 97, 90, 96, 'excellent', + ARRAY['Async programming','HTTP/1.1 & HTTP/2','WebSocket','Database ORM'], + ARRAY['High-performance web applications','Real-time services'], + ARRAY['Extremely fast','Async I/O','HTTP/2','Modern C++'], + ARRAY['C++ complexity','Manual memory management','Learning curve'], + 'MIT', + ARRAY['High Performance','Real-time','Modern C++']), + +('cpp-httplib', 'cpp', 'header-only', 75, 'medium', 94, 85, 94, 'good', + ARRAY['Single header','HTTP client/server','Simple API'], + ARRAY['Embedded HTTP servers','C++ applications','Simple web services'], + ARRAY['Single header file','Simple API','No dependencies','C++ performance'], + ARRAY['Limited features','C++ requirements','Basic functionality'], + 'MIT', + ARRAY['Embedded Systems','Simple HTTP','C++ Applications']), + +-- Lua Frameworks +('OpenResty', 'lua', 'nginx-based', 88, 'medium', 93, 92, 90, 'excellent', + ARRAY['Nginx integration','High performance','Scripting','Load balancing'], + ARRAY['High-performance web services','API gateways','Reverse proxies'], + ARRAY['Nginx performance','Lua scripting','High concurrency','Battle-tested'], + ARRAY['Nginx dependency','Lua learning curve','Configuration complexity'], + 'BSD', + ARRAY['API Gateways','High Performance','Load Balancing']), + +('Lapis', 'lua', 'mvc', 76, 'medium', 86, 80, 88, 'good', + ARRAY['MVC framework','OpenResty based','Database ORM'], + ARRAY['Lua web applications','High-performance web apps'], + ARRAY['Lua performance','OpenResty integration','MVC pattern'], + ARRAY['Lua learning curve','Smaller ecosystem','Documentation'], + 'MIT', + ARRAY['Lua Web Apps','High Performance','MVC Pattern']), + +-- Perl Frameworks +('Mojolicious', 'perl', 'real-time', 84, 'medium', 82, 83, 81, 'good', + ARRAY['Real-time web','WebSocket','Non-blocking I/O'], + ARRAY['Real-time applications','Web scraping','Perl web services'], + ARRAY['Real-time features','Non-blocking','Perl ecosystem','WebSocket'], + ARRAY['Perl learning curve','Declining popularity','Limited modern adoption'], + 'Artistic 2.0', + ARRAY['Real-time Web','Web Scraping','Perl Services']), + +('Dancer2', 'perl', 'lightweight', 80, 'easy', 78, 76, 79, 'good', + ARRAY['Lightweight','Route-based','Template system'], + ARRAY['Small web applications','Perl web services','Rapid prototyping'], + ARRAY['Lightweight','Easy to learn','Route-based','Perl simplicity'], + ARRAY['Perl decline','Limited features','Smaller community'], + 'Artistic 2.0', + ARRAY['Small Web Apps','Perl Services','Rapid Prototyping']), + +-- Additional Python Frameworks +('Sanic', 'python', 'async', 86, 'medium', 89, 87, 83, 'excellent', + ARRAY['Async/await','High performance','WebSocket','Middleware'], + ARRAY['Async web applications','High-performance APIs','Real-time services'], + ARRAY['High performance','Async/await','Flask-like syntax','Fast development'], + ARRAY['Python GIL limitations','Async complexity','Smaller ecosystem'], + 'MIT', + ARRAY['Async Applications','High Performance','Real-time APIs']), + +('CherryPy', 'python', 'object-oriented', 82, 'medium', 79, 78, 77, 'good', + ARRAY['Object-oriented','HTTP server','Configuration','Threading'], + ARRAY['Python web applications','Embedded web servers','Desktop app backends'], + ARRAY['Object-oriented','HTTP server included','Configuration system','Threading'], + ARRAY['Older design patterns','Performance limitations','Smaller community'], + 'BSD', + ARRAY['Python Web Apps','Embedded Servers','Desktop Backends']), + +('Web2py', 'python', 'batteries-included', 79, 'easy', 74, 75, 70, 'good', + ARRAY['Web-based IDE','Database abstraction','Security features'], + ARRAY['Rapid development','Educational projects','Small business apps'], + ARRAY['Web IDE','No installation required','Security built-in','Simple'], + ARRAY['Performance issues','Less modern','Limited scalability'], + 'LGPL', + ARRAY['Rapid Development','Education','Small Business']), + +('Starlette', 'python', 'asgi', 85, 'medium', 90, 88, 85, 'excellent', + ARRAY['ASGI framework','WebSocket','Background tasks','Test client'], + ARRAY['Async web applications','ASGI applications','Modern Python services'], + ARRAY['ASGI standard','Async support','Lightweight','Modern Python'], + ARRAY['ASGI complexity','Async learning curve','Minimal features'], + 'BSD', + ARRAY['ASGI Applications','Async Services','Modern Python']), + +-- Additional Node.js/JavaScript Frameworks +('AdonisJS', 'javascript', 'mvc', 87, 'medium', 83, 85, 78, 'good', + ARRAY['MVC architecture','ORM','Authentication','Real-time'], + ARRAY['Full-stack applications','Enterprise Node.js apps','APIs'], + ARRAY['Laravel-like','Full-featured','TypeScript support','Good structure'], + ARRAY['Learning curve','Heavy framework','TypeScript complexity'], + 'MIT', + ARRAY['Full-stack Node','Enterprise','TypeScript Applications']), + +('LoopBack', 'javascript', 'api-first', 89, 'medium', 84, 87, 80, 'excellent', + ARRAY['API-first','Model-driven','Connectors','Explorer UI'], + ARRAY['Enterprise APIs','Model-driven development','Database connectivity'], + ARRAY['API-first approach','Model-driven','IBM backing','Connectors'], + ARRAY['Complex for simple apps','Learning curve','Enterprise focus'], + 'MIT', + ARRAY['Enterprise APIs','Model-driven','Database Integration']), + +('Meteor', 'javascript', 'full-stack', 83, 'medium', 77, 79, 73, 'good', + ARRAY['Full-stack','Real-time','MongoDB integration','Blaze templates'], + ARRAY['Real-time applications','Rapid prototyping','Full-stack JavaScript'], + ARRAY['Full-stack JavaScript','Real-time by default','Rapid development','Integrated'], + ARRAY['Monolithic','Performance issues','Limited database options','Declining popularity'], + 'MIT', + ARRAY['Real-time Apps','Rapid Prototyping','Full-stack JavaScript']), + +('Total.js', 'javascript', 'cms-framework', 81, 'medium', 80, 82, 76, 'good', + ARRAY['CMS capabilities','E-commerce','Real-time','NoSQL'], + ARRAY['CMS applications','E-commerce platforms','Business applications'], + ARRAY['CMS features','E-commerce ready','Real-time','No dependencies'], + ARRAY['Smaller community','Documentation','Limited ecosystem'], + 'MIT', + ARRAY['CMS Applications','E-commerce','Business Apps']), + +-- Additional Java Frameworks +('Helidon', 'java', 'cloud-native', 84, 'medium', 88, 89, 85, 'excellent', + ARRAY['Cloud-native','Reactive','MicroProfile','GraalVM'], + ARRAY['Cloud applications','Microservices','Oracle cloud services'], + ARRAY['Cloud-native','MicroProfile','GraalVM support','Oracle backing'], + ARRAY['Oracle ecosystem','Newer framework','Limited community'], + 'Apache 2.0', + ARRAY['Cloud Native','Oracle Ecosystem','Microservices']), + +('Javalin', 'java', 'lightweight', 82, 'easy', 86, 84, 83, 'good', + ARRAY['Lightweight','Kotlin support','WebSocket','OpenAPI'], + ARRAY['Simple web services','Educational projects','Kotlin/Java APIs'], + ARRAY['Lightweight','Kotlin support','Easy to learn','Modern Java'], + ARRAY['Limited features','Smaller ecosystem','Simple use cases only'], + 'Apache 2.0', + ARRAY['Simple APIs','Education','Kotlin/Java Services']), + +('Ratpack', 'java', 'reactive', 80, 'hard', 90, 88, 84, 'excellent', + ARRAY['Reactive','Non-blocking','Netty-based','Functional'], + ARRAY['High-performance services','Reactive applications','Non-blocking APIs'], + ARRAY['High performance','Reactive programming','Netty-based','Functional'], + ARRAY['Complex reactive model','Learning curve','Netty knowledge required'], + 'Apache 2.0', + ARRAY['High Performance','Reactive Systems','Non-blocking']), + +('Spark Java', 'java', 'sinatra-inspired', 85, 'easy', 84, 81, 82, 'good', + ARRAY['Sinatra-inspired','Embedded Jetty','Simple routing'], + ARRAY['Simple web services','Educational projects','Rapid prototyping'], + ARRAY['Simple API','Quick setup','Embedded server','Java ecosystem'], + ARRAY['Limited features','Not for complex apps','Basic functionality'], + 'Apache 2.0', + ARRAY['Simple Services','Education','Rapid Prototyping']), + +-- Additional Go Frameworks +('Revel', 'go', 'full-stack', 78, 'medium', 81, 79, 84, 'good', + ARRAY['Full-stack','Hot reloading','Testing framework','ORM'], + ARRAY['Full-stack Go applications','Web development','Enterprise Go apps'], + ARRAY['Full-stack approach','Hot reloading','Testing built-in','Convention over configuration'], + ARRAY['Heavy for Go standards','Less Go-idiomatic','Learning curve'], + 'MIT', + ARRAY['Full-stack Go','Web Development','Enterprise Go']), + +('Iris', 'go', 'feature-rich', 83, 'medium', 88, 86, 87, 'good', + ARRAY['Feature-rich','WebSocket','Sessions','MVC support'], + ARRAY['Feature-rich web applications','Go web development'], + ARRAY['Feature-rich','High performance','MVC support','Comprehensive'], + ARRAY['Complex for Go','Less idiomatic','Feature bloat'], + 'BSD', + ARRAY['Feature-rich Web','Go Development','Comprehensive Apps']), + +('Mux', 'go', 'router-focused', 88, 'easy', 89, 85, 90, 'good', + ARRAY['HTTP router','URL routing','Middleware','Subrouters'], + ARRAY['HTTP routing','RESTful services','Go web applications'], + ARRAY['Excellent routing','URL patterns','Middleware support','Go standard'], + ARRAY['Router-only','Additional libraries needed','Limited features'], + 'BSD', + ARRAY['HTTP Routing','RESTful Services','Go Web']), + +-- Additional Rust Frameworks +('Tide', 'rust', 'middleware-focused', 82, 'hard', 92, 88, 93, 'good', + ARRAY['Middleware','Async','Modular','HTTP/2'], + ARRAY['Async web applications','Middleware-heavy apps'], + ARRAY['Async/await','Modular design','Middleware-focused','Rust safety'], + ARRAY['Async complexity','Rust learning curve','Middleware complexity'], + 'MIT', + ARRAY['Async Web','Middleware Apps','Rust Safety']), + +('Gotham', 'rust', 'type-safe', 79, 'hard', 91, 87, 92, 'good', + ARRAY['Type-safe routing','Async','Pipeline-based'], + ARRAY['Type-safe web services','Pipeline-based applications'], + ARRAY['Type safety','Pipeline architecture','Rust performance','Compile-time checks'], + ARRAY['Complex type system','Learning curve','Pipeline complexity'], + 'MIT', + ARRAY['Type-safe Web','Pipeline Apps','Compile-time Safety']), + +-- Additional C# Frameworks +('Carter', 'c#', 'minimal-api', 78, 'easy', 85, 82, 84, 'good', + ARRAY['Minimal APIs','Convention-based','Lightweight'], + ARRAY['Minimal APIs','Simple web services','Lightweight applications'], + ARRAY['Minimal approach','Convention over configuration','Lightweight','Simple'], + ARRAY['Limited features','Smaller ecosystem','Minimal functionality'], + 'MIT', + ARRAY['Minimal APIs','Simple Services','Lightweight Web']), + +('Web API', 'c#', 'api-focused', 93, 'medium', 88, 91, 81, 'excellent', + ARRAY['RESTful APIs','HTTP services','Content negotiation','Routing'], + ARRAY['RESTful services','HTTP APIs','Web services'], + ARRAY['RESTful focus','Content negotiation','Routing','Microsoft ecosystem'], + ARRAY['API-only','Complex for simple cases','Microsoft dependency'], + 'MIT', + ARRAY['RESTful APIs','HTTP Services','Microsoft Ecosystem']), + +-- Database-focused Frameworks +('Hasura', 'haskell', 'graphql-engine', 89, 'easy', 87, 92, 83, 'excellent', + ARRAY['GraphQL APIs','Real-time subscriptions','Database integration'], + ARRAY['GraphQL backends','Real-time applications','Database APIs'], + ARRAY['Instant GraphQL','Real-time subscriptions','Database integration','Auto-generated'], + ARRAY['Database dependency','GraphQL complexity','Subscription overhead'], + 'Apache 2.0', + ARRAY['GraphQL APIs','Real-time','Database Integration']), + +('PostgREST', 'haskell', 'database-api', 86, 'easy', 84, 88, 87, 'excellent', + ARRAY['PostgreSQL REST API','Auto-generated','Database-driven'], + ARRAY['Database APIs','PostgreSQL services','Auto-generated APIs'], + ARRAY['Auto-generated APIs','PostgreSQL integration','RESTful','Database-driven'], + ARRAY['PostgreSQL dependency','Limited customization','Database-only'], + 'MIT', + ARRAY['Database APIs','PostgreSQL','Auto-generated']), + +('Supabase', 'typescript', 'backend-as-service', 91, 'easy', 86, 90, 82, 'excellent', + ARRAY['Backend-as-a-Service','Real-time','Authentication','Storage'], + ARRAY['Backend services','Real-time applications','Firebase alternative'], + ARRAY['Complete backend','Real-time features','PostgreSQL-based','Open source'], + ARRAY['Vendor dependency','PostgreSQL-only','Service complexity'], + 'Apache 2.0', + ARRAY['Backend-as-Service','Real-time','PostgreSQL Apps']), + +-- Serverless Frameworks +('Serverless Framework', 'javascript', 'serverless', 92, 'medium', 85, 95, 88, 'excellent', + ARRAY['Multi-cloud','Infrastructure as code','Event-driven'], + ARRAY['Serverless applications','Function-as-a-Service','Event-driven apps'], + ARRAY['Multi-cloud support','Infrastructure as code','Event-driven','Ecosystem'], + ARRAY['Vendor lock-in potential','Cold starts','Complexity'], + 'MIT', + ARRAY['Serverless','Multi-cloud','Event-driven']), + +('AWS SAM', 'yaml', 'aws-serverless', 88, 'medium', 83, 93, 90, 'excellent', + ARRAY['AWS Lambda','API Gateway','CloudFormation','Local development'], + ARRAY['AWS serverless applications','Lambda functions','AWS services'], + ARRAY['AWS integration','CloudFormation','Local development','AWS optimized'], + ARRAY['AWS lock-in','CloudFormation complexity','AWS-only'], + 'Apache 2.0', + ARRAY['AWS Serverless','Lambda Functions','AWS Services']), + +-- Message Queue/Event Frameworks +('Apache Kafka', 'scala', 'streaming-platform', 96, 'hard', 93, 98, 86, 'excellent', + ARRAY['Event streaming','Distributed','High throughput','Fault tolerant'], + ARRAY['Event streaming','Real-time analytics','Distributed systems','Data pipelines'], + ARRAY['High throughput','Fault tolerant','Distributed','Battle-tested'], + ARRAY['Complex setup','Learning curve','Resource intensive','Operational complexity'], + 'Apache 2.0', + ARRAY['Event Streaming','Real-time Analytics','Distributed Systems']), + +('RabbitMQ', 'erlang', 'message-broker', 94, 'medium', 88, 92, 84, 'excellent', + ARRAY['Message queuing','AMQP','Clustering','Management UI'], + ARRAY['Message queuing','Async processing','Microservices communication'], + ARRAY['Reliable messaging','AMQP standard','Clustering','Management tools'], + ARRAY['Erlang complexity','Memory usage','Throughput limits'], + 'Mozilla Public', + ARRAY['Message Queuing','Async Processing','Microservices']), + +('Redis', 'c', 'in-memory-store', 97, 'medium', 95, 89, 78, 'excellent', + ARRAY['In-memory storage','Pub/Sub','Caching','Data structures'], + ARRAY['Caching','Session storage','Real-time applications','Data structures'], + ARRAY['High performance','Rich data types','Pub/Sub','Persistence options'], + ARRAY['Memory limitations','Single-threaded','Data size limits'], + 'BSD', + ARRAY['Caching','Session Storage','Real-time','Data Structures']), + +-- API Gateway Frameworks +('Kong', 'lua', 'api-gateway', 93, 'medium', 91, 94, 87, 'excellent', + ARRAY['API gateway','Load balancing','Authentication','Rate limiting'], + ARRAY['API management','Microservices','Service mesh','API gateway'], + ARRAY['High performance','Plugin ecosystem','Load balancing','Battle-tested'], + ARRAY['Complexity','Resource usage','Learning curve'], + 'Apache 2.0', + ARRAY['API Gateway','Microservices','Service Mesh']), + +('Ambassador', 'python', 'kubernetes-gateway', 87, 'medium', 86, 91, 83, 'excellent', + ARRAY['Kubernetes-native','Envoy-based','API gateway','Service mesh'], + ARRAY['Kubernetes APIs','Cloud-native applications','Service mesh'], + ARRAY['Kubernetes-native','Envoy-based','Cloud-native','GitOps'], + ARRAY['Kubernetes dependency','Complexity','Learning curve'], + 'Apache 2.0', + ARRAY['Kubernetes','Cloud Native','Service Mesh']), + +-- Blockchain/Web3 Frameworks +('Hardhat', 'javascript', 'ethereum-dev', 89, 'medium', 82, 85, 79, 'good', + ARRAY['Ethereum development','Smart contracts','Testing','Deployment'], + ARRAY['Blockchain applications','Smart contract development','DeFi applications'], + ARRAY['Ethereum tooling','Testing framework','TypeScript support','Plugin ecosystem'], + ARRAY['Blockchain complexity','Gas costs','Ethereum dependency'], + 'MIT', + ARRAY['Blockchain','Smart Contracts','DeFi']), + +('Truffle', 'javascript', 'ethereum-suite', 91, 'medium', 80, 83, 77, 'good', + ARRAY['Smart contract development','Testing','Migration','Deployment'], + ARRAY['Ethereum applications','Smart contract projects','Blockchain development'], + ARRAY['Comprehensive suite','Testing tools','Migration system','Established'], + ARRAY['Ethereum-only','Complex setup','Gas management'], + 'MIT', + ARRAY['Ethereum Development','Smart Contracts','Blockchain']), + +-- Real-time Frameworks +('Socket.IO', 'javascript', 'real-time', 94, 'easy', 84, 88, 81, 'excellent', + ARRAY['Real-time communication','WebSocket fallback','Room management'], + ARRAY['Real-time applications','Chat systems','Gaming','Live updates'], + ARRAY['Real-time features','Fallback mechanisms','Cross-platform','Easy integration'], + ARRAY['Connection overhead','Scaling challenges','Client dependencies'], + 'MIT', + ARRAY['Real-time Apps','Chat Systems','Gaming','Live Updates']), + +('SignalR', 'c#', 'real-time', 91, 'medium', 87, 90, 83, 'excellent', + ARRAY['Real-time communication','Hub-based','Multiple transports'], + ARRAY['Real-time web applications','Live dashboards','Notifications'], + ARRAY['Hub abstraction','Multiple transports','ASP.NET integration','Scalable'], + ARRAY['Microsoft ecosystem','Complex scaling','Learning curve'], + 'MIT', + ARRAY['Real-time Web','Live Dashboards','Microsoft Ecosystem']), + +-- Testing Frameworks (Backend Testing) +('Postman Newman', 'javascript', 'api-testing', 88, 'easy', 82, 85, 84, 'excellent', + ARRAY['API testing','Collection runner','CI/CD integration'], + ARRAY['API testing','Automated testing','CI/CD pipelines'], + ARRAY['API testing focus','Postman integration','CI/CD support','Easy automation'], + ARRAY['API testing only','Postman dependency','Limited scope'], + 'Apache 2.0', + ARRAY['API Testing','Automation','CI/CD']), + +-- Final entries to reach 200 +('Insomnia', 'electron', 'api-client', 85, 'easy', 79, 80, 75, 'good', + ARRAY['API client','Testing','GraphQL support','Environment management'], + ARRAY['API development','Testing','GraphQL applications'], + ARRAY['Modern interface','GraphQL support','Environment management','Plugin system'], + ARRAY['Electron overhead','Limited automation','Client-only'], + 'MIT', + ARRAY['API Development','Testing','GraphQL']), + +('Mockoon', 'javascript', 'api-mocking', 82, 'easy', 77, 78, 82, 'good', + ARRAY['API mocking','Local development','Response templating'], + ARRAY['API mocking','Development testing','Prototype APIs'], + ARRAY['Easy mocking','Local development','Response templating','No setup'], + ARRAY['Development only','Limited features','Mock-only'], + 'MIT', + ARRAY['API Mocking','Development','Prototyping']), + +('WireMock', 'java', 'service-virtualization', 87, 'medium', 83, 86, 80, 'good', + ARRAY['Service virtualization','HTTP mocking','Testing','Stubbing'], + ARRAY['Service testing','API mocking','Integration testing'], + ARRAY['Service virtualization','Flexible stubbing','Testing support','Java ecosystem'], + ARRAY['Java dependency','Complex setup','Testing-focused'], + 'Apache 2.0', + ARRAY['Service Testing','API Mocking','Integration Testing']); + + INSERT INTO database_technologies ( + name, database_type, acid_compliance, horizontal_scaling, vertical_scaling, + maturity_score, performance_rating, consistency_model, query_language, max_storage_capacity, + backup_features, security_features, primary_use_cases, strengths, weaknesses, license_type, domain +) VALUES + +-- Relational Databases (Original 8 + 27 new = 35 total) +('PostgreSQL', 'relational', true, false, true, 98, 92, 'strong', 'SQL', 'Unlimited', + ARRAY['Point-in-time recovery', 'Continuous archiving', 'Logical replication'], + ARRAY['Row-level security', 'SSL encryption', 'Authentication methods', 'Audit logging'], + ARRAY['Complex queries', 'Data warehousing', 'Geospatial data', 'JSON document storage', 'Analytics'], + ARRAY['ACID compliance', 'Advanced features', 'Extensible', 'Standards compliant', 'Reliable'], + ARRAY['Complex configuration', 'Memory intensive', 'Slower for simple queries', 'Limited horizontal scaling'], + 'PostgreSQL License', + ARRAY['Data Warehousing', 'Geospatial Applications', 'Analytics', 'Financial Systems', 'Enterprise Applications']), + +('MySQL', 'relational', true, true, true, 95, 85, 'strong', 'SQL', 'Unlimited', + ARRAY['Binary logging', 'Point-in-time recovery', 'Replication'], + ARRAY['SSL encryption', 'User authentication', 'Role-based access', 'Audit plugins'], + ARRAY['Web applications', 'E-commerce', 'Data warehousing', 'Embedded applications', 'OLTP systems'], + ARRAY['Wide adoption', 'Good performance', 'Reliable', 'Strong community', 'Easy to use'], + ARRAY['Limited advanced features', 'Storage engine complexity', 'Replication lag', 'License restrictions'], + 'GPL/Commercial', + ARRAY['E-commerce', 'Web Applications', 'Data Warehousing', 'Content Management Systems', 'Enterprise Applications']), + +('Oracle Database', 'relational', true, true, true, 97, 94, 'strong', 'SQL/PL-SQL', 'Unlimited', + ARRAY['RMAN backup', 'Flashback technology', 'Data Guard'], + ARRAY['Advanced security', 'Transparent data encryption', 'Database vault', 'Virtual private database'], + ARRAY['Enterprise applications', 'Financial systems', 'ERP', 'Data warehousing', 'OLTP'], + ARRAY['Enterprise features', 'High performance', 'Scalability', 'Advanced analytics', 'Mature'], + ARRAY['Expensive licensing', 'Complex administration', 'Vendor lock-in', 'Resource intensive'], + 'Commercial', + ARRAY['Enterprise Systems', 'Financial Services', 'ERP Systems', 'Large-scale Applications']), + +('SQL Server', 'relational', true, true, true, 96, 90, 'strong', 'T-SQL', 'Unlimited', + ARRAY['SQL Server Agent', 'Always On', 'Transaction log backup'], + ARRAY['Integrated Windows authentication', 'TDE', 'Row-level security', 'Dynamic data masking'], + ARRAY['Business applications', 'Data warehousing', 'BI systems', 'Web applications'], + ARRAY['Microsoft integration', 'Business intelligence', 'High availability', 'Enterprise features'], + ARRAY['Windows dependency', 'Licensing costs', 'Microsoft ecosystem lock-in'], + 'Commercial', + ARRAY['Microsoft Ecosystem', 'Business Intelligence', 'Enterprise Applications', 'Data Warehousing']), + +('SQLite', 'relational', true, false, true, 85, 78, 'strong', 'SQL', '281 TB', + ARRAY['File-based backups', 'Transaction rollback'], + ARRAY['File-level permissions', 'Encryption extensions'], + ARRAY['Mobile applications', 'Desktop apps', 'Embedded systems', 'Development/testing', 'Small websites'], + ARRAY['Serverless', 'Zero configuration', 'Cross-platform', 'Lightweight', 'Public domain'], + ARRAY['No network access', 'Limited concurrency', 'No user management', 'Simple data types only'], + 'Public Domain', + ARRAY['Mobile Applications', 'Embedded Systems', 'Desktop Applications', 'Prototyping', 'Small Websites']), + +('MariaDB', 'relational', true, true, true, 93, 87, 'strong', 'SQL', 'Unlimited', + ARRAY['Binary logging', 'Galera cluster', 'MariaDB backup'], + ARRAY['SSL encryption', 'Authentication plugins', 'Role-based access control'], + ARRAY['Web applications', 'Cloud deployments', 'Analytics', 'OLTP systems'], + ARRAY['MySQL compatibility', 'Open source', 'Active development', 'Better performance'], + ARRAY['Fragmented ecosystem', 'Migration complexity', 'Documentation gaps'], + 'GPL', + ARRAY['Web Applications', 'Cloud Deployments', 'MySQL Migration', 'Open Source Projects']), + +('IBM DB2', 'relational', true, true, true, 91, 88, 'strong', 'SQL', 'Unlimited', + ARRAY['DB2 Recovery Expert', 'HADR', 'Online backup'], + ARRAY['Label-based access control', 'Encryption', 'Audit facility'], + ARRAY['Enterprise applications', 'Mainframe systems', 'Data warehousing', 'OLTP'], + ARRAY['Mainframe integration', 'High reliability', 'Advanced analytics', 'Enterprise grade'], + ARRAY['Complex administration', 'Expensive licensing', 'Limited community', 'Legacy technology'], + 'Commercial', + ARRAY['Mainframe Systems', 'Enterprise Applications', 'Legacy Systems', 'Large Corporations']), + +('CockroachDB', 'relational', true, true, true, 87, 86, 'strong', 'SQL', 'Unlimited', + ARRAY['Distributed backups', 'Point-in-time recovery', 'Cluster replication'], + ARRAY['Encryption at rest', 'TLS encryption', 'RBAC', 'Certificate-based authentication'], + ARRAY['Distributed applications', 'Global deployments', 'Cloud-native apps', 'Financial services'], + ARRAY['Distributed SQL', 'Automatic scaling', 'Survival capabilities', 'Cloud-native'], + ARRAY['Complex distributed system', 'Higher latency', 'Learning curve', 'Resource intensive'], + 'BSL/Commercial', + ARRAY['Distributed Systems', 'Cloud-native Applications', 'Global Deployments', 'Financial Services']), + +-- Additional Relational Databases (27 new) +('Percona Server', 'relational', true, true, true, 92, 86, 'strong', 'SQL', 'Unlimited', + ARRAY['XtraBackup', 'Binary logging', 'Point-in-time recovery'], + ARRAY['Audit logging', 'Data masking', 'Encryption', 'PAM authentication'], + ARRAY['High-performance MySQL', 'Enterprise applications', 'E-commerce', 'Analytics'], + ARRAY['MySQL compatibility', 'Enhanced performance', 'Enterprise features', 'Open source'], + ARRAY['MySQL limitations', 'Configuration complexity', 'Support dependency'], + 'GPL', + ARRAY['High-performance Applications', 'MySQL Enhancement', 'Enterprise Systems']), + +('Amazon RDS', 'relational', true, true, true, 89, 84, 'strong', 'SQL', 'Varies by engine', + ARRAY['Automated backups', 'Multi-AZ deployments', 'Read replicas'], + ARRAY['VPC security', 'Encryption at rest', 'IAM database authentication'], + ARRAY['AWS applications', 'Multi-engine support', 'Managed databases'], + ARRAY['Managed service', 'Multi-engine support', 'AWS integration', 'High availability'], + ARRAY['AWS lock-in', 'Limited customization', 'Cost complexity'], + 'Commercial', + ARRAY['AWS Cloud', 'Managed Services', 'Multi-engine Applications']), + +('YugabyteDB', 'relational', true, true, true, 84, 83, 'strong', 'SQL', 'Unlimited', + ARRAY['Distributed backups', 'Point-in-time recovery', 'Cross-region replication'], + ARRAY['TLS encryption', 'RBAC', 'LDAP integration', 'Audit logging'], + ARRAY['Cloud-native applications', 'Global deployments', 'OLTP workloads'], + ARRAY['PostgreSQL compatibility', 'Distributed SQL', 'Multi-cloud', 'Kubernetes native'], + ARRAY['Complex architecture', 'Learning curve', 'Resource intensive'], + 'Apache 2.0/Commercial', + ARRAY['Cloud-native Applications', 'Multi-cloud Deployments', 'Kubernetes']), + +('Firebird', 'relational', true, false, true, 86, 81, 'strong', 'SQL', 'Unlimited', + ARRAY['Native backup', 'Incremental backup', 'Shadow files'], + ARRAY['User authentication', 'SQL roles', 'Database encryption'], + ARRAY['Desktop applications', 'Small to medium databases', 'Embedded systems'], + ARRAY['Lightweight', 'Standards compliant', 'Cross-platform', 'No licensing fees'], + ARRAY['Limited scalability', 'Smaller community', 'Limited tools'], + 'IPL', + ARRAY['Desktop Applications', 'Small-medium Systems', 'Embedded Databases']), + +('MaxDB', 'relational', true, false, true, 78, 79, 'strong', 'SQL', 'Unlimited', + ARRAY['Online backup', 'Log backup', 'Recovery tools'], + ARRAY['User authentication', 'SQL authorization', 'Encryption support'], + ARRAY['SAP applications', 'Enterprise systems', 'Data warehousing'], + ARRAY['SAP integration', 'ACID compliance', 'Enterprise features'], + ARRAY['Limited adoption', 'SAP dependency', 'Complex administration'], + 'Commercial', + ARRAY['SAP Systems', 'Enterprise Applications', 'Data Warehousing']), + +('Ingres', 'relational', true, false, true, 80, 77, 'strong', 'SQL', 'Unlimited', + ARRAY['Online backup', 'Point-in-time recovery', 'Journal files'], + ARRAY['User authentication', 'Role-based security', 'Encryption'], + ARRAY['Government systems', 'Legacy applications', 'Scientific computing'], + ARRAY['Mature technology', 'Reliable', 'Security features'], + ARRAY['Limited modern features', 'Declining community', 'Legacy system'], + 'GPL/Commercial', + ARRAY['Government Systems', 'Legacy Applications', 'Scientific Computing']), + +('Informix', 'relational', true, true, true, 83, 82, 'strong', 'SQL', 'Unlimited', + ARRAY['ON-Bar backup', 'HDR replication', 'RSS secondary'], + ARRAY['Label-based access control', 'Encryption', 'Audit trails'], + ARRAY['OLTP systems', 'Data warehousing', 'Embedded databases'], + ARRAY['High performance', 'Scalability', 'Embeddable', 'Low maintenance'], + ARRAY['Limited ecosystem', 'IBM dependency', 'Smaller community'], + 'Commercial', + ARRAY['OLTP Systems', 'Embedded Databases', 'High-performance Applications']), + +('Sybase ASE', 'relational', true, true, true, 84, 85, 'strong', 'T-SQL', 'Unlimited', + ARRAY['Backup server', 'Transaction log dumps', 'Replication'], + ARRAY['Login security', 'Column encryption', 'Audit system'], + ARRAY['Financial systems', 'OLTP applications', 'Data warehousing'], + ARRAY['High performance', 'Proven reliability', 'Enterprise features'], + ARRAY['SAP dependency', 'Limited innovation', 'Complex licensing'], + 'Commercial', + ARRAY['Financial Systems', 'OLTP Applications', 'Enterprise Systems']), + +('Teradata', 'relational', true, true, true, 90, 91, 'strong', 'SQL', 'Unlimited', + ARRAY['ARC backup', 'Permanent journaling', 'Fallback tables'], + ARRAY['Access controls', 'Encryption', 'Query banding', 'Audit logging'], + ARRAY['Data warehousing', 'Analytics', 'Business intelligence', 'Big data'], + ARRAY['Massively parallel', 'Analytics optimization', 'Scalability', 'Enterprise grade'], + ARRAY['Expensive licensing', 'Complex administration', 'Vendor lock-in'], + 'Commercial', + ARRAY['Data Warehousing', 'Analytics', 'Business Intelligence', 'Big Data']), + +('Vertica', 'relational', true, true, true, 88, 89, 'strong', 'SQL', 'Unlimited', + ARRAY['Full/incremental backup', 'Replication', 'Copycluster'], + ARRAY['TLS encryption', 'Kerberos', 'LDAP integration', 'Audit functions'], + ARRAY['Analytics', 'Data warehousing', 'Business intelligence', 'Real-time analytics'], + ARRAY['Columnar storage', 'Compression', 'Fast analytics', 'Scalability'], + ARRAY['Complex tuning', 'Resource intensive', 'Limited OLTP'], + 'Commercial', + ARRAY['Analytics', 'Data Warehousing', 'Business Intelligence']), + +-- Continue with more relational databases... +('SingleStore', 'relational', true, true, true, 85, 87, 'strong', 'SQL', 'Unlimited', + ARRAY['Backup/restore', 'Cross-cluster replication', 'Snapshots'], + ARRAY['TLS encryption', 'RBAC', 'LDAP integration', 'Audit logging'], + ARRAY['Real-time analytics', 'Operational analytics', 'Time-series data'], + ARRAY['Real-time processing', 'SQL compatibility', 'High performance', 'Cloud-native'], + ARRAY['Memory intensive', 'Complex pricing', 'Learning curve'], + 'Commercial', + ARRAY['Real-time Analytics', 'Operational Analytics', 'Cloud Applications']), + +('VictoriaMetrics', 'relational', false, true, true, 82, 88, 'eventual', 'PromQL', 'Unlimited', + ARRAY['Snapshots', 'Replication', 'Backup tools'], + ARRAY['Basic authentication', 'TLS support', 'Multi-tenancy'], + ARRAY['Time-series monitoring', 'DevOps metrics', 'IoT data'], + ARRAY['High performance', 'Prometheus compatibility', 'Low resource usage'], + ARRAY['Limited ACID support', 'Smaller ecosystem', 'Specialized use case'], + 'Apache 2.0', + ARRAY['Time-series Monitoring', 'DevOps', 'IoT Applications']), + +('AlloyDB', 'relational', true, true, true, 86, 88, 'strong', 'SQL', 'Unlimited', + ARRAY['Automated backups', 'Point-in-time recovery', 'Cross-region backups'], + ARRAY['IAM integration', 'VPC security', 'Encryption at rest and transit'], + ARRAY['Google Cloud applications', 'PostgreSQL migration', 'Analytics'], + ARRAY['PostgreSQL compatibility', 'Managed service', 'High performance', 'Google Cloud integration'], + ARRAY['Google Cloud lock-in', 'Limited availability', 'Cost considerations'], + 'Commercial', + ARRAY['Google Cloud', 'PostgreSQL Migration', 'Analytics']), + +('CrateDB', 'relational', false, true, true, 81, 84, 'eventual', 'SQL', 'Unlimited', + ARRAY['Snapshots', 'Replication', 'Backup/restore'], + ARRAY['User management', 'SSL/TLS', 'Privilege system'], + ARRAY['IoT applications', 'Time-series data', 'Real-time analytics'], + ARRAY['SQL interface', 'Distributed architecture', 'Time-series optimization'], + ARRAY['Eventual consistency', 'Complex distributed operations', 'Learning curve'], + 'Apache 2.0/Commercial', + ARRAY['IoT Applications', 'Time-series Data', 'Real-time Analytics']), + +('Greenplum', 'relational', true, true, true, 87, 86, 'strong', 'SQL', 'Unlimited', + ARRAY['gpbackup', 'Incremental backup', 'Parallel restore'], + ARRAY['Kerberos', 'LDAP', 'SSL encryption', 'Resource queues'], + ARRAY['Data warehousing', 'Analytics', 'Business intelligence', 'Big data'], + ARRAY['Massively parallel', 'PostgreSQL based', 'Advanced analytics', 'Open source'], + ARRAY['Complex administration', 'Resource intensive', 'Learning curve'], + 'Apache 2.0', + ARRAY['Data Warehousing', 'Analytics', 'Big Data', 'Business Intelligence']), + +('MonetDB', 'relational', true, false, true, 79, 83, 'strong', 'SQL', 'Unlimited', + ARRAY['Hot snapshots', 'Write-ahead logging', 'Replication'], + ARRAY['User authentication', 'SSL support', 'SQL privileges'], + ARRAY['Analytics', 'Data science', 'OLAP workloads', 'Research'], + ARRAY['Columnar storage', 'Vectorized execution', 'Fast analytics', 'Research-oriented'], + ARRAY['Limited production use', 'Smaller community', 'Complex optimization'], + 'Mozilla Public License', + ARRAY['Analytics', 'Data Science', 'Research', 'OLAP Systems']), + +('H2 Database', 'relational', true, false, true, 76, 75, 'strong', 'SQL', '256 GB', + ARRAY['Script backup', 'Binary backup', 'Incremental backup'], + ARRAY['User authentication', 'SSL connections', 'Role-based access'], + ARRAY['Java applications', 'Testing', 'Embedded systems', 'Development'], + ARRAY['Pure Java', 'Lightweight', 'Fast startup', 'Multiple modes'], + ARRAY['Limited scalability', 'Java dependency', 'Small community'], + 'EPL/MPL', + ARRAY['Java Applications', 'Testing', 'Development', 'Embedded Systems']), + +('Derby', 'relational', true, false, true, 77, 74, 'strong', 'SQL', 'Unlimited', + ARRAY['Online backup', 'Import/export', 'Log archiving'], + ARRAY['User authentication', 'SQL authorization', 'Encryption'], + ARRAY['Java applications', 'Embedded systems', 'Development', 'Testing'], + ARRAY['Pure Java', 'Embeddable', 'Standards compliant', 'Apache project'], + ARRAY['Limited features', 'Performance limitations', 'Java dependency'], + 'Apache 2.0', + ARRAY['Java Applications', 'Embedded Systems', 'Development']), + +('HSQLDB', 'relational', true, false, true, 75, 73, 'strong', 'SQL', 'Unlimited', + ARRAY['Script backup', 'Binary backup', 'Checkpoint'], + ARRAY['User authentication', 'SQL roles', 'Access rights'], + ARRAY['Java applications', 'Testing', 'Embedded databases', 'Development'], + ARRAY['Lightweight', 'Fast startup', 'Multiple modes', 'Standards compliant'], + ARRAY['Limited scalability', 'Java dependency', 'Basic features'], + 'BSD', + ARRAY['Java Applications', 'Testing', 'Development', 'Embedded Systems']), + +('Apache Drill', 'relational', false, true, true, 80, 81, 'eventual', 'SQL', 'Unlimited', + ARRAY['Storage plugin backups', 'Metadata backup'], + ARRAY['User authentication', 'Impersonation', 'Authorization'], + ARRAY['Big data analytics', 'Data exploration', 'Multi-source queries'], + ARRAY['Schema-free', 'Multi-source queries', 'SQL interface', 'Self-service analytics'], + ARRAY['Complex setup', 'Performance tuning', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Big Data Analytics', 'Data Exploration', 'Multi-source Analysis']), + +('Apache Impala', 'relational', false, true, true, 83, 85, 'eventual', 'SQL', 'HDFS dependent', + ARRAY['HDFS snapshots', 'Table backups'], + ARRAY['Kerberos', 'LDAP', 'Sentry integration', 'SSL/TLS'], + ARRAY['Big data analytics', 'Business intelligence', 'Interactive queries'], + ARRAY['Fast SQL queries', 'Hadoop integration', 'In-memory processing', 'Real-time analytics'], + ARRAY['Hadoop dependency', 'Memory intensive', 'Limited ACID support'], + 'Apache 2.0', + ARRAY['Big Data Analytics', 'Hadoop Ecosystem', 'Business Intelligence']), + +('Presto', 'relational', false, true, true, 84, 87, 'eventual', 'SQL', 'Source dependent', + ARRAY['Connector-specific backup strategies'], + ARRAY['Authentication plugins', 'Access control', 'SSL/TLS'], + ARRAY['Interactive analytics', 'Data lake queries', 'Multi-source analysis'], + ARRAY['Fast queries', 'Multi-source federation', 'SQL standard compliance', 'Scalable'], + ARRAY['In-memory limitations', 'Complex setup', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Interactive Analytics', 'Data Lakes', 'Multi-source Analysis']), + +('Trino', 'relational', false, true, true, 85, 88, 'eventual', 'SQL', 'Source dependent', + ARRAY['Connector-specific backup strategies'], + ARRAY['Authentication methods', 'Authorization', 'SSL/TLS', 'Resource groups'], + ARRAY['Interactive analytics', 'Data lake queries', 'Federation', 'Ad-hoc analysis'], + ARRAY['High performance', 'Multi-source queries', 'SQL compliance', 'Active development'], + ARRAY['Memory constraints', 'Complex configuration', 'Resource management'], + 'Apache 2.0', + ARRAY['Interactive Analytics', 'Data Lakes', 'Query Federation']), + +('Databricks SQL', 'relational', false, true, true, 87, 89, 'eventual', 'SQL', 'Unlimited', + ARRAY['Delta Lake time travel', 'Automated backups', 'Cross-region replication'], + ARRAY['Unity Catalog', 'Fine-grained access control', 'Encryption', 'Audit logs'], + ARRAY['Analytics', 'Data science', 'Business intelligence', 'Data engineering'], + ARRAY['Unified analytics', 'Auto-scaling', 'Collaborative', 'MLOps integration'], + ARRAY['Vendor lock-in', 'Complex pricing', 'Learning curve'], + 'Commercial', + ARRAY['Analytics', 'Data Science', 'Business Intelligence', 'MLOps']), + +('Snowflake', 'relational', true, true, true, 92, 90, 'strong', 'SQL', 'Unlimited', + ARRAY['Continuous data protection', 'Time Travel', 'Fail-safe'], + ARRAY['Multi-factor authentication', 'End-to-end encryption', 'Access control', 'Data masking'], + ARRAY['Data warehousing', 'Analytics', 'Data sharing', 'Data engineering'], + ARRAY['Separation of storage/compute', 'Auto-scaling', 'Data sharing', 'Cloud-native'], + ARRAY['Vendor lock-in', 'Cost management complexity', 'Limited customization'], + 'Commercial', + ARRAY['Data Warehousing', 'Analytics', 'Data Sharing', 'Cloud Applications']), + +('BigQuery', 'relational', false, true, true, 90, 91, 'eventual', 'SQL', 'Unlimited', + ARRAY['Automated backups', 'Dataset snapshots', 'Cross-region replication'], + ARRAY['IAM integration', 'Column-level security', 'Encryption', 'VPC Service Controls'], + ARRAY['Analytics', 'Data warehousing', 'Business intelligence', 'Machine learning'], + ARRAY['Serverless', 'Petabyte scale', 'Built-in ML', 'Google Cloud integration'], + ARRAY['Google Cloud lock-in', 'Cost unpredictability', 'Limited real-time updates'], + 'Commercial', + ARRAY['Analytics', 'Data Warehousing', 'Machine Learning', 'Google Cloud']), + +('Redshift', 'relational', false, true, true, 89, 88, 'eventual', 'SQL', 'Unlimited', + ARRAY['Automated backups', 'Cross-region snapshots', 'Incremental backups'], + ARRAY['VPC security', 'Encryption at rest', 'IAM integration', 'Database audit logging'], + ARRAY['Data warehousing', 'Analytics', 'Business intelligence', 'ETL processing'], + ARRAY['AWS integration', 'Columnar storage', 'Massively parallel', 'Cost-effective'], + ARRAY['AWS lock-in', 'Limited concurrency', 'Maintenance windows'], + 'Commercial', + ARRAY['Data Warehousing', 'Analytics', 'Business Intelligence', 'AWS Cloud']), + +-- Document Databases (Original 4 + 16 new = 20 total) +('MongoDB', 'document', false, true, true, 90, 88, 'eventual', 'MongoDB Query Language', 'Unlimited', + ARRAY['Replica sets', 'Sharding', 'Point-in-time snapshots'], + ARRAY['Authentication', 'Authorization', 'Encryption at rest', 'Network encryption'], + ARRAY['Content management', 'Real-time applications', 'IoT data', 'Catalog management', 'User profiles'], + ARRAY['Flexible schema', 'Horizontal scaling', 'JSON-like documents', 'Fast development'], + ARRAY['No ACID transactions', 'Memory usage', 'Data consistency challenges', 'Complex queries'], + 'SSPL', + ARRAY['Content Management Systems', 'IoT', 'E-commerce', 'Real-time Applications', 'Social Media']), + +('CouchDB', 'document', false, true, false, 82, 75, 'eventual', 'MapReduce/Mango', 'Unlimited', + ARRAY['Incremental replication', 'Multi-master sync', 'Snapshot backups'], + ARRAY['User authentication', 'Database-level permissions', 'SSL support'], + ARRAY['Offline-first applications', 'Mobile sync', 'Content management', 'Collaborative applications'], + ARRAY['Multi-master replication', 'Offline capabilities', 'HTTP API', 'Conflict resolution'], + ARRAY['Limited query capabilities', 'View complexity', 'Performance issues', 'Steep learning curve'], + 'Apache 2.0', + ARRAY['Offline Applications', 'Mobile Sync', 'Collaborative Systems', 'Content Management']), + +('Amazon DocumentDB', 'document', true, true, true, 88, 85, 'strong', 'MongoDB API', '64 TB per cluster', + ARRAY['Automated backups', 'Point-in-time recovery', 'Cross-region snapshots'], + ARRAY['VPC isolation', 'Encryption at rest', 'IAM integration', 'TLS encryption'], + ARRAY['AWS applications', 'Document storage', 'Content management', 'User profiles'], + ARRAY['Managed service', 'AWS integration', 'High availability', 'Automatic scaling'], + ARRAY['AWS lock-in', 'Limited MongoDB compatibility', 'Regional availability', 'Pricing complexity'], + 'Commercial', + ARRAY['AWS Cloud', 'Managed Services', 'Enterprise Applications', 'Content Management']), + +('RavenDB', 'document', true, true, true, 84, 82, 'eventual', 'RQL', 'Unlimited', + ARRAY['Incremental backups', 'Snapshot backups', 'Replication'], + ARRAY['X.509 certificates', 'HTTPS', 'Database encryption', 'User authentication'], + ARRAY['.NET applications', 'Document storage', 'Full-text search', 'Real-time applications'], + ARRAY['.NET integration', 'ACID transactions', 'Full-text search', 'Real-time indexing'], + ARRAY['Limited ecosystem', 'Windows-centric', 'Learning curve', 'Commercial licensing'], + 'AGPL/Commercial', + ARRAY['.NET Applications', 'Enterprise Systems', 'Full-text Search', 'Windows Environments']), + +-- Additional Document Databases (16 new) +('Couchbase', 'document', false, true, true, 87, 86, 'eventual', 'N1QL', 'Unlimited', + ARRAY['Cross datacenter replication', 'Incremental backup', 'Full backup'], + ARRAY['RBAC', 'LDAP integration', 'X.509 certificates', 'Audit logging'], + ARRAY['Mobile applications', 'Web applications', 'Real-time analytics', 'Session storage'], + ARRAY['Memory-first architecture', 'Full-text search', 'Mobile sync', 'SQL-like queries'], + ARRAY['Complex configuration', 'Memory intensive', 'Learning curve'], + 'Apache 2.0/Commercial', + ARRAY['Mobile Applications', 'Web Applications', 'Real-time Analytics']), + +('OrientDB', 'multi-model', true, true, true, 81, 79, 'strong', 'SQL/Gremlin', 'Unlimited', + ARRAY['Incremental backup', 'Full backup', 'Import/export'], + ARRAY['User authentication', 'Role-based security', 'Record-level security'], + ARRAY['Multi-model applications', 'Graph databases', 'Document storage'], + ARRAY['Multi-model support', 'ACID compliance', 'SQL support', 'Graph capabilities'], + ARRAY['Complex configuration', 'Performance issues', 'Limited documentation'], + 'Apache 2.0/Commercial', + ARRAY['Multi-model Applications', 'Graph Analytics', 'Document Storage']), + +('FaunaDB', 'document', true, true, true, 83, 84, 'strong', 'FQL', 'Unlimited', + ARRAY['Automatic backups', 'Point-in-time recovery', 'Global replication'], + ARRAY['Identity-based access', 'Attribute-based access control', 'Encryption'], + ARRAY['Serverless applications', 'JAMstack', 'Real-time applications'], + ARRAY['Serverless', 'ACID transactions', 'Global consistency', 'Multi-model'], + ARRAY['Vendor lock-in', 'Complex pricing', 'Learning curve', 'Limited tooling'], + 'Commercial', + ARRAY['Serverless Applications', 'JAMstack', 'Real-time Applications']), + +('Firebase Firestore', 'document', false, true, true, 85, 83, 'eventual', 'Firebase API', 'Unlimited', + ARRAY['Automatic backups', 'Export/import', 'Real-time sync'], + ARRAY['Firebase Authentication', 'Security rules', 'IAM integration'], + ARRAY['Mobile applications', 'Web applications', 'Real-time sync'], + ARRAY['Real-time updates', 'Offline support', 'Google integration', 'Easy scaling'], + ARRAY['Google lock-in', 'Query limitations', 'Cost at scale', 'Vendor dependency'], + 'Commercial', + ARRAY['Mobile Applications', 'Web Applications', 'Real-time Sync']), + +('Elasticsearch', 'document', false, true, true, 91, 89, 'eventual', 'Query DSL', 'Unlimited', + ARRAY['Snapshot/restore', 'Cross-cluster replication', 'Index lifecycle management'], + ARRAY['Authentication', 'Authorization', 'Field-level security', 'Audit logging'], + ARRAY['Full-text search', 'Log analytics', 'Real-time search', 'Business intelligence'], + ARRAY['Full-text search', 'Real-time indexing', 'Distributed architecture', 'Analytics'], + ARRAY['Memory intensive', 'Complex configuration', 'License changes', 'Operational complexity'], + 'Elastic License/Commercial', + ARRAY['Full-text Search', 'Log Analytics', 'Business Intelligence', 'Real-time Search']), + +('PouchDB', 'document', false, true, false, 78, 76, 'eventual', 'JavaScript API', 'Browser dependent', + ARRAY['Replication', 'Sync protocols', 'Local storage'], + ARRAY['Browser security model', 'Basic authentication'], + ARRAY['Offline-first web apps', 'Mobile web applications', 'Progressive web apps'], + ARRAY['Offline capabilities', 'CouchDB sync', 'Browser-based', 'JavaScript native'], + ARRAY['Browser limitations', 'Storage constraints', 'Performance limitations'], + 'Apache 2.0', + ARRAY['Offline Web Applications', 'Progressive Web Apps', 'Mobile Web']), + +('AzureDB Cosmos DB', 'multi-model', false, true, true, 89, 87, 'tunable', 'Multiple APIs', 'Unlimited', + ARRAY['Automatic backups', 'Point-in-time restore', 'Geo-redundant backups'], + ARRAY['AAD integration', 'RBAC', 'Private endpoints', 'Encryption'], + ARRAY['Global applications', 'IoT data', 'Gaming', 'Web applications'], + ARRAY['Multi-model', 'Global distribution', 'Guaranteed SLAs', 'Multi-API support'], + ARRAY['Azure lock-in', 'Complex pricing', 'Learning curve', 'API limitations'], + 'Commercial', + ARRAY['Azure Cloud', 'Global Applications', 'IoT', 'Gaming']), + +('Amazon SimpleDB', 'document', false, true, false, 72, 70, 'eventual', 'SimpleDB API', '10 GB per domain', + ARRAY['Automatic replication', 'Point-in-time consistency'], + ARRAY['AWS IAM', 'HTTPS encryption', 'Access policies'], + ARRAY['Simple web applications', 'Metadata storage', 'Configuration data'], + ARRAY['Simple API', 'Automatic scaling', 'No administration', 'AWS integration'], + ARRAY['Limited functionality', 'Storage limitations', 'Query constraints', 'Deprecated'], + 'Commercial', + ARRAY['Simple Web Applications', 'Configuration Storage', 'AWS Legacy']), + +('MarkLogic', 'multi-model', true, true, true, 86, 84, 'strong', 'XQuery/JavaScript', 'Unlimited', + ARRAY['Incremental backup', 'Point-in-time recovery', 'Database replication'], + ARRAY['Role-based security', 'Element-level security', 'Encryption', 'Audit logging'], + ARRAY['Content management', 'Government systems', 'Publishing', 'Data integration'], + ARRAY['Multi-model', 'Enterprise features', 'Semantic capabilities', 'ACID compliance'], + ARRAY['Expensive licensing', 'Complex administration', 'Steep learning curve'], + 'Commercial', + ARRAY['Content Management', 'Government Systems', 'Enterprise Data Integration']), + +('Apache Jackrabbit', 'document', true, false, true, 79, 77, 'strong', 'JCR API', 'Unlimited', + ARRAY['Backup utilities', 'Repository export', 'Clustering support'], + ARRAY['Access control', 'User authentication', 'Permission management'], + ARRAY['Content management', 'Document management', 'Java applications'], + ARRAY['JCR standard compliance', 'Hierarchical storage', 'Version control'], + ARRAY['Java dependency', 'Limited scalability', 'Complex configuration'], + 'Apache 2.0', + ARRAY['Content Management', 'Document Management', 'Java Applications']), + +('eXist-db', 'document', true, false, true, 74, 72, 'strong', 'XQuery', 'Unlimited', + ARRAY['Database backup', 'Incremental backup', 'Replication'], + ARRAY['User authentication', 'Access control lists', 'SSL support'], + ARRAY['XML applications', 'Digital humanities', 'Publishing systems'], + ARRAY['Native XML storage', 'XQuery support', 'Full-text search', 'Open source'], + ARRAY['Limited scalability', 'Niche use cases', 'Small community'], + 'LGPL', + ARRAY['XML Applications', 'Digital Humanities', 'Publishing']), + +('BaseX', 'document', true, false, true, 76, 74, 'strong', 'XQuery', 'Unlimited', + ARRAY['Database backup', 'Export functions', 'Replication support'], + ARRAY['User management', 'Database permissions', 'SSL connections'], + ARRAY['XML processing', 'Digital archives', 'Research projects'], + ARRAY['Fast XML processing', 'XQuery 3.1 support', 'Lightweight', 'Standards compliant'], + ARRAY['Limited non-XML support', 'Smaller ecosystem', 'Specialized use case'], + 'BSD', + ARRAY['XML Processing', 'Digital Archives', 'Research Projects']), + +('Sedna', 'document', true, false, true, 71, 69, 'strong', 'XQuery', 'Unlimited', + ARRAY['Hot backup', 'Incremental backup', 'Recovery utilities'], + ARRAY['User authentication', 'Access privileges', 'Secure connections'], + ARRAY['XML data management', 'Academic projects', 'Small-scale XML applications'], + ARRAY['Native XML storage', 'ACID compliance', 'XQuery support'], + ARRAY['Limited development', 'Small community', 'Outdated features'], + 'Apache 2.0', + ARRAY['XML Data Management', 'Academic Projects', 'Small XML Applications']), + +('Qizx', 'document', true, false, true, 73, 71, 'strong', 'XQuery', 'Unlimited', + ARRAY['Database backup', 'Replication', 'Export utilities'], + ARRAY['User authentication', 'Access control', 'SSL support'], + ARRAY['XML content management', 'Publishing workflows', 'Data integration'], + ARRAY['Enterprise XML features', 'Performance optimization', 'Standards compliance'], + ARRAY['Commercial licensing', 'Limited adoption', 'XML-focused only'], + 'Commercial', + ARRAY['XML Content Management', 'Publishing', 'Data Integration']), + +('Clusterpoint', 'document', false, true, true, 77, 78, 'eventual', 'SQL++/JavaScript', 'Unlimited', + ARRAY['Automatic replication', 'Backup utilities', 'Point-in-time recovery'], + ARRAY['User authentication', 'Access control', 'Encryption support'], + ARRAY['NoSQL applications', 'Real-time analytics', 'Search applications'], + ARRAY['SQL-like queries', 'Full-text search', 'Real-time indexing', 'Distributed'], + ARRAY['Limited ecosystem', 'Complex setup', 'Commercial focus'], + 'Commercial', + ARRAY['NoSQL Applications', 'Real-time Analytics', 'Search Systems']), + +('IBM Cloudant', 'document', false, true, true, 84, 81, 'eventual', 'HTTP API', 'Unlimited', + ARRAY['Continuous replication', 'Incremental backup', 'Cross-region sync'], + ARRAY['IAM integration', 'API key management', 'HTTPS encryption'], + ARRAY['Mobile applications', 'Web applications', 'IoT data storage'], + ARRAY['CouchDB compatibility', 'Global replication', 'Managed service', 'IBM Cloud integration'], + ARRAY['IBM Cloud dependency', 'Eventually consistent', 'Limited querying'], + 'Commercial', + ARRAY['Mobile Applications', 'IBM Cloud', 'IoT Storage']), + +('Riak TS', 'document', false, true, false, 78, 80, 'eventual', 'SQL subset', 'Unlimited', + ARRAY['Multi-datacenter replication', 'Backup utilities'], + ARRAY['Authentication', 'SSL/TLS support', 'Access controls'], + ARRAY['Time-series data', 'IoT applications', 'Sensor data'], + ARRAY['Time-series optimization', 'Distributed architecture', 'High availability'], + ARRAY['Limited SQL support', 'Complex operations', 'Specialized use case'], + 'Apache 2.0', + ARRAY['Time-series Applications', 'IoT Data', 'Sensor Networks']), + +-- Key-Value Databases (Original 5 + 20 new = 25 total) +('Redis', 'key-value', false, true, true, 92, 95, 'eventual', 'Redis commands', '512 MB per key', + ARRAY['RDB snapshots', 'AOF persistence', 'Replica synchronization'], + ARRAY['AUTH command', 'SSL/TLS support', 'ACLs', 'Network security'], + ARRAY['Caching', 'Session storage', 'Real-time analytics', 'Message queuing', 'Leaderboards'], + ARRAY['Extremely fast', 'In-memory storage', 'Rich data structures', 'Pub/Sub messaging'], + ARRAY['Memory limitations', 'Persistence complexity', 'Single-threaded', 'Data durability concerns'], + 'BSD', + ARRAY['Real-time Analytics', 'Caching Systems', 'Gaming', 'Session Management', 'E-commerce']), + +('Amazon DynamoDB', 'key-value', false, true, true, 91, 90, 'eventual', 'DynamoDB API', '400 KB per item', + ARRAY['On-demand backups', 'Point-in-time recovery', 'Cross-region replication'], + ARRAY['IAM integration', 'VPC endpoints', 'Encryption at rest', 'Fine-grained access control'], + ARRAY['Serverless applications', 'IoT data', 'Gaming', 'Mobile backends', 'Real-time bidding'], + ARRAY['Serverless', 'Auto-scaling', 'Low latency', 'AWS integration', 'Managed service'], + ARRAY['AWS lock-in', 'Query limitations', 'Cost unpredictability', 'Learning curve'], + 'Commercial', + ARRAY['Serverless Applications', 'IoT', 'Gaming', 'Mobile Backends', 'AWS Cloud']), + +('Riak KV', 'key-value', false, true, false, 79, 81, 'eventual', 'HTTP API', 'Unlimited', + ARRAY['Multi-datacenter replication', 'Backup/restore utilities'], + ARRAY['SSL/TLS', 'User authentication', 'Access controls'], + ARRAY['Distributed systems', 'High-availability applications', 'Session storage'], + ARRAY['High availability', 'Fault tolerance', 'Distributed architecture', 'Conflict resolution'], + ARRAY['Complex operations', 'Eventual consistency', 'Limited query capabilities', 'Operational complexity'], + 'Apache 2.0', + ARRAY['High Availability Systems', 'Distributed Applications', 'Fault-tolerant Systems']), + +('Berkeley DB', 'key-value', true, false, true, 86, 83, 'strong', 'API calls', 'Unlimited', + ARRAY['Hot backups', 'Incremental backups', 'Transaction logs'], + ARRAY['File permissions', 'Encryption API'], + ARRAY['Embedded systems', 'High-performance applications', 'System software', 'Mobile apps'], + ARRAY['High performance', 'Embeddable', 'ACID compliance', 'Small footprint', 'Mature'], + ARRAY['Low-level API', 'Complex programming', 'Limited tools', 'Oracle licensing'], + 'Sleepycat/Commercial', + ARRAY['Embedded Systems', 'System Software', 'High-performance Applications', 'Mobile Applications']), + +('Hazelcast', 'key-value', false, true, true, 85, 89, 'strong', 'Java API', 'Available memory', + ARRAY['Cluster-wide backups', 'MapStore persistence', 'WAN replication'], + ARRAY['SSL/TLS', 'JAAS integration', 'Client authentication', 'Cluster security'], + ARRAY['Distributed caching', 'Session clustering', 'Real-time processing', 'Microservices'], + ARRAY['In-memory speed', 'Distributed computing', 'Java integration', 'Real-time processing'], + ARRAY['Memory constraints', 'Java ecosystem dependency', 'Complex configuration'], + 'Apache 2.0/Commercial', + ARRAY['Distributed Caching', 'Java Applications', 'Real-time Processing', 'Microservices']), + +-- Additional Key-Value Databases (20 new) +('Apache Ignite', 'key-value', true, true, true, 84, 87, 'strong', 'SQL/Key-Value API', 'Available memory', + ARRAY['Native persistence', 'Incremental snapshots', 'WAL backups'], + ARRAY['Authentication', 'SSL/TLS', 'Transparent data encryption'], + ARRAY['In-memory computing', 'Distributed caching', 'Real-time processing'], + ARRAY['In-memory speed', 'ACID compliance', 'SQL support', 'Distributed computing'], + ARRAY['Memory intensive', 'Complex configuration', 'Java dependency'], + 'Apache 2.0', + ARRAY['In-memory Computing', 'Distributed Caching', 'Real-time Processing']), + +('Memcached', 'key-value', false, true, false, 88, 94, 'eventual', 'Protocol commands', 'Available memory', + ARRAY['No built-in persistence', 'Client-side backup strategies'], + ARRAY['SASL authentication', 'Binary protocol security'], + ARRAY['Web application caching', 'Session storage', 'Database query caching'], + ARRAY['Extremely fast', 'Simple design', 'Wide support', 'Memory efficient'], + ARRAY['No persistence', 'No replication', 'Limited data structures', 'No built-in security'], + 'BSD', + ARRAY['Web Caching', 'Session Storage', 'Database Caching']), + +('Etcd', 'key-value', true, true, false, 86, 84, 'strong', 'gRPC API', 'Available memory', + ARRAY['Raft consensus backups', 'Snapshot backups', 'WAL recovery'], + ARRAY['TLS encryption', 'RBAC', 'Client certificates', 'Audit logging'], + ARRAY['Configuration management', 'Service discovery', 'Distributed coordination'], + ARRAY['Strong consistency', 'Distributed consensus', 'Kubernetes integration', 'Reliable'], + ARRAY['Limited scalability', 'Memory constraints', 'Network partitions sensitivity'], + 'Apache 2.0', + ARRAY['Configuration Management', 'Service Discovery', 'Kubernetes', 'Distributed Systems']), + +('Apache Zookeeper', 'key-value', true, true, false, 85, 82, 'strong', 'ZooKeeper API', 'Available memory', + ARRAY['Transaction logs', 'Snapshots', 'Backup utilities'], + ARRAY['SASL authentication', 'Kerberos integration', 'Access control lists'], + ARRAY['Distributed coordination', 'Configuration management', 'Naming services'], + ARRAY['Proven reliability', 'Strong consistency', 'Mature ecosystem', 'Zab consensus'], + ARRAY['Complex administration', 'Limited scalability', 'Java dependency'], + 'Apache 2.0', + ARRAY['Distributed Coordination', 'Configuration Management', 'Apache Ecosystem']), + +('Consul', 'key-value', true, true, true, 83, 81, 'strong', 'HTTP API', 'Available memory', + ARRAY['Raft snapshots', 'Backup utilities', 'Cross-datacenter replication'], + ARRAY['ACL system', 'TLS encryption', 'Gossip encryption', 'Connect CA'], + ARRAY['Service discovery', 'Configuration management', 'Health checking'], + ARRAY['Service mesh integration', 'Multi-datacenter', 'Health checking', 'DNS integration'], + ARRAY['Complex networking', 'Resource intensive', 'Learning curve'], + 'Mozilla Public License', + ARRAY['Service Discovery', 'Service Mesh', 'Multi-datacenter', 'DevOps']), + +('LevelDB', 'key-value', false, false, true, 80, 86, 'strong', 'C++ API', 'Available disk', + ARRAY['Manual backup', 'File-based backups'], + ARRAY['File system permissions', 'Application-level security'], + ARRAY['Embedded applications', 'Local storage', 'Mobile applications'], + ARRAY['Fast writes', 'Embedded design', 'Google developed', 'LSM tree storage'], + ARRAY['No network interface', 'Single process', 'No built-in replication'], + 'BSD', + ARRAY['Embedded Applications', 'Local Storage', 'Mobile Apps']), + +('RocksDB', 'key-value', false, false, true, 84, 90, 'strong', 'C++ API', 'Available disk', + ARRAY['Backup engine', 'Checkpoint snapshots', 'WAL recovery'], + ARRAY['File system permissions', 'Application-level encryption'], + ARRAY['Embedded storage', 'Write-heavy applications', 'Stream processing'], + ARRAY['High write performance', 'Configurable', 'LSM optimization', 'Facebook developed'], + ARRAY['Complex tuning', 'No network interface', 'Single process'], + 'Apache 2.0/GPL', + ARRAY['Embedded Storage', 'Write-heavy Applications', 'Stream Processing']), + +('Voldemort', 'key-value', false, true, true, 76, 78, 'eventual', 'Java API', 'Unlimited', + ARRAY['Read-only stores', 'Incremental updates', 'Backup utilities'], + ARRAY['Basic authentication', 'SSL support'], + ARRAY['High-volume serving systems', 'Read-heavy workloads'], + ARRAY['High availability', 'Fault tolerance', 'Consistent hashing', 'LinkedIn developed'], + ARRAY['Complex setup', 'Limited features', 'Declining support'], + 'Apache 2.0', + ARRAY['High-volume Systems', 'Read-heavy Workloads', 'Fault-tolerant Systems']), + +('GridDB', 'key-value', true, true, true, 78, 82, 'strong', 'SQL/NoSQL API', 'Unlimited', + ARRAY['Online backup', 'Point-in-time recovery', 'Cluster backup'], + ARRAY['Authentication', 'SSL/TLS', 'Access control'], + ARRAY['IoT applications', 'Time-series data', 'Sensor networks'], + ARRAY['Time-series optimization', 'In-memory processing', 'ACID compliance'], + ARRAY['Limited ecosystem', 'Complex configuration', 'Niche focus'], + 'AGPL/Commercial', + ARRAY['IoT Applications', 'Time-series Data', 'Sensor Networks']), + +('KeyDB', 'key-value', false, true, true, 87, 92, 'eventual', 'Redis commands', '512 MB per key', + ARRAY['RDB snapshots', 'AOF persistence', 'Multi-master replication'], + ARRAY['AUTH command', 'TLS support', 'ACLs'], + ARRAY['High-performance caching', 'Session storage', 'Real-time applications'], + ARRAY['Redis compatibility', 'Multi-threaded', 'Higher performance', 'Active replication'], + ARRAY['Newer project', 'Limited ecosystem', 'Memory constraints'], + 'BSD', + ARRAY['High-performance Caching', 'Real-time Applications', 'Redis Enhancement']), + +('Aerospike', 'key-value', false, true, true, 86, 91, 'eventual', 'Client APIs', 'Unlimited', + ARRAY['Cross-datacenter replication', 'Backup utilities', 'Snapshot backups'], + ARRAY['RBAC', 'LDAP integration', 'TLS encryption', 'Audit logging'], + ARRAY['Real-time applications', 'AdTech', 'Gaming', 'Financial services'], + ARRAY['Extremely fast', 'Hybrid memory architecture', 'Strong consistency options', 'Linear scaling'], + ARRAY['Complex configuration', 'Memory/SSD requirements', 'Commercial licensing'], + 'AGPL/Commercial', + ARRAY['Real-time Applications', 'AdTech', 'Gaming', 'High-performance Systems']), + +('LMDB', 'key-value', true, false, true, 82, 88, 'strong', 'C API', 'Available memory', + ARRAY['File-based backups', 'Memory-mapped backups'], + ARRAY['File permissions', 'Process isolation'], + ARRAY['Embedded applications', 'System databases', 'Caching layers'], + ARRAY['Memory-mapped', 'ACID compliance', 'Zero-copy reads', 'Crash-proof'], + ARRAY['Single writer', 'Memory limitations', 'No network interface'], + 'OpenLDAP License', + ARRAY['Embedded Applications', 'System Databases', 'Caching']), + +('TiKV', 'key-value', true, true, true, 83, 85, 'strong', 'gRPC API', 'Unlimited', + ARRAY['Raft snapshots', 'Incremental backup', 'Cross-region replication'], + ARRAY['TLS encryption', 'Certificate authentication'], + ARRAY['Distributed systems', 'Cloud-native applications', 'Microservices'], + ARRAY['Distributed transactions', 'Raft consensus', 'Cloud-native', 'Rust implementation'], + ARRAY['Complex distributed system', 'Resource intensive', 'Operational complexity'], + 'Apache 2.0', + ARRAY['Distributed Systems', 'Cloud-native Apps', 'Microservices']), + +('FDB (FoundationDB)', 'key-value', true, true, true, 88, 89, 'strong', 'Multi-language APIs', 'Unlimited', + ARRAY['Continuous backup', 'Point-in-time recovery', 'Cross-datacenter replication'], + ARRAY['TLS encryption', 'Client authentication'], + ARRAY['Distributed databases', 'OLTP systems', 'Multi-model databases'], + ARRAY['ACID guarantees', 'Multi-model support', 'Apple developed', 'Strong consistency'], + ARRAY['Complex architecture', 'Limited tooling', 'Steep learning curve'], + 'Apache 2.0', + ARRAY['Distributed Databases', 'OLTP Systems', 'Multi-model Applications']), + +('Infinite Graph', 'key-value', true, true, true, 75, 77, 'strong', 'Java/C++ API', 'Unlimited', + ARRAY['Hot backup', 'Incremental backup', 'Replication'], + ARRAY['User authentication', 'Access controls', 'Encryption support'], + ARRAY['Graph analytics', 'Social networks', 'Fraud detection'], + ARRAY['Distributed graph processing', 'High performance', 'ACID compliance'], + ARRAY['Commercial licensing', 'Complex setup', 'Limited adoption'], + 'Commercial', + ARRAY['Graph Analytics', 'Social Networks', 'Fraud Detection']), + +('Tokyo Cabinet', 'key-value', false, false, true, 74, 83, 'strong', 'C API', 'Available disk', + ARRAY['File-based backup', 'Replication utilities'], + ARRAY['File permissions', 'Access controls'], + ARRAY['Embedded databases', 'High-performance storage', 'System applications'], + ARRAY['High performance', 'Multiple storage formats', 'Lightweight'], + ARRAY['Single process', 'Limited features', 'No network interface'], + 'LGPL', + ARRAY['Embedded Databases', 'High-performance Storage', 'System Applications']), + +('Amazon ElastiCache', 'key-value', false, true, true, 87, 88, 'eventual', 'Redis/Memcached', 'Configurable', + ARRAY['Automated backups', 'Manual snapshots', 'Cross-region replication'], + ARRAY['VPC security', 'Encryption at rest/transit', 'IAM policies'], + ARRAY['Web applications', 'Session storage', 'Real-time analytics'], + ARRAY['Managed service', 'Multi-engine support', 'Auto-scaling', 'AWS integration'], + ARRAY['AWS lock-in', 'Limited customization', 'Cost considerations'], + 'Commercial', + ARRAY['AWS Applications', 'Web Caching', 'Session Storage']), + +('Azure Cache for Redis', 'key-value', false, true, true, 86, 86, 'eventual', 'Redis commands', 'Configurable', + ARRAY['Automated backups', 'Export/import', 'Geo-replication'], + ARRAY['AAD integration', 'VNet isolation', 'TLS encryption'], + ARRAY['Azure applications', 'Session storage', 'Real-time applications'], + ARRAY['Managed service', 'Azure integration', 'High availability', 'Multiple tiers'], + ARRAY['Azure lock-in', 'Limited Redis features', 'Cost complexity'], + 'Commercial', + ARRAY['Azure Applications', 'Session Storage', 'Real-time Apps']), + +('Google Cloud Memorystore', 'key-value', false, true, true, 85, 85, 'eventual', 'Redis/Memcached', 'Configurable', + ARRAY['Automated backups', 'Point-in-time recovery', 'Cross-region replicas'], + ARRAY['VPC security', 'IAM integration', 'TLS encryption'], + ARRAY['Google Cloud applications', 'Gaming', 'Real-time analytics'], + ARRAY['Managed service', 'Google Cloud integration', 'High availability'], + ARRAY['Google Cloud lock-in', 'Limited customization', 'Regional availability'], + 'Commercial', + ARRAY['Google Cloud', 'Gaming', 'Real-time Analytics']), + +('Tarantool', 'key-value', true, true, true, 81, 87, 'strong', 'Lua/SQL', 'Available memory', + ARRAY['WAL backups', 'Snapshots', 'Replication'], + ARRAY['User authentication', 'SSL/TLS support', 'Access controls'], + ARRAY['High-performance applications', 'Game backends', 'Financial systems'], + ARRAY['In-memory speed', 'Lua scripting', 'ACID compliance', 'Stored procedures'], + ARRAY['Lua dependency', 'Memory constraints', 'Limited ecosystem'], + 'BSD', + ARRAY['High-performance Apps', 'Game Backends', 'Financial Systems']), + +-- Column-Family Databases (Original 4 + 11 new = 15 total) +('Apache Cassandra', 'column-family', false, true, true, 89, 87, 'eventual', 'CQL', 'Unlimited', + ARRAY['Incremental backups', 'Snapshot backups', 'Point-in-time recovery'], + ARRAY['SSL/TLS encryption', 'Role-based access control', 'Transparent data encryption'], + ARRAY['Time-series data', 'IoT applications', 'Messaging systems', 'Recommendation engines'], + ARRAY['Linear scalability', 'High availability', 'Distributed architecture', 'No single point of failure'], + ARRAY['Eventual consistency', 'Complex data modeling', 'Memory intensive', 'Operational complexity'], + 'Apache 2.0', + ARRAY['Time-series Data', 'IoT Applications', 'Large-scale Systems', 'Distributed Applications']), + +('HBase', 'column-family', false, true, true, 83, 82, 'strong', 'Java API/Thrift', 'Unlimited', + ARRAY['HDFS snapshots', 'Export/import utilities', 'Replication'], + ARRAY['Kerberos authentication', 'Cell-level security', 'Access control lists'], + ARRAY['Big data analytics', 'Real-time applications', 'Time-series data', 'Log processing'], + ARRAY['Hadoop integration', 'Real-time access', 'Automatic sharding', 'Strong consistency'], + ARRAY['Hadoop dependency', 'Complex setup', 'Java ecosystem', 'Operational overhead'], + 'Apache 2.0', + ARRAY['Big Data Analytics', 'Hadoop Ecosystem', 'Real-time Applications', 'Log Processing']); + + + INSERT INTO cloud_technologies ( + name, provider, service_type, global_availability, uptime_sla, auto_scaling, + serverless_support, container_support, managed_services, security_certifications, + primary_use_cases, strengths, weaknesses, free_tier_available, domain +) VALUES +-- Original 5 entries +('AWS', 'amazon', 'iaas', 25, 99.999, true, true, true, + ARRAY['RDS', 'Lambda', 'S3', 'CloudFront', 'ElastiCache', 'API Gateway', 'Cognito'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA', 'PCI DSS'], + ARRAY['Web hosting', 'Data storage', 'Serverless computing', 'Machine learning', 'Big data analytics'], + ARRAY['Comprehensive services', 'Market leader', 'Global reach', 'Reliable infrastructure', 'Strong security'], + ARRAY['Complex pricing', 'Steep learning curve', 'Vendor lock-in risk', 'Cost optimization challenges'], + true, + ARRAY['Enterprise Applications', 'E-commerce', 'Big Data Analytics', 'Machine Learning', 'IoT']), + +('Vercel', 'vercel', 'paas', 12, 99.99, true, true, true, + ARRAY['Edge Functions', 'Analytics', 'Preview Deployments', 'Domain Management'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['Frontend deployment', 'JAMstack applications', 'Static sites', 'Serverless functions'], + ARRAY['Excellent DX', 'Fast deployments', 'Edge network', 'Git integration', 'Zero config'], + ARRAY['Frontend focused', 'Limited backend capabilities', 'Pricing for scale', 'Less enterprise features'], + true, + ARRAY['Startups', 'Static Websites', 'JAMstack Applications', 'E-commerce', 'Developer Tools']), + +('DigitalOcean', 'digitalocean', 'iaas', 8, 99.99, true, false, true, + ARRAY['Managed Databases', 'Load Balancers', 'Spaces', 'App Platform'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Web applications', 'Development environments', 'Small to medium businesses', 'API hosting'], + ARRAY['Simple pricing', 'Developer friendly', 'Good documentation', 'Affordable', 'Easy to use'], + ARRAY['Limited services', 'Smaller global footprint', 'Less enterprise features', 'Limited scalability'], + true, + ARRAY['Small Business', 'Web Applications', 'Development Environments', 'Startups', 'API Hosting']), + +('Railway', 'railway', 'paas', 3, 99.9, true, false, true, + ARRAY['Postgres', 'Redis', 'Environment management', 'Git deployments'], + ARRAY['SOC 2 Type II'], + ARRAY['Full-stack applications', 'Database hosting', 'API development', 'Rapid prototyping'], + ARRAY['Simple deployment', 'Good pricing', 'Database included', 'Git integration', 'Developer friendly'], + ARRAY['Limited regions', 'Newer platform', 'Fewer services', 'Less enterprise ready'], + true, + ARRAY['Startups', 'Prototyping', 'Full-stack Applications', 'Database Hosting', 'API Development']), + +('Netlify', 'netlify', 'paas', 4, 99.9, true, true, false, + ARRAY['Forms', 'Identity', 'Analytics', 'Split Testing'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['Static sites', 'JAMstack applications', 'Frontend deployment', 'Landing pages'], + ARRAY['Easy deployment', 'CDN included', 'Form handling', 'Branch previews', 'Good free tier'], + ARRAY['Static sites only', 'Limited backend', 'Function limitations', 'Bandwidth costs'], + true, + ARRAY['Static Websites', 'JAMstack Applications', 'Marketing Landing Pages', 'Startups', 'Content Management Systems']), + +-- Major Cloud Providers +('Google Cloud', 'google', 'iaas', 24, 99.999, true, true, true, + ARRAY['BigQuery', 'Cloud Functions', 'Cloud Storage', 'Kubernetes Engine', 'AI Platform'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA', 'PCI DSS'], + ARRAY['Machine learning', 'Data analytics', 'Container orchestration', 'Web hosting'], + ARRAY['AI/ML leadership', 'Kubernetes native', 'Data analytics', 'Global network', 'Competitive pricing'], + ARRAY['Smaller market share', 'Learning curve', 'Documentation gaps', 'Limited enterprise support'], + true, + ARRAY['Machine Learning', 'Data Analytics', 'Container Applications', 'Enterprise', 'Gaming']), + +('Microsoft Azure', 'microsoft', 'iaas', 60, 99.999, true, true, true, + ARRAY['Azure SQL', 'Functions', 'Blob Storage', 'AKS', 'Cognitive Services'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA', 'PCI DSS'], + ARRAY['Enterprise applications', 'Hybrid cloud', 'Windows workloads', 'AI services'], + ARRAY['Enterprise integration', 'Hybrid capabilities', 'Microsoft ecosystem', 'Global presence'], + ARRAY['Complex pricing', 'Learning curve', 'UI complexity', 'Documentation fragmentation'], + true, + ARRAY['Enterprise', 'Windows Applications', 'Hybrid Cloud', 'Government', 'Healthcare']), + +('IBM Cloud', 'ibm', 'iaas', 19, 99.95, true, true, true, + ARRAY['Watson', 'Cloudant', 'Cloud Functions', 'Kubernetes Service'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['Enterprise applications', 'AI/ML', 'Hybrid cloud', 'Mainframe integration'], + ARRAY['Enterprise focus', 'AI capabilities', 'Hybrid cloud', 'Industry expertise'], + ARRAY['Market position', 'Pricing', 'Developer experience', 'Limited consumer focus'], + true, + ARRAY['Enterprise', 'AI/ML', 'Mainframe Integration', 'Financial Services', 'Healthcare']), + +('Oracle Cloud', 'oracle', 'iaas', 37, 99.95, true, true, true, + ARRAY['Autonomous Database', 'Functions', 'Object Storage', 'Container Engine'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['Database workloads', 'Enterprise applications', 'ERP systems'], + ARRAY['Database expertise', 'Performance', 'Enterprise features', 'Autonomous services'], + ARRAY['Limited ecosystem', 'Pricing', 'Market adoption', 'Learning curve'], + true, + ARRAY['Database Applications', 'ERP Systems', 'Enterprise', 'Financial Services', 'Government']), + +('Alibaba Cloud', 'alibaba', 'iaas', 25, 99.95, true, true, true, + ARRAY['MaxCompute', 'Function Compute', 'OSS', 'Container Service'], + ARRAY['ISO 27001', 'SOC 2', 'CSA STAR'], + ARRAY['E-commerce', 'Big data', 'AI/ML', 'Global expansion'], + ARRAY['Asia-Pacific presence', 'E-commerce expertise', 'Competitive pricing', 'AI capabilities'], + ARRAY['Limited Western presence', 'Documentation', 'Regulatory concerns', 'Brand recognition'], + true, + ARRAY['E-commerce', 'Asia-Pacific', 'Big Data', 'Gaming', 'Media']), + +-- Platform as a Service (PaaS) +('Heroku', 'salesforce', 'paas', 6, 99.99, true, false, true, + ARRAY['Postgres', 'Redis', 'Add-ons Marketplace', 'CI/CD'], + ARRAY['SOC 2', 'PCI DSS', 'HIPAA'], + ARRAY['Web applications', 'API development', 'Rapid prototyping', 'MVP development'], + ARRAY['Easy deployment', 'Developer friendly', 'Add-ons ecosystem', 'Git integration'], + ARRAY['Expensive at scale', 'Limited customization', 'Vendor lock-in', 'Performance limitations'], + true, + ARRAY['Startups', 'Web Applications', 'Prototyping', 'API Development', 'MVPs']), + +('Platform.sh', 'platformsh', 'paas', 4, 99.9, true, false, true, + ARRAY['Multi-service architecture', 'Git-driven deployment', 'Environment cloning'], + ARRAY['ISO 27001', 'GDPR compliant'], + ARRAY['Enterprise applications', 'E-commerce', 'Content management', 'Multi-environment development'], + ARRAY['Git-driven workflow', 'Environment management', 'Enterprise focus', 'Multi-service support'], + ARRAY['Complex configuration', 'Learning curve', 'Pricing', 'Limited free tier'], + false, + ARRAY['Enterprise', 'E-commerce', 'Content Management', 'Multi-service Applications', 'Development Teams']), + +('OpenShift', 'redhat', 'paas', 12, 99.95, true, false, true, + ARRAY['Kubernetes', 'DevOps tools', 'Monitoring', 'Security'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Container applications', 'Enterprise development', 'Microservices', 'DevOps'], + ARRAY['Kubernetes native', 'Enterprise grade', 'Security focus', 'Red Hat ecosystem'], + ARRAY['Complexity', 'Cost', 'Learning curve', 'Resource intensive'], + false, + ARRAY['Enterprise', 'Container Applications', 'Microservices', 'DevOps', 'Government']), + +('Cloud Foundry', 'pivotal', 'paas', 8, 99.9, true, false, true, + ARRAY['Buildpacks', 'Services marketplace', 'Multi-cloud'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Enterprise applications', 'Multi-cloud deployment', 'Legacy modernization'], + ARRAY['Multi-cloud', 'Enterprise ready', 'Standardization', 'Open source'], + ARRAY['Complexity', 'Learning curve', 'Market position', 'Limited innovation'], + false, + ARRAY['Enterprise', 'Legacy Modernization', 'Multi-cloud', 'Financial Services', 'Government']), + +('Engine Yard', 'engineyard', 'paas', 3, 99.9, true, false, true, + ARRAY['Ruby on Rails', 'PHP', 'Node.js', 'Database management'], + ARRAY['SOC 2', 'PCI DSS'], + ARRAY['Ruby applications', 'PHP applications', 'Legacy applications'], + ARRAY['Ruby expertise', 'Managed services', 'Performance optimization', 'Support'], + ARRAY['Limited languages', 'Market position', 'Pricing', 'Innovation pace'], + false, + ARRAY['Ruby Applications', 'PHP Applications', 'Legacy Systems', 'E-commerce', 'Enterprise']), + +-- Serverless Platforms +('AWS Lambda', 'amazon', 'faas', 25, 99.999, true, true, false, + ARRAY['Event triggers', 'API Gateway integration', 'Step Functions'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['Event processing', 'API backends', 'Data processing', 'Automation'], + ARRAY['Mature platform', 'Rich ecosystem', 'Event sources', 'Cost effective'], + ARRAY['Cold starts', 'Vendor lock-in', 'Debugging complexity', 'Time limits'], + true, + ARRAY['Event Processing', 'API Backends', 'Data Processing', 'Automation', 'Real-time Applications']), + +('Cloudflare Workers', 'cloudflare', 'faas', 200, 99.99, true, true, false, + ARRAY['Edge computing', 'KV storage', 'Durable Objects'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Edge computing', 'API optimization', 'Content modification', 'Security'], + ARRAY['Edge performance', 'No cold starts', 'Global distribution', 'WebAssembly support'], + ARRAY['Limited runtime', 'V8 isolates only', 'Pricing model', 'Debugging tools'], + true, + ARRAY['Edge Computing', 'API Optimization', 'Content Delivery', 'Security', 'Performance']), + +('Google Cloud Functions', 'google', 'faas', 24, 99.99, true, true, false, + ARRAY['HTTP triggers', 'Cloud Storage triggers', 'Pub/Sub integration'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Event processing', 'Data processing', 'Webhooks', 'API backends'], + ARRAY['GCP integration', 'Auto-scaling', 'Pay per use', 'Multi-language support'], + ARRAY['Cold starts', 'Limited execution time', 'Regional availability', 'Debugging complexity'], + true, + ARRAY['Event Processing', 'Data Processing', 'Webhooks', 'API Backends', 'Integration Services']), + +('Azure Functions', 'microsoft', 'faas', 60, 99.99, true, true, false, + ARRAY['Timer triggers', 'HTTP triggers', 'Logic Apps integration'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Event processing', 'Automation', 'API backends', 'Integration'], + ARRAY['Azure integration', 'Multiple languages', 'Flexible hosting', 'Visual Studio integration'], + ARRAY['Cold starts', 'Complexity', 'Performance variability', 'Pricing complexity'], + true, + ARRAY['Event Processing', 'Automation', 'API Backends', 'Integration', 'Enterprise Applications']), + +-- Container Platforms +('Docker Hub', 'docker', 'container', 1, 99.9, false, false, true, + ARRAY['Container registry', 'Automated builds', 'Webhooks'], + ARRAY['SOC 2'], + ARRAY['Container distribution', 'Image hosting', 'CI/CD integration'], + ARRAY['Industry standard', 'Large community', 'Easy integration', 'Automated builds'], + ARRAY['Rate limiting', 'Storage costs', 'Security concerns', 'Limited enterprise features'], + true, + ARRAY['Container Distribution', 'Development', 'CI/CD', 'Open Source', 'Microservices']), + +('Amazon ECS', 'amazon', 'container', 25, 99.999, true, false, true, + ARRAY['Task definitions', 'Service discovery', 'Load balancing'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Container orchestration', 'Microservices', 'Batch processing'], + ARRAY['AWS integration', 'Managed service', 'Security', 'Performance'], + ARRAY['AWS lock-in', 'Learning curve', 'Less flexible than Kubernetes', 'Complexity'], + true, + ARRAY['Container Orchestration', 'Microservices', 'Batch Processing', 'Enterprise', 'Web Applications']), + +('Amazon EKS', 'amazon', 'container', 25, 99.999, true, false, true, + ARRAY['Managed Kubernetes', 'Auto-scaling', 'Security'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Kubernetes applications', 'Microservices', 'ML workloads'], + ARRAY['Managed Kubernetes', 'AWS integration', 'Security', 'Scalability'], + ARRAY['Cost', 'Complexity', 'Learning curve', 'Management overhead'], + true, + ARRAY['Kubernetes Applications', 'Microservices', 'Machine Learning', 'Enterprise', 'DevOps']), + +('Google Kubernetes Engine', 'google', 'container', 24, 99.999, true, false, true, + ARRAY['Autopilot mode', 'Workload Identity', 'Binary Authorization'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Container orchestration', 'Microservices', 'CI/CD'], + ARRAY['Kubernetes origin', 'Autopilot simplicity', 'Google infrastructure', 'Innovation'], + ARRAY['GCP lock-in', 'Cost optimization', 'Learning curve', 'Complexity'], + true, + ARRAY['Container Orchestration', 'Microservices', 'CI/CD', 'Machine Learning', 'DevOps']), + +('Azure Container Instances', 'microsoft', 'container', 60, 99.9, true, false, true, + ARRAY['Serverless containers', 'Virtual network integration', 'Persistent storage'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Serverless containers', 'Burst scaling', 'Batch jobs'], + ARRAY['Serverless model', 'Fast startup', 'No orchestration needed', 'Pay per second'], + ARRAY['Limited orchestration', 'Networking complexity', 'Storage limitations', 'Regional availability'], + true, + ARRAY['Serverless Containers', 'Burst Scaling', 'Batch Processing', 'Development', 'Testing']), + +-- Database as a Service +('MongoDB Atlas', 'mongodb', 'dbaas', 95, 99.995, true, false, false, + ARRAY['Global clusters', 'Full-text search', 'Data Lake', 'Charts'], + ARRAY['SOC 2', 'ISO 27001', 'HIPAA', 'PCI DSS'], + ARRAY['Document databases', 'Content management', 'Real-time analytics', 'Mobile applications'], + ARRAY['Global distribution', 'Developer friendly', 'Rich querying', 'Managed service'], + ARRAY['Cost at scale', 'Learning curve', 'Memory usage', 'Complex aggregations'], + true, + ARRAY['Content Management', 'Real-time Analytics', 'Mobile Applications', 'IoT', 'E-commerce']), + +('Amazon RDS', 'amazon', 'dbaas', 25, 99.99, true, false, false, + ARRAY['Multi-AZ deployment', 'Read replicas', 'Automated backups'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['Relational databases', 'Web applications', 'Enterprise applications'], + ARRAY['Multiple engines', 'Managed service', 'High availability', 'Security'], + ARRAY['Cost', 'Less control', 'Performance tuning limitations', 'Regional restrictions'], + true, + ARRAY['Web Applications', 'Enterprise Applications', 'E-commerce', 'Data Warehousing', 'Analytics']), + +('Google Cloud SQL', 'google', 'dbaas', 24, 99.95, true, false, false, + ARRAY['High availability', 'Read replicas', 'Point-in-time recovery'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Relational databases', 'Web applications', 'Mobile backends'], + ARRAY['GCP integration', 'Performance insights', 'Automatic storage increase', 'Security'], + ARRAY['GCP lock-in', 'Limited customization', 'Cost', 'Regional limitations'], + true, + ARRAY['Web Applications', 'Mobile Backends', 'Analytics', 'Enterprise Applications', 'Development']), + +('Azure SQL Database', 'microsoft', 'dbaas', 60, 99.99, true, false, false, + ARRAY['Elastic pools', 'Intelligent performance', 'Threat detection'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['SQL Server applications', 'Enterprise applications', 'Data warehousing'], + ARRAY['SQL Server compatibility', 'Intelligent features', 'Elastic scaling', 'Security'], + ARRAY['SQL Server focus', 'Cost complexity', 'Feature limitations', 'Learning curve'], + true, + ARRAY['SQL Server Applications', 'Enterprise Applications', 'Data Warehousing', 'Analytics', 'Migration']), + +('PlanetScale', 'planetscale', 'dbaas', 3, 99.99, true, false, false, + ARRAY['Branching', 'Schema management', 'Connection pooling'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['MySQL applications', 'Schema migrations', 'Development workflows'], + ARRAY['Database branching', 'Schema safety', 'Developer experience', 'Serverless scaling'], + ARRAY['MySQL only', 'Newer platform', 'Limited regions', 'Learning curve'], + true, + ARRAY['MySQL Applications', 'Schema Management', 'Development Workflows', 'Startups', 'SaaS']), + +('Supabase', 'supabase', 'dbaas', 8, 99.9, true, false, false, + ARRAY['Real-time subscriptions', 'Authentication', 'Storage', 'Edge Functions'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['PostgreSQL applications', 'Real-time applications', 'Full-stack development'], + ARRAY['Open source', 'Real-time features', 'Developer experience', 'PostgreSQL power'], + ARRAY['Newer platform', 'Limited enterprise features', 'Growing ecosystem', 'Documentation gaps'], + true, + ARRAY['PostgreSQL Applications', 'Real-time Applications', 'Full-stack Development', 'Startups', 'Modern Web Apps']), + +('CockroachDB', 'cockroachlabs', 'dbaas', 12, 99.99, true, false, false, + ARRAY['Distributed SQL', 'Multi-region', 'ACID transactions'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Distributed applications', 'Global applications', 'Financial services'], + ARRAY['Global consistency', 'Horizontal scaling', 'SQL compatibility', 'Resilience'], + ARRAY['Complexity', 'Cost', 'Learning curve', 'Limited ecosystem'], + true, + ARRAY['Distributed Applications', 'Global Applications', 'Financial Services', 'Gaming', 'IoT']), + +-- CDN and Edge Services +('Cloudflare', 'cloudflare', 'cdn', 200, 99.99, true, true, false, + ARRAY['DDoS protection', 'WAF', 'Workers', 'R2 Storage'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Content delivery', 'Security', 'Performance optimization', 'Edge computing'], + ARRAY['Global network', 'Security features', 'Performance', 'Developer tools'], + ARRAY['Complexity', 'Debugging edge functions', 'Pricing tiers', 'Learning curve'], + true, + ARRAY['Content Delivery', 'Security', 'Performance Optimization', 'Edge Computing', 'DDoS Protection']), + +('Amazon CloudFront', 'amazon', 'cdn', 410, 99.99, true, true, false, + ARRAY['Lambda@Edge', 'Shield DDoS protection', 'Origin Shield'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Content delivery', 'Video streaming', 'API acceleration'], + ARRAY['AWS integration', 'Global reach', 'Edge computing', 'Security'], + ARRAY['AWS lock-in', 'Complexity', 'Cost optimization', 'Configuration complexity'], + true, + ARRAY['Content Delivery', 'Video Streaming', 'API Acceleration', 'Static Websites', 'Enterprise']), + +('Azure CDN', 'microsoft', 'cdn', 130, 99.9, true, false, false, + ARRAY['Rules engine', 'Real-time analytics', 'Purge API'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Content delivery', 'Media streaming', 'Web acceleration'], + ARRAY['Azure integration', 'Multiple providers', 'Analytics', 'Security'], + ARRAY['Azure lock-in', 'Feature differences', 'Complexity', 'Performance variability'], + true, + ARRAY['Content Delivery', 'Media Streaming', 'Web Acceleration', 'Enterprise', 'Gaming']), + +('KeyCDN', 'keycdn', 'cdn', 10, 99.9, true, false, false, + ARRAY['Real-time analytics', 'Image processing', 'Origin Shield'], + ARRAY['ISO 27001'], + ARRAY['Content delivery', 'Image optimization', 'Video streaming'], + ARRAY['Affordable pricing', 'Simple setup', 'Good performance', 'Customer support'], + ARRAY['Limited features', 'Smaller network', 'Less advanced security', 'Limited enterprise features'], + false, + ARRAY['Content Delivery', 'Image Optimization', 'Video Streaming', 'Small Business', 'Startups']), + +-- AI/ML Platforms +('Hugging Face', 'huggingface', 'aiml', 1, 99.9, true, false, true, + ARRAY['Model hosting', 'Inference API', 'Datasets', 'Spaces'], + ARRAY['SOC 2'], + ARRAY['Machine learning', 'Natural language processing', 'Model deployment'], + ARRAY['Open source community', 'Pre-trained models', 'Easy deployment', 'Collaboration'], + ARRAY['Limited enterprise features', 'Performance scaling', 'Cost at scale', 'Model licensing'], + true, + ARRAY['Machine Learning', 'Natural Language Processing', 'Computer Vision', 'Research', 'Startups']), + +('Replicate', 'replicate', 'aiml', 1, 99.9, true, false, true, + ARRAY['Model hosting', 'API access', 'Custom training'], + ARRAY['SOC 2'], + ARRAY['Machine learning inference', 'Image generation', 'Text processing'], + ARRAY['Easy deployment', 'Pay per use', 'Version control', 'API simplicity'], + ARRAY['Limited customization', 'Model availability', 'Cost predictability', 'Enterprise features'], + false, + ARRAY['Machine Learning Inference', 'Image Generation', 'Text Processing', 'Prototyping', 'Creative Applications']), + +('OpenAI API', 'openai', 'aiml', 1, 99.9, true, true, false, + ARRAY['GPT models', 'DALL-E', 'Whisper', 'Embeddings'], + ARRAY['SOC 2'], + ARRAY['Natural language processing', 'Text generation', 'Image generation', 'Audio processing'], + ARRAY['State-of-the-art models', 'Easy integration', 'Comprehensive APIs', 'Documentation'], + ARRAY['Cost', 'Rate limits', 'Model updates', 'Data privacy concerns'], + false, + ARRAY['Natural Language Processing', 'Text Generation', 'Image Generation', 'Chatbots', 'Content Creation']), + +-- Storage Services +('Backblaze B2', 'backblaze', 'storage', 1, 99.9, false, false, false, + ARRAY['S3-compatible API', 'Lifecycle policies', 'Object versioning'], + ARRAY['SOC 2'], + ARRAY['Backup storage', 'Archive storage', 'Content distribution'], + ARRAY['Low cost', 'Simple pricing', 'S3 compatibility', 'Good performance'], + ARRAY['Limited features', 'Single region', 'Less enterprise support', 'Smaller ecosystem'], + false, + ARRAY['Backup Storage', 'Archive Storage', 'Content Distribution', 'Cost-sensitive Workloads', 'SMB']), + +('Wasabi', 'wasabi', 'storage', 6, 99.9, false, false, false, + ARRAY['S3-compatible API', 'Immutable storage', 'Object versioning'], + ARRAY['SOC 2'], + ARRAY['Cloud storage', 'Backup', 'Archive', 'Content distribution'], + ARRAY['Predictable pricing', 'No egress fees', 'S3 compatibility', 'Performance'], + ARRAY['Limited regions', 'Minimum storage period', 'Less features', 'Enterprise limitations'], + false, + ARRAY['Cloud Storage', 'Backup', 'Archive', 'Media Storage', 'Data Migration']), + +-- Specialized Platforms +('Shopify', 'shopify', 'ecommerce', 6, 99.99, true, false, false, + ARRAY['Payment processing', 'Inventory management', 'Theme store', 'App ecosystem'], + ARRAY['PCI DSS', 'SOC 2'], + ARRAY['E-commerce', 'Online stores', 'Drop shipping', 'Point of sale'], + ARRAY['E-commerce focused', 'Easy setup', 'App ecosystem', 'Payment integration'], + ARRAY['Transaction fees', 'Customization limits', 'Vendor lock-in', 'Advanced features cost'], + true, + ARRAY['E-commerce', 'Online Stores', 'Drop Shipping', 'Retail', 'Small Business']), + +('Stripe', 'stripe', 'payments', 42, 99.99, true, true, false, + ARRAY['Payment processing', 'Subscriptions', 'Connect', 'Radar fraud detection'], + ARRAY['PCI DSS', 'SOC 2'], + ARRAY['Payment processing', 'Subscription billing', 'Marketplace payments', 'Financial services'], + ARRAY['Developer friendly', 'Global reach', 'Feature rich', 'Documentation'], + ARRAY['Transaction fees', 'Complexity', 'Account restrictions', 'Support response'], + false, + ARRAY['Payment Processing', 'Subscription Billing', 'Marketplace Payments', 'E-commerce', 'Fintech']), + +('Twilio', 'twilio', 'communications', 1, 99.95, true, true, false, + ARRAY['Programmable Voice', 'SMS', 'WhatsApp API', 'Video'], + ARRAY['SOC 2', 'HIPAA', 'PCI DSS'], + ARRAY['Communications', 'SMS/Voice', 'Customer engagement', 'Notifications'], + ARRAY['Comprehensive APIs', 'Global reach', 'Developer tools', 'Scalability'], + ARRAY['Cost', 'Complexity', 'Compliance challenges', 'Account management'], + false, + ARRAY['Communications', 'Customer Engagement', 'Notifications', 'Call Centers', 'Healthcare']), + +('SendGrid', 'twilio', 'communications', 1, 99.9, true, false, false, + ARRAY['Email API', 'Marketing campaigns', 'Analytics', 'Templates'], + ARRAY['SOC 2', 'HIPAA'], + ARRAY['Transactional email', 'Email marketing', 'Notifications'], + ARRAY['Reliable delivery', 'Analytics', 'Template system', 'API simplicity'], + ARRAY['Cost at scale', 'Deliverability issues', 'Limited customization', 'Account restrictions'], + true, + ARRAY['Transactional Email', 'Email Marketing', 'Notifications', 'SaaS Applications', 'E-commerce']), + +('Auth0', 'okta', 'identity', 35, 99.99, true, false, false, + ARRAY['Universal Login', 'Social connections', 'MFA', 'Rules engine'], + ARRAY['SOC 2', 'ISO 27001', 'HIPAA'], + ARRAY['Authentication', 'Identity management', 'Single sign-on', 'User management'], + ARRAY['Developer friendly', 'Extensive integrations', 'Scalable', 'Security features'], + ARRAY['Cost', 'Complexity', 'Lock-in risk', 'Learning curve'], + true, + ARRAY['Authentication', 'Identity Management', 'Single Sign-On', 'B2B SaaS', 'Enterprise']), + +('Firebase', 'google', 'baas', 1, 99.95, true, true, false, + ARRAY['Realtime Database', 'Authentication', 'Cloud Functions', 'Hosting'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Mobile applications', 'Web applications', 'Real-time features'], + ARRAY['Rapid development', 'Real-time sync', 'Google integration', 'Easy scaling'], + ARRAY['Google lock-in', 'Cost at scale', 'Limited backend control', 'NoSQL limitations'], + true, + ARRAY['Mobile Applications', 'Web Applications', 'Real-time Features', 'Startups', 'Prototyping']), + +('Contentful', 'contentful', 'cms', 6, 99.9, true, false, false, + ARRAY['Content API', 'Media management', 'Webhooks', 'Multi-language support'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Content management', 'Headless CMS', 'Multi-channel publishing'], + ARRAY['Developer friendly', 'API-first', 'Scalable', 'Multi-platform'], + ARRAY['Cost', 'Learning curve', 'Limited free tier', 'Complex pricing'], + true, + ARRAY['Content Management', 'Headless CMS', 'Multi-channel Publishing', 'E-commerce', 'Marketing']), + +('Sanity', 'sanity', 'cms', 5, 99.9, true, false, false, + ARRAY['Real-time editing', 'GROQ query language', 'Asset management', 'Webhooks'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['Content management', 'Structured content', 'Collaborative editing'], + ARRAY['Real-time collaboration', 'Flexible schema', 'Developer experience', 'Customizable'], + ARRAY['Learning curve', 'Limited templates', 'Query language complexity', 'Cost scaling'], + true, + ARRAY['Content Management', 'Structured Content', 'Collaborative Editing', 'Media', 'Publishing']), + +('Strapi', 'strapi', 'cms', 3, 99.9, true, false, true, + ARRAY['Admin panel', 'Content API', 'Plugin system', 'Role-based access'], + ARRAY['GDPR compliant'], + ARRAY['Headless CMS', 'API development', 'Content management'], + ARRAY['Open source', 'Customizable', 'Self-hosted option', 'Developer friendly'], + ARRAY['Self-hosting complexity', 'Limited cloud features', 'Scaling challenges', 'Enterprise limitations'], + true, + ARRAY['Headless CMS', 'API Development', 'Content Management', 'Startups', 'Small Teams']), + +-- Analytics and Monitoring +('New Relic', 'newrelic', 'monitoring', 16, 99.99, true, false, false, + ARRAY['APM', 'Infrastructure monitoring', 'Browser monitoring', 'Synthetics'], + ARRAY['SOC 2', 'FedRAMP'], + ARRAY['Application monitoring', 'Performance monitoring', 'Error tracking'], + ARRAY['Comprehensive monitoring', 'Real-time insights', 'AI-powered analysis', 'Integrations'], + ARRAY['Cost', 'Complexity', 'Data retention limits', 'Learning curve'], + true, + ARRAY['Application Monitoring', 'Performance Monitoring', 'DevOps', 'Enterprise', 'E-commerce']), + +('Datadog', 'datadog', 'monitoring', 19, 99.9, true, false, false, + ARRAY['Infrastructure monitoring', 'APM', 'Log management', 'Security monitoring'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Infrastructure monitoring', 'Application monitoring', 'Security monitoring'], + ARRAY['Unified platform', 'Rich visualizations', 'Machine learning', 'Integrations'], + ARRAY['Cost', 'Data volume pricing', 'Complexity', 'Alert fatigue'], + true, + ARRAY['Infrastructure Monitoring', 'Application Monitoring', 'Security Monitoring', 'DevOps', 'Enterprise']), + +('Sentry', 'sentry', 'monitoring', 10, 99.9, true, false, false, + ARRAY['Error tracking', 'Performance monitoring', 'Release tracking', 'Alerts'], + ARRAY['SOC 2'], + ARRAY['Error tracking', 'Performance monitoring', 'Debugging'], + ARRAY['Developer focused', 'Real-time alerts', 'Context-rich errors', 'Integrations'], + ARRAY['Cost at scale', 'Limited infrastructure monitoring', 'Alert noise', 'Data retention'], + true, + ARRAY['Error Tracking', 'Performance Monitoring', 'Debugging', 'Development Teams', 'SaaS']), + +('LogRocket', 'logrocket', 'monitoring', 4, 99.9, true, false, false, + ARRAY['Session replay', 'Performance monitoring', 'Error tracking', 'User analytics'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['Frontend monitoring', 'User experience', 'Bug reproduction'], + ARRAY['Session replay', 'User context', 'Performance insights', 'Easy integration'], + ARRAY['Privacy concerns', 'Data storage', 'Cost', 'Mobile limitations'], + true, + ARRAY['Frontend Monitoring', 'User Experience', 'Bug Reproduction', 'E-commerce', 'SaaS']), + +('Mixpanel', 'mixpanel', 'analytics', 5, 99.9, true, false, false, + ARRAY['Event tracking', 'Funnel analysis', 'Cohort analysis', 'A/B testing'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Product analytics', 'User behavior analysis', 'Growth tracking'], + ARRAY['Event-based tracking', 'Real-time analytics', 'Behavioral insights', 'Segmentation'], + ARRAY['Implementation complexity', 'Cost', 'Learning curve', 'Data modeling'], + true, + ARRAY['Product Analytics', 'User Behavior Analysis', 'Growth Tracking', 'Mobile Apps', 'SaaS']), + +('Amplitude', 'amplitude', 'analytics', 3, 99.9, true, false, false, + ARRAY['Behavioral cohorts', 'Pathfinder', 'Retention analysis', 'Revenue analytics'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Product analytics', 'User journey analysis', 'Growth optimization'], + ARRAY['Advanced analytics', 'Machine learning insights', 'Collaboration features', 'Data governance'], + ARRAY['Cost', 'Complexity', 'Learning curve', 'Integration challenges'], + true, + ARRAY['Product Analytics', 'User Journey Analysis', 'Growth Optimization', 'Enterprise', 'Mobile']), + +-- CI/CD and DevOps +('GitHub Actions', 'github', 'cicd', 1, 99.9, true, false, true, + ARRAY['Workflow automation', 'Matrix builds', 'Secrets management', 'Marketplace'], + ARRAY['SOC 2'], + ARRAY['CI/CD', 'Automation', 'Testing', 'Deployment'], + ARRAY['GitHub integration', 'Free for public repos', 'Marketplace ecosystem', 'Easy setup'], + ARRAY['Cost for private repos', 'Vendor lock-in', 'Limited enterprise features', 'Queue times'], + true, + ARRAY['CI/CD', 'Automation', 'Testing', 'Open Source', 'Development Teams']), + +('GitLab CI/CD', 'gitlab', 'cicd', 1, 99.95, true, false, true, + ARRAY['Auto DevOps', 'Review apps', 'Container registry', 'Security scanning'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['CI/CD', 'DevSecOps', 'Container deployment', 'Security scanning'], + ARRAY['Integrated platform', 'DevSecOps focus', 'Self-hosted option', 'Comprehensive features'], + ARRAY['Complexity', 'Resource intensive', 'Learning curve', 'Cost'], + true, + ARRAY['CI/CD', 'DevSecOps', 'Container Deployment', 'Enterprise', 'Security-focused']), + +('CircleCI', 'circleci', 'cicd', 1, 99.9, true, false, true, + ARRAY['Parallelism', 'Docker support', 'Orbs', 'Insights'], + ARRAY['SOC 2', 'FedRAMP'], + ARRAY['CI/CD', 'Testing', 'Deployment automation', 'Mobile development'], + ARRAY['Fast builds', 'Docker-first', 'Orbs ecosystem', 'Parallelization'], + ARRAY['Cost', 'Credit system', 'Learning curve', 'Limited free tier'], + true, + ARRAY['CI/CD', 'Testing', 'Deployment Automation', 'Mobile Development', 'Docker']), + +('Jenkins', 'jenkins', 'cicd', 1, 99.9, false, false, true, + ARRAY['Plugin ecosystem', 'Pipeline as code', 'Distributed builds'], + ARRAY['Open source'], + ARRAY['CI/CD', 'Build automation', 'Testing', 'Legacy systems'], + ARRAY['Open source', 'Highly customizable', 'Large plugin ecosystem', 'Self-hosted'], + ARRAY['Maintenance overhead', 'Security management', 'UI/UX', 'Configuration complexity'], + true, + ARRAY['CI/CD', 'Build Automation', 'Testing', 'Legacy Systems', 'On-premise']), + +('TeamCity', 'jetbrains', 'cicd', 1, 99.9, true, false, true, + ARRAY['Build chains', 'Test reporting', 'Code quality gates', 'Docker support'], + ARRAY['ISO 27001'], + ARRAY['CI/CD', 'Testing', 'Code quality', 'Enterprise builds'], + ARRAY['JetBrains integration', 'Build chains', 'Test reporting', 'Enterprise features'], + ARRAY['Cost', 'JetBrains ecosystem focus', 'Complexity', 'Resource usage'], + true, + ARRAY['CI/CD', 'Testing', 'Code Quality', 'Enterprise', 'JetBrains Ecosystem']), + +-- Security Services +('Okta', 'okta', 'identity', 19, 99.99, true, false, false, + ARRAY['Single sign-on', 'Multi-factor auth', 'Lifecycle management', 'API access management'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Identity management', 'Access control', 'SSO', 'Compliance'], + ARRAY['Enterprise focus', 'Comprehensive features', 'Integrations', 'Scalability'], + ARRAY['Cost', 'Complexity', 'Learning curve', 'Over-engineering for SMBs'], + false, + ARRAY['Identity Management', 'Access Control', 'SSO', 'Enterprise', 'Compliance']), + +('Vault', 'hashicorp', 'security', 6, 99.95, true, false, true, + ARRAY['Secret management', 'Dynamic secrets', 'Encryption as a service', 'PKI'], + ARRAY['SOC 2', 'FedRAMP'], + ARRAY['Secret management', 'Key management', 'Certificate management'], + ARRAY['Open source', 'Dynamic secrets', 'Multi-cloud', 'Enterprise grade'], + ARRAY['Complexity', 'Learning curve', 'Operational overhead', 'High availability setup'], + true, + ARRAY['Secret Management', 'Key Management', 'Certificate Management', 'DevOps', 'Enterprise']), + +('1Password', '1password', 'security', 14, 99.9, false, false, false, + ARRAY['Secret management', 'Team sharing', 'CLI integration', 'Audit logs'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Password management', 'Secret sharing', 'Team collaboration'], + ARRAY['User friendly', 'Team features', 'Security focus', 'Cross-platform'], + ARRAY['Limited enterprise features', 'Cost scaling', 'API limitations', 'Integration complexity'], + true, + ARRAY['Password Management', 'Secret Sharing', 'Team Collaboration', 'Small Teams', 'Security']), + +-- Development Tools +('Linear', 'linear', 'project-management', 1, 99.9, true, false, false, + ARRAY['Issue tracking', 'Project planning', 'Git integration', 'API'], + ARRAY['SOC 2', 'GDPR compliant'], + ARRAY['Project management', 'Issue tracking', 'Team collaboration'], + ARRAY['Fast performance', 'Clean interface', 'Git integration', 'API-first'], + ARRAY['Limited customization', 'Newer platform', 'Feature gaps', 'Cost'], + true, + ARRAY['Project Management', 'Issue Tracking', 'Team Collaboration', 'Software Development', 'Startups']), + +('Notion', 'notion', 'productivity', 1, 99.9, false, false, false, + ARRAY['Databases', 'Templates', 'Collaboration', 'API'], + ARRAY['SOC 2'], + ARRAY['Documentation', 'Knowledge management', 'Project planning', 'Team collaboration'], + ARRAY['Flexible structure', 'All-in-one platform', 'Collaboration features', 'Template ecosystem'], + ARRAY['Performance at scale', 'Learning curve', 'Limited offline', 'Complex permissions'], + true, + ARRAY['Documentation', 'Knowledge Management', 'Project Planning', 'Team Collaboration', 'Startups']), + +('Figma', 'figma', 'design', 1, 99.9, false, false, false, + ARRAY['Real-time collaboration', 'Component systems', 'Prototyping', 'Developer handoff'], + ARRAY['SOC 2'], + ARRAY['UI/UX design', 'Prototyping', 'Design systems', 'Collaboration'], + ARRAY['Browser-based', 'Real-time collaboration', 'Component systems', 'Developer tools'], + ARRAY['Performance with large files', 'Internet dependency', 'Limited offline', 'Feature complexity'], + true, + ARRAY['UI/UX Design', 'Prototyping', 'Design Systems', 'Team Collaboration', 'Product Design']), + +('Miro', 'miro', 'collaboration', 3, 99.9, false, false, false, + ARRAY['Infinite canvas', 'Templates', 'Video chat', 'Integrations'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Visual collaboration', 'Brainstorming', 'Workshops', 'Planning'], + ARRAY['Infinite canvas', 'Template library', 'Collaboration features', 'Integrations'], + ARRAY['Performance at scale', 'Cost', 'Learning curve', 'Mobile limitations'], + true, + ARRAY['Visual Collaboration', 'Brainstorming', 'Workshops', 'Remote Teams', 'Design Thinking']), + +-- Backup and Disaster Recovery +('Veeam', 'veeam', 'backup', 1, 99.99, false, false, false, + ARRAY['VM backup', 'Cloud backup', 'Replication', 'Recovery orchestration'], + ARRAY['ISO 27001'], + ARRAY['Backup', 'Disaster recovery', 'Data protection', 'VM management'], + ARRAY['VM expertise', 'Enterprise features', 'Recovery capabilities', 'Hybrid support'], + ARRAY['Cost', 'Complexity', 'Learning curve', 'Resource intensive'], + false, + ARRAY['Backup', 'Disaster Recovery', 'VM Management', 'Enterprise', 'Data Protection']), + +('Acronis', 'acronis', 'backup', 1, 99.9, false, false, false, + ARRAY['Cyber backup', 'Anti-malware', 'Blockchain notarization', 'Universal restore'], + ARRAY['ISO 27001'], + ARRAY['Backup', 'Cyber protection', 'Disaster recovery', 'Endpoint protection'], + ARRAY['Cyber protection', 'Easy deployment', 'Universal restore', 'Comprehensive solution'], + ARRAY['Cost', 'Resource usage', 'Complexity', 'Performance impact'], + false, + ARRAY['Backup', 'Cyber Protection', 'Disaster Recovery', 'Endpoint Protection', 'SMB']), + +-- Low-code/No-code Platforms +('Bubble', 'bubble', 'no-code', 1, 99.9, true, false, false, + ARRAY['Visual programming', 'Database', 'Workflows', 'Plugin ecosystem'], + ARRAY['SOC 2'], + ARRAY['Web application development', 'MVP creation', 'No-code development'], + ARRAY['No coding required', 'Full-stack capabilities', 'Community', 'Plugin ecosystem'], + ARRAY['Performance limitations', 'Scaling challenges', 'Learning curve', 'Customization limits'], + true, + ARRAY['Web Application Development', 'MVP Creation', 'No-code Development', 'Startups', 'Prototyping']), + +('Webflow', 'webflow', 'no-code', 1, 99.9, true, false, false, + ARRAY['Visual CSS', 'CMS', 'E-commerce', 'Hosting'], + ARRAY['SOC 2'], + ARRAY['Website development', 'Landing pages', 'E-commerce', 'Marketing sites'], + ARRAY['Design control', 'No coding needed', 'SEO friendly', 'Hosting included'], + ARRAY['Learning curve', 'Cost', 'Limited backend', 'E-commerce limitations'], + true, + ARRAY['Website Development', 'Landing Pages', 'E-commerce', 'Marketing Sites', 'Design Agencies']), + +('Zapier', 'zapier', 'automation', 1, 99.9, true, false, false, + ARRAY['App integrations', 'Multi-step workflows', 'Webhooks', 'Code steps'], + ARRAY['SOC 2'], + ARRAY['Workflow automation', 'App integration', 'Business process automation'], + ARRAY['Easy setup', 'Extensive integrations', 'No coding required', 'Scalable workflows'], + ARRAY['Cost at scale', 'Complexity limits', 'Debugging difficulty', 'Vendor dependency'], + true, + ARRAY['Workflow Automation', 'App Integration', 'Business Process Automation', 'Productivity', 'SMB']), + +-- Video and Streaming +('Vimeo', 'vimeo', 'video', 1, 99.9, false, false, false, + ARRAY['Video hosting', 'Live streaming', 'Video analytics', 'Custom players'], + ARRAY['SOC 2'], + ARRAY['Video hosting', 'Live streaming', 'Video marketing', 'Corporate communications'], + ARRAY['High quality', 'Professional features', 'No ads', 'Customization'], + ARRAY['Cost', 'Storage limits', 'Limited social features', 'Smaller audience'], + true, + ARRAY['Video Hosting', 'Live Streaming', 'Video Marketing', 'Corporate Communications', 'Creative Industry']), + +('Wistia', 'wistia', 'video', 1, 99.9, false, false, false, + ARRAY['Video hosting', 'Video analytics', 'Lead generation', 'Customizable players'], + ARRAY['SOC 2'], + ARRAY['Business video hosting', 'Video marketing', 'Lead generation', 'Training videos'], + ARRAY['Business focus', 'Analytics', 'Lead generation', 'Customization'], + ARRAY['Cost', 'Limited free tier', 'Feature complexity', 'Learning curve'], + true, + ARRAY['Business Video Hosting', 'Video Marketing', 'Lead Generation', 'Training Videos', 'B2B']), + +('Mux', 'mux', 'video', 1, 99.99, true, false, false, + ARRAY['Video API', 'Live streaming', 'Video analytics', 'Adaptive bitrate'], + ARRAY['SOC 2'], + ARRAY['Video infrastructure', 'Live streaming', 'Video analytics', 'Developer tools'], + ARRAY['Developer focused', 'Scalable infrastructure', 'Analytics', 'Global delivery'], + ARRAY['Technical complexity', 'Cost', 'Developer required', 'Limited UI tools'], + false, + ARRAY['Video Infrastructure', 'Live Streaming', 'Developer Tools', 'Media Companies', 'SaaS Platforms']), + +-- IoT and Edge Computing +('AWS IoT Core', 'amazon', 'iot', 25, 99.99, true, false, false, + ARRAY['Device management', 'Message routing', 'Device shadows', 'Greengrass'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['IoT applications', 'Device management', 'Data collection', 'Edge computing'], + ARRAY['Comprehensive platform', 'AWS integration', 'Scalability', 'Security'], + ARRAY['Complexity', 'Cost', 'AWS lock-in', 'Learning curve'], + true, + ARRAY['IoT Applications', 'Device Management', 'Industrial IoT', 'Smart Cities', 'Agriculture']), + +('ThingSpeak', 'mathworks', 'iot', 1, 99.9, false, false, false, + ARRAY['Data collection', 'Visualization', 'Analytics', 'MATLAB integration'], + ARRAY['SOC 2'], + ARRAY['IoT data collection', 'Sensor monitoring', 'Research projects', 'Prototyping'], + ARRAY['Easy setup', 'MATLAB integration', 'Free tier', 'Academic friendly'], + ARRAY['Limited scalability', 'Basic features', 'Performance', 'Enterprise limitations'], + true, + ARRAY['IoT Data Collection', 'Sensor Monitoring', 'Research Projects', 'Education', 'Prototyping']), + +-- Search and Discovery +('Algolia', 'algolia', 'search', 17, 99.99, true, false, false, + ARRAY['Search API', 'Analytics', 'A/B testing', 'Personalization'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Search functionality', 'E-commerce search', 'Content discovery', 'Mobile search'], + ARRAY['Fast search', 'Developer friendly', 'Typo tolerance', 'Analytics'], + ARRAY['Cost', 'Complexity', 'Vendor lock-in', 'Index size limits'], + true, + ARRAY['Search Functionality', 'E-commerce Search', 'Content Discovery', 'Mobile Applications', 'Media']), + +('Elasticsearch Service', 'elastic', 'search', 50, 99.9, true, false, true, + ARRAY['Full-text search', 'Log analytics', 'APM', 'Security'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Search', 'Log analytics', 'Observability', 'Security monitoring'], + ARRAY['Open source', 'Scalable', 'Real-time', 'Analytics capabilities'], + ARRAY['Complexity', 'Resource intensive', 'Management overhead', 'Cost'], + true, + ARRAY['Search', 'Log Analytics', 'Observability', 'Security Monitoring', 'Enterprise']), + +-- Game Development +('Unity Cloud Build', 'unity', 'game-dev', 1, 99.9, true, false, true, + ARRAY['Automated builds', 'Multi-platform', 'Version control integration', 'Distribution'], + ARRAY['ISO 27001'], + ARRAY['Game development', 'Mobile games', 'Multi-platform deployment'], + ARRAY['Unity integration', 'Multi-platform', 'Automated workflows', 'Asset management'], + ARRAY['Unity-specific', 'Cost', 'Learning curve', 'Limited customization'], + true, + ARRAY['Game Development', 'Mobile Games', 'Multi-platform Development', 'Indie Games', 'Studios']), + +('PlayFab', 'microsoft', 'game-dev', 6, 99.9, true, true, false, + ARRAY['Player management', 'Analytics', 'Multiplayer', 'LiveOps'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Game backend', 'Player analytics', 'Multiplayer games', 'Live operations'], + ARRAY['Game-focused', 'Scalable', 'Analytics', 'LiveOps tools'], + ARRAY['Gaming-specific', 'Complexity', 'Cost at scale', 'Learning curve'], + true, + ARRAY['Game Backend', 'Player Analytics', 'Multiplayer Games', 'Live Operations', 'Mobile Gaming']), + +-- Final entries to reach 200 +('Airtable', 'airtable', 'database', 1, 99.9, false, false, false, + ARRAY['Spreadsheet-database hybrid', 'Forms', 'Automations', 'Views'], + ARRAY['SOC 2'], + ARRAY['Database', 'Project management', 'Content management', 'CRM'], + ARRAY['User friendly', 'Flexible structure', 'Collaboration', 'No coding required'], + ARRAY['Performance limits', 'Cost scaling', 'Limited relational features', 'Mobile limitations'], + true, + ARRAY['Database', 'Project Management', 'Content Management', 'Small Teams', 'Non-technical Users']), + +('Retool', 'retool', 'low-code', 1, 99.9, false, false, false, + ARRAY['Drag-drop UI builder', 'Database connections', 'API integrations', 'Custom code'], + ARRAY['SOC 2'], + ARRAY['Internal tools', 'Admin panels', 'Dashboards', 'CRUD applications'], + ARRAY['Rapid development', 'Database integrations', 'Custom code support', 'Professional UI'], + ARRAY['Cost', 'Learning curve', 'Customization limits', 'Performance'], + true, + ARRAY['Internal Tools', 'Admin Panels', 'Dashboards', 'CRUD Applications', 'Operations Teams']), + +('Postman', 'postman', 'api-tools', 1, 99.9, false, false, false, + ARRAY['API testing', 'Documentation', 'Monitoring', 'Mock servers'], + ARRAY['SOC 2'], + ARRAY['API development', 'API testing', 'Team collaboration', 'Documentation'], + ARRAY['Industry standard', 'Comprehensive features', 'Team collaboration', 'Easy to use'], + ARRAY['Performance with large collections', 'Cost for teams', 'Learning curve for advanced features', 'Desktop dependency'], + true, + ARRAY['API Development', 'API Testing', 'Team Collaboration', 'Documentation', 'Developer Tools']), + +('Insomnia', 'kong', 'api-tools', 1, 99.9, false, false, false, + ARRAY['API testing', 'GraphQL support', 'Environment management', 'Code generation'], + ARRAY['SOC 2'], + ARRAY['API testing', 'GraphQL development', 'REST API development'], + ARRAY['Clean interface', 'GraphQL support', 'Open source', 'Plugin system'], + ARRAY['Smaller ecosystem', 'Limited team features', 'Less market adoption', 'Feature gaps'], + true, + ARRAY['API Testing', 'GraphQL Development', 'REST API Development', 'Individual Developers', 'Open Source']), + +('Prisma', 'prisma', 'database', 1, 99.9, true, false, false, + ARRAY['Database toolkit', 'Type-safe client', 'Migrations', 'Studio GUI'], + ARRAY['SOC 2'], + ARRAY['Database access', 'Type-safe development', 'Database migrations'], + ARRAY['Type safety', 'Developer experience', 'Auto-generated client', 'Migration system'], + ARRAY['Learning curve', 'Abstraction overhead', 'Limited database features', 'Framework coupling'], + true, + ARRAY['Database Access', 'Type-safe Development', 'Modern Web Development', 'Full-stack Applications', 'TypeScript']), + +('Sumo Logic', 'sumologic', 'monitoring', 16, 99.9, true, false, false, + ARRAY['Log analytics', 'Security analytics', 'Infrastructure monitoring', 'Compliance'], + ARRAY['SOC 2', 'FedRAMP', 'HIPAA'], + ARRAY['Log management', 'Security monitoring', 'Compliance', 'DevOps'], + ARRAY['Cloud-native', 'Machine learning', 'Real-time analytics', 'Compliance ready'], + ARRAY['Cost', 'Learning curve', 'Data volume pricing', 'Complex queries'], + true, + ARRAY['Log Management', 'Security Monitoring', 'Compliance', 'DevOps', 'Enterprise']), + +('Splunk', 'splunk', 'monitoring', 1, 99.99, true, false, false, + ARRAY['Search and analytics', 'Machine learning', 'SIEM', 'IT operations'], + ARRAY['SOC 2', 'FedRAMP', 'HIPAA'], + ARRAY['Log analytics', 'Security monitoring', 'IT operations', 'Business intelligence'], + ARRAY['Powerful search', 'Enterprise grade', 'Extensive integrations', 'Market leader'], + ARRAY['High cost', 'Complexity', 'Resource intensive', 'Learning curve'], + false, + ARRAY['Log Analytics', 'Security Monitoring', 'IT Operations', 'Enterprise', 'SIEM']), + +('Elasticsearch Cloud', 'elastic', 'monitoring', 50, 99.9, true, false, true, + ARRAY['Search analytics', 'Observability', 'Security', 'Enterprise search'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Search', 'Observability', 'Security analytics', 'Enterprise search'], + ARRAY['Open source foundation', 'Scalable', 'Real-time', 'Flexible'], + ARRAY['Complexity', 'Resource usage', 'Management overhead', 'Pricing'], + true, + ARRAY['Search', 'Observability', 'Security Analytics', 'Enterprise Search', 'DevOps']), + +-- Additional Cloud Storage Services +('Box', 'box', 'storage', 1, 99.9, false, false, false, + ARRAY['File sharing', 'Collaboration', 'Workflow automation', 'Security controls'], + ARRAY['SOC 2', 'FedRAMP', 'HIPAA'], + ARRAY['File storage', 'Team collaboration', 'Document management', 'Enterprise content'], + ARRAY['Enterprise focus', 'Security features', 'Collaboration tools', 'Compliance'], + ARRAY['Cost', 'Limited personal use', 'Mobile app limitations', 'Integration complexity'], + true, + ARRAY['File Storage', 'Team Collaboration', 'Document Management', 'Enterprise', 'Healthcare']), + +('Dropbox', 'dropbox', 'storage', 1, 99.9, false, false, false, + ARRAY['File sync', 'Smart Sync', 'Paper', 'HelloSign integration'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['File storage', 'File sync', 'Team collaboration', 'Document sharing'], + ARRAY['User friendly', 'Reliable sync', 'Cross-platform', 'Integration ecosystem'], + ARRAY['Storage limits', 'Cost for business', 'Security concerns', 'Limited enterprise features'], + true, + ARRAY['File Storage', 'File Sync', 'Team Collaboration', 'Small Business', 'Creative Teams']), + +('Google Drive', 'google', 'storage', 1, 99.9, false, false, false, + ARRAY['Real-time collaboration', 'Office suite integration', 'AI-powered search', 'Version history'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['File storage', 'Document collaboration', 'Office productivity', 'Team workspaces'], + ARRAY['Google ecosystem', 'Real-time collaboration', 'Generous free tier', 'AI features'], + ARRAY['Privacy concerns', 'Google dependency', 'Limited offline', 'Enterprise limitations'], + true, + ARRAY['File Storage', 'Document Collaboration', 'Office Productivity', 'Education', 'Small Teams']), + +-- Additional Database Services +('FaunaDB', 'fauna', 'database', 18, 99.9, true, false, false, + ARRAY['ACID transactions', 'Multi-region', 'GraphQL', 'Temporal queries'], + ARRAY['SOC 2', 'HIPAA'], + ARRAY['Serverless database', 'Global applications', 'Real-time applications'], + ARRAY['ACID compliance', 'Global consistency', 'Serverless scaling', 'Multi-model'], + ARRAY['Learning curve', 'Cost predictability', 'Query complexity', 'Limited tooling'], + true, + ARRAY['Serverless Database', 'Global Applications', 'Real-time Applications', 'JAMstack', 'Modern Web']), + +('Redis Cloud', 'redis', 'database', 100, 99.99, true, false, false, + ARRAY['In-memory database', 'Caching', 'Real-time analytics', 'JSON support'], + ARRAY['SOC 2', 'HIPAA', 'PCI DSS'], + ARRAY['Caching', 'Session storage', 'Real-time analytics', 'Message queuing'], + ARRAY['High performance', 'Versatile data structures', 'Pub/Sub messaging', 'Global distribution'], + ARRAY['Memory-based cost', 'Data persistence complexity', 'Memory limitations', 'Clustering complexity'], + true, + ARRAY['Caching', 'Session Storage', 'Real-time Analytics', 'Gaming', 'E-commerce']), + +('Amazon DynamoDB', 'amazon', 'database', 25, 99.999, true, false, false, + ARRAY['NoSQL database', 'Global tables', 'DynamoDB Streams', 'On-demand scaling'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['NoSQL applications', 'Serverless backends', 'IoT data', 'Gaming'], + ARRAY['Serverless scaling', 'Low latency', 'AWS integration', 'Global replication'], + ARRAY['Query limitations', 'Cost complexity', 'AWS lock-in', 'Learning curve'], + true, + ARRAY['NoSQL Applications', 'Serverless Backends', 'IoT Data', 'Gaming', 'Mobile Apps']), + +-- Additional API and Integration Services +('Kong', 'kong', 'api-gateway', 1, 99.99, true, false, true, + ARRAY['API gateway', 'Rate limiting', 'Authentication', 'Analytics'], + ARRAY['SOC 2'], + ARRAY['API management', 'Microservices', 'API security', 'Traffic control'], + ARRAY['Open source', 'High performance', 'Plugin ecosystem', 'Enterprise features'], + ARRAY['Configuration complexity', 'Learning curve', 'Enterprise cost', 'Management overhead'], + true, + ARRAY['API Management', 'Microservices', 'API Security', 'Enterprise', 'DevOps']), + +('Apigee', 'google', 'api-gateway', 24, 99.99, true, false, false, + ARRAY['API management', 'Developer portal', 'Analytics', 'Monetization'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['API management', 'Developer ecosystems', 'API monetization', 'Enterprise APIs'], + ARRAY['Enterprise grade', 'Developer portal', 'Analytics', 'Monetization features'], + ARRAY['Cost', 'Complexity', 'Google dependency', 'Learning curve'], + false, + ARRAY['API Management', 'Developer Ecosystems', 'API Monetization', 'Enterprise', 'Digital Transformation']), + +('MuleSoft', 'salesforce', 'integration', 1, 99.99, true, false, false, + ARRAY['Integration platform', 'API management', 'Data integration', 'B2B integration'], + ARRAY['SOC 2', 'ISO 27001', 'HIPAA'], + ARRAY['System integration', 'API management', 'Data transformation', 'Legacy modernization'], + ARRAY['Enterprise focus', 'Comprehensive platform', 'Salesforce integration', 'Hybrid deployment'], + ARRAY['High cost', 'Complexity', 'Learning curve', 'Over-engineering for SMB'], + false, + ARRAY['System Integration', 'API Management', 'Data Transformation', 'Enterprise', 'Legacy Modernization']), + +-- Additional Communication Services +('Zoom', 'zoom', 'communications', 1, 99.99, true, false, false, + ARRAY['Video conferencing', 'Webinars', 'Phone system', 'Rooms'], + ARRAY['SOC 2', 'FedRAMP', 'HIPAA'], + ARRAY['Video conferencing', 'Remote meetings', 'Webinars', 'Business communications'], + ARRAY['Reliable video quality', 'Easy to use', 'Scale capability', 'Integration ecosystem'], + ARRAY['Security concerns', 'Cost for features', 'Bandwidth requirements', 'Privacy concerns'], + true, + ARRAY['Video Conferencing', 'Remote Meetings', 'Webinars', 'Business Communications', 'Education']), + +('Slack', 'salesforce', 'communications', 1, 99.99, false, false, false, + ARRAY['Team messaging', 'File sharing', 'Workflow automation', 'App integrations'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Team communication', 'Remote work', 'Project collaboration', 'Internal communications'], + ARRAY['User friendly', 'Rich integrations', 'Search capabilities', 'Workflow automation'], + ARRAY['Cost scaling', 'Information overload', 'Thread management', 'Distraction potential'], + true, + ARRAY['Team Communication', 'Remote Work', 'Project Collaboration', 'Software Teams', 'Startups']), + +('Discord', 'discord', 'communications', 1, 99.9, false, false, false, + ARRAY['Voice/video chat', 'Text messaging', 'Screen sharing', 'Bot integrations'], + ARRAY['SOC 2'], + ARRAY['Community building', 'Gaming communication', 'Team coordination', 'Social interaction'], + ARRAY['Free tier', 'Low latency voice', 'Community features', 'Bot ecosystem'], + ARRAY['Gaming focus', 'Limited business features', 'Moderation challenges', 'Professional perception'], + true, + ARRAY['Community Building', 'Gaming Communication', 'Team Coordination', 'Open Source Communities', 'Education']), + +-- Additional Security and Compliance Services +('CrowdStrike', 'crowdstrike', 'security', 1, 99.99, true, false, false, + ARRAY['Endpoint protection', 'Threat intelligence', 'Incident response', 'Cloud security'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Endpoint security', 'Threat detection', 'Incident response', 'Cloud workload protection'], + ARRAY['AI-powered detection', 'Cloud-native', 'Threat intelligence', 'Rapid response'], + ARRAY['Cost', 'Complexity', 'False positives', 'Resource usage'], + false, + ARRAY['Endpoint Security', 'Threat Detection', 'Incident Response', 'Enterprise', 'Government']), + +('Qualys', 'qualys', 'security', 1, 99.99, true, false, false, + ARRAY['Vulnerability management', 'Compliance', 'Web app security', 'Container security'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP'], + ARRAY['Vulnerability assessment', 'Compliance monitoring', 'Security scanning', 'Risk management'], + ARRAY['Comprehensive platform', 'Cloud-based', 'Compliance focus', 'Global reach'], + ARRAY['Cost', 'Interface complexity', 'Learning curve', 'Report customization'], + false, + ARRAY['Vulnerability Assessment', 'Compliance Monitoring', 'Security Scanning', 'Enterprise', 'Healthcare']), + +-- Final specialized services to reach 200 +('LaunchDarkly', 'launchdarkly', 'feature-flags', 1, 99.99, true, false, false, + ARRAY['Feature flags', 'A/B testing', 'Progressive delivery', 'Analytics'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Feature management', 'Progressive delivery', 'A/B testing', 'Risk mitigation'], + ARRAY['Enterprise grade', 'Real-time updates', 'Targeting capabilities', 'Analytics'], + ARRAY['Cost', 'Complexity for simple use cases', 'Learning curve', 'Vendor dependency'], + true, + ARRAY['Feature Management', 'Progressive Delivery', 'A/B Testing', 'DevOps', 'Product Teams']), + +('Segment', 'twilio', 'analytics', 1, 99.9, true, false, false, + ARRAY['Customer data platform', 'Event tracking', 'Integrations', 'Profiles'], + ARRAY['SOC 2', 'HIPAA', 'GDPR compliant'], + ARRAY['Customer data management', 'Analytics integration', 'Personalization', 'Marketing automation'], + ARRAY['Unified data collection', 'Easy integrations', 'Real-time streaming', 'Data governance'], + ARRAY['Cost', 'Data volume limits', 'Integration complexity', 'Vendor lock-in'], + true, + ARRAY['Customer Data Management', 'Analytics Integration', 'Personalization', 'Marketing Automation', 'E-commerce']), + +('Intercom', 'intercom', 'customer-support', 1, 99.9, true, false, false, + ARRAY['Live chat', 'Help desk', 'Knowledge base', 'Product tours'], + ARRAY['SOC 2', 'ISO 27001', 'GDPR compliant'], + ARRAY['Customer support', 'Live chat', 'Customer engagement', 'Help desk'], + ARRAY['Easy integration', 'Modern interface', 'Automation features', 'Multi-channel support'], + ARRAY['Cost scaling', 'Feature complexity', 'Learning curve', 'Customization limits'], + true, + ARRAY['Customer Support', 'Live Chat', 'Customer Engagement', 'SaaS', 'E-commerce']), + +('Zendesk', 'zendesk', 'customer-support', 1, 99.9, false, false, false, + ARRAY['Ticket management', 'Knowledge base', 'Chat', 'Analytics'], + ARRAY['SOC 2', 'ISO 27001', 'HIPAA'], + ARRAY['Customer support', 'Help desk', 'Ticket management', 'Knowledge management'], + ARRAY['Comprehensive platform', 'Customizable', 'Reporting', 'Integration ecosystem'], + ARRAY['Cost', 'Complexity', 'Interface dated', 'Learning curve'], + true, + ARRAY['Customer Support', 'Help Desk', 'Ticket Management', 'Enterprise', 'Service Organizations']), + +('Freshworks', 'freshworks', 'customer-support', 1, 99.9, false, false, false, + ARRAY['Customer service', 'Sales CRM', 'Marketing automation', 'Phone support'], + ARRAY['SOC 2', 'ISO 27001', 'GDPR compliant'], + ARRAY['Customer support', 'CRM', 'Marketing automation', 'Sales management'], + ARRAY['All-in-one platform', 'Affordable pricing', 'Easy setup', 'Modern interface'], + ARRAY['Feature depth', 'Customization limits', 'Enterprise scalability', 'Integration gaps'], + true, + ARRAY['Customer Support', 'CRM', 'Marketing Automation', 'SMB', 'Sales Teams']), + +('HubSpot', 'hubspot', 'crm', 1, 99.9, false, false, false, + ARRAY['CRM', 'Marketing automation', 'Sales tools', 'Content management'], + ARRAY['SOC 2', 'ISO 27001', 'GDPR compliant'], + ARRAY['Inbound marketing', 'Sales automation', 'Customer relationship management', 'Content marketing'], + ARRAY['Free tier', 'All-in-one platform', 'Easy to use', 'Strong community'], + ARRAY['Cost scaling', 'Customization limits', 'Advanced features cost', 'Lock-in concerns'], + true, + ARRAY['Inbound Marketing', 'Sales Automation', 'Customer Relationship Management', 'SMB', 'Marketing Teams']), + +('Salesforce', 'salesforce', 'crm', 1, 99.99, false, false, false, + ARRAY['Sales Cloud', 'Service Cloud', 'Marketing Cloud', 'Platform'], + ARRAY['SOC 2', 'ISO 27001', 'FedRAMP', 'HIPAA'], + ARRAY['Customer relationship management', 'Sales automation', 'Service management', 'Marketing automation'], + ARRAY['Market leader', 'Comprehensive platform', 'Customization', 'Ecosystem'], + ARRAY['Cost', 'Complexity', 'Learning curve', 'Over-engineering for SMB'], + false, + ARRAY['Customer Relationship Management', 'Sales Automation', 'Enterprise', 'Service Management', 'Large Organizations']), + +('Pipedrive', 'pipedrive', 'crm', 1, 99.9, false, false, false, + ARRAY['Pipeline management', 'Sales automation', 'Email sync', 'Reporting'], + ARRAY['SOC 2', 'ISO 27001', 'GDPR compliant'], + ARRAY['Sales management', 'Pipeline tracking', 'Lead management', 'Sales reporting'], + ARRAY['Sales-focused', 'Easy to use', 'Visual pipeline', 'Mobile app'], + ARRAY['Limited marketing features', 'Customization constraints', 'Advanced reporting', 'Integration limits'], + true, + ARRAY['Sales Management', 'Pipeline Tracking', 'Lead Management', 'SMB', 'Sales Teams']), + +('Monday.com', 'monday', 'project-management', 1, 99.9, false, false, false, + ARRAY['Project boards', 'Time tracking', 'Automations', 'Dashboard'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Project management', 'Team collaboration', 'Workflow management', 'Resource planning'], + ARRAY['Visual interface', 'Customizable', 'Automation features', 'Template library'], + ARRAY['Cost scaling', 'Complexity for simple needs', 'Mobile limitations', 'Learning curve'], + true, + ARRAY['Project Management', 'Team Collaboration', 'Workflow Management', 'Marketing Teams', 'Creative Agencies']), + +('Asana', 'asana', 'project-management', 1, 99.9, false, false, false, + ARRAY['Task management', 'Project tracking', 'Team collaboration', 'Reporting'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Project management', 'Task tracking', 'Team coordination', 'Goal tracking'], + ARRAY['User friendly', 'Multiple views', 'Good free tier', 'Mobile apps'], + ARRAY['Advanced features cost', 'Customization limits', 'Reporting constraints', 'Large project limitations'], + true, + ARRAY['Project Management', 'Task Tracking', 'Team Coordination', 'Small Teams', 'Startups']), + +('Trello', 'atlassian', 'project-management', 1, 99.9, false, false, false, + ARRAY['Kanban boards', 'Cards and lists', 'Power-Ups', 'Team collaboration'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Task management', 'Project organization', 'Team collaboration', 'Visual workflow'], + ARRAY['Simple interface', 'Visual organization', 'Free tier', 'Easy adoption'], + ARRAY['Limited advanced features', 'Scaling challenges', 'Reporting limitations', 'Complex project constraints'], + true, + ARRAY['Task Management', 'Project Organization', 'Visual Workflow', 'Small Teams', 'Personal Productivity']), + +('Jira', 'atlassian', 'project-management', 1, 99.95, false, false, false, + ARRAY['Issue tracking', 'Agile boards', 'Reporting', 'Workflow automation'], + ARRAY['SOC 2', 'ISO 27001'], + ARRAY['Software development', 'Issue tracking', 'Agile project management', 'Bug tracking'], + ARRAY['Agile-focused', 'Customizable workflows', 'Comprehensive reporting', 'Atlassian ecosystem'], + ARRAY['Complexity', 'Learning curve', 'Cost', 'Over-engineering for simple needs'], + true, + ARRAY['Software Development', 'Issue Tracking', 'Agile Project Management', 'Development Teams', 'Enterprise']); + +-- ===================================================== +-- DATA INSERTION - TESTING TECHNOLOGIES +-- ===================================================== + + INSERT INTO testing_technologies ( + name, testing_type, framework_support, automation_level, ci_cd_integration, + browser_support, mobile_testing, api_testing, performance_testing, + primary_use_cases, strengths, weaknesses, license_type, domain +) VALUES +('Mocha', 'unit', ARRAY['Node.js', 'JavaScript', 'TypeScript'], 'full', true, + ARRAY['Node.js'], false, true, false, + ARRAY['Unit testing', 'Integration testing', 'Asynchronous testing', 'Browser testing'], + ARRAY['Flexible', 'Rich ecosystem', 'Good for async code', 'Extensible reporters'], + ARRAY['Requires assertion library', 'Setup complexity', 'Slower than Jest', 'Less built-in features'], + 'MIT', + ARRAY['Node.js Applications', 'JavaScript Testing', 'Backend Services', 'API Testing', 'CI/CD Pipelines']), +('Chai', 'assertion', ARRAY['JavaScript', 'TypeScript', 'Node.js'], 'partial', true, + ARRAY['Node.js'], false, true, false, + ARRAY['Assertion library', 'Unit testing', 'Integration testing', 'API testing'], + ARRAY['Readable syntax', 'Chainable interface', 'Multiple styles', 'Good documentation'], + ARRAY['Not a test runner', 'Requires setup', 'Learning curve', 'Dependency management'], + 'MIT', + ARRAY['JavaScript Development', 'Node.js Applications', 'API Testing', 'Unit Testing', 'Integration Testing']), +('Sinon', 'mocking', ARRAY['JavaScript', 'TypeScript', 'Node.js'], 'full', true, + ARRAY['Node.js'], false, true, false, + ARRAY['Mocking', 'Stubbing', 'Spying', 'Fake timers'], + ARRAY['Comprehensive mocking', 'Easy to use', 'Good documentation', 'Standalone'], + ARRAY['Complex API', 'Learning curve', 'Setup overhead', 'Performance impact'], + 'BSD-3-Clause', + ARRAY['JavaScript Testing', 'Node.js Applications', 'Unit Testing', 'Integration Testing', 'Mocking']), +('Supertest', 'api', ARRAY['Node.js', 'Express', 'JavaScript'], 'full', true, + ARRAY['Node.js'], false, true, false, + ARRAY['API testing', 'HTTP testing', 'Integration testing', 'Endpoint testing'], + ARRAY['Easy HTTP assertions', 'Good for Express', 'Comprehensive', 'Well documented'], + ARRAY['Node.js only', 'Limited to HTTP', 'Requires test runner', 'Setup complexity'], + 'MIT', + ARRAY['API Testing', 'Node.js Applications', 'Express Apps', 'HTTP Services', 'Integration Testing']), +('Puppeteer', 'e2e', ARRAY['JavaScript', 'TypeScript', 'Node.js'], 'full', true, + ARRAY['Chrome'], false, false, false, + ARRAY['Browser automation', 'Web scraping', 'UI testing', 'Screenshot testing'], + ARRAY['Headless Chrome', 'Fast execution', 'Good API', 'Google backing'], + ARRAY['Chrome only', 'Resource intensive', 'Limited browser support', 'Setup complexity'], + 'Apache 2.0', + ARRAY['Web Testing', 'Browser Automation', 'UI Testing', 'Web Scraping', 'Chrome Applications']), +('TestCafe', 'e2e', ARRAY['JavaScript', 'TypeScript', 'CoffeeScript'], 'full', true, + ARRAY['Chrome', 'Firefox', 'Safari', 'Edge'], false, false, false, + ARRAY['Cross-browser testing', 'End-to-end testing', 'Functional testing', 'Regression testing'], + ARRAY['No WebDriver', 'Easy setup', 'Good reporting', 'Stable tests'], + ARRAY['Slower execution', 'Limited mobile', 'Resource usage', 'Learning curve'], + 'MIT', + ARRAY['Cross-browser Testing', 'Web Applications', 'E-commerce', 'SaaS Platforms', 'Regression Testing']), +('Nightwatch', 'e2e', ARRAY['JavaScript', 'TypeScript', 'Node.js'], 'full', true, + ARRAY['Chrome', 'Firefox', 'Safari', 'Edge'], false, false, false, + ARRAY['End-to-end testing', 'Cross-browser testing', 'Regression testing', 'UI testing'], + ARRAY['Selenium-based', 'Good syntax', 'Extensible', 'Cloud integration'], + ARRAY['Selenium dependency', 'Setup complexity', 'Flaky tests', 'Performance issues'], + 'MIT', + ARRAY['Web Testing', 'Cross-browser Testing', 'UI Testing', 'Regression Testing', 'Cloud Testing']), +('WebdriverIO', 'e2e', ARRAY['JavaScript', 'TypeScript', 'Python'], 'full', true, + ARRAY['Chrome', 'Firefox', 'Safari', 'Edge'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'API testing', 'Component testing'], + ARRAY['WebDriver standard', 'Multi-language', 'Good ecosystem', 'Cloud support'], + ARRAY['Complex setup', 'Learning curve', 'Performance overhead', 'Maintenance'], + 'MIT', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Web Applications', 'Enterprise Testing', 'Cloud Testing']), +('Cucumber', 'bdd', ARRAY['Java', 'JavaScript', 'Ruby', 'Python'], 'full', true, + ARRAY['All browsers'], false, true, false, + ARRAY['BDD testing', 'Acceptance testing', 'Integration testing', 'Documentation'], + ARRAY['Readable syntax', 'Business-friendly', 'Multi-language', 'Good reporting'], + ARRAY['Verbose', 'Learning curve', 'Setup complexity', 'Performance overhead'], + 'MIT', + ARRAY['BDD Testing', 'Acceptance Testing', 'Agile Teams', 'Documentation', 'Business Applications']), +('RSpec', 'bdd', ARRAY['Ruby', 'Rails'], 'full', true, + ARRAY['All browsers'], false, true, false, + ARRAY['BDD testing', 'Unit testing', 'Integration testing', 'Acceptance testing'], + ARRAY['Readable syntax', 'Rich features', 'Good ecosystem', 'Rails integration'], + ARRAY['Ruby only', 'Learning curve', 'Setup complexity', 'Performance issues'], + 'MIT', + ARRAY['Ruby Development', 'Rails Applications', 'BDD Testing', 'Unit Testing', 'Integration Testing']), +('PHPUnit', 'unit', ARRAY['PHP', 'Laravel', 'Symfony'], 'full', true, + ARRAY['PHP'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Functional testing', 'Regression testing'], + ARRAY['PHP standard', 'Good documentation', 'Rich features', 'Framework integration'], + ARRAY['PHP only', 'Setup complexity', 'Learning curve', 'Performance issues'], + 'BSD-3-Clause', + ARRAY['PHP Development', 'Laravel Applications', 'Symfony Apps', 'Unit Testing', 'Integration Testing']), +('Codeception', 'bdd', ARRAY['PHP', 'Laravel', 'Symfony'], 'full', true, + ARRAY['All browsers'], false, true, false, + ARRAY['BDD testing', 'Acceptance testing', 'Functional testing', 'API testing'], + ARRAY['Multiple testing types', 'Good documentation', 'Framework integration', 'Modular'], + ARRAY['PHP only', 'Complex setup', 'Learning curve', 'Performance overhead'], + 'MIT', + ARRAY['PHP Testing', 'BDD Testing', 'Acceptance Testing', 'API Testing', 'Functional Testing']), +('PyTest', 'unit', ARRAY['Python', 'Django', 'Flask'], 'full', true, + ARRAY['Python'], false, true, false, + ARRAY['Unit testing', 'Integration testing', 'Functional testing', 'API testing'], + ARRAY['Simple syntax', 'Powerful fixtures', 'Good plugins', 'Fast execution'], + ARRAY['Python only', 'Limited features', 'Setup complexity', 'Learning curve'], + 'MIT', + ARRAY['Python Development', 'Django Applications', 'Flask Apps', 'Unit Testing', 'API Testing']), +('Unittest', 'unit', ARRAY['Python'], 'full', true, + ARRAY['Python'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Test discovery', 'Test organization'], + ARRAY['Built-in', 'Simple', 'Standard library', 'No dependencies'], + ARRAY['Basic features', 'Verbose syntax', 'Limited functionality', 'Python only'], + 'Python Software Foundation', + ARRAY['Python Development', 'Unit Testing', 'Integration Testing', 'Standard Library', 'Educational']), +('Robot Framework', 'bdd', ARRAY['Python', 'Java', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Acceptance testing', 'BDD testing', 'Robot testing', 'Integration testing'], + ARRAY['Keyword-driven', 'Easy to learn', 'Good reporting', 'Extensible'], + ARRAY['Learning curve', 'Setup complexity', 'Performance overhead', 'Limited features'], + 'Apache 2.0', + ARRAY['Acceptance Testing', 'BDD Testing', 'Robot Testing', 'Integration Testing', 'Enterprise Testing']), +('Jasmine', 'unit', ARRAY['JavaScript', 'TypeScript', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'BDD testing', 'Behavior testing'], + ARRAY['No dependencies', 'Easy setup', 'Good syntax', 'Angular integration'], + ARRAY['Limited features', 'Basic assertions', 'Performance issues', 'Learning curve'], + 'MIT', + ARRAY['JavaScript Testing', 'Angular Applications', 'Unit Testing', 'BDD Testing', 'Frontend Testing']), +('Karma', 'runner', ARRAY['JavaScript', 'TypeScript', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Test runner', 'Cross-browser testing', 'CI integration', 'Test execution'], + ARRAY['Multiple browsers', 'Good integration', 'Real-time testing', 'Framework support'], + ARRAY['Setup complexity', 'Performance overhead', 'Learning curve', 'Configuration'], + 'MIT', + ARRAY['JavaScript Testing', 'Angular Applications', 'Cross-browser Testing', 'CI/CD Pipelines', 'Frontend Testing']), +('Protractor', 'e2e', ARRAY['JavaScript', 'TypeScript', 'Angular'], 'full', true, + ARRAY['Chrome', 'Firefox'], false, false, false, + ARRAY['End-to-end testing', 'Angular testing', 'Web testing', 'UI testing'], + ARRAY['Angular optimized', 'Good integration', 'Auto-wait', 'Selenium-based'], + ARRAY['Angular only', 'Deprecated', 'Setup complexity', 'Performance issues'], + 'MIT', + ARRAY['Angular Testing', 'End-to-end Testing', 'Web Applications', 'UI Testing', 'Frontend Testing']), + +('Detox', 'e2e', ARRAY['JavaScript', 'TypeScript', 'React Native'], 'full', true, + ARRAY['Mobile'], true, false, false, + ARRAY['Mobile testing', 'React Native testing', 'End-to-end testing', 'UI testing'], + ARRAY['Gray box testing', 'Fast execution', 'Good debugging', 'React Native optimized'], + ARRAY['Mobile only', 'React Native only', 'Setup complexity', 'Learning curve'], + 'MIT', + ARRAY['Mobile Testing', 'React Native Applications', 'End-to-end Testing', 'UI Testing', 'Mobile Apps']), +('Appium', 'e2e', ARRAY['JavaScript', 'Java', 'Python', 'C#'], 'full', true, + ARRAY['Mobile'], true, false, false, + ARRAY['Mobile testing', 'Cross-platform testing', 'End-to-end testing', 'UI testing'], + ARRAY['Cross-platform', 'Multi-language', 'Good ecosystem', 'Cloud support'], + ARRAY['Setup complexity', 'Performance issues', 'Flaky tests', 'Learning curve'], + 'Apache 2.0', + ARRAY['Mobile Testing', 'Cross-platform Testing', 'End-to-end Testing', 'UI Testing', 'Mobile Apps']), +('XCUITest', 'e2e', ARRAY['Swift', 'Objective-C'], 'full', true, + ARRAY['iOS'], true, false, false, + ARRAY['iOS testing', 'UI testing', 'End-to-end testing', 'Mobile testing'], + ARRAY['Apple native', 'Good integration', 'Fast execution', 'Reliable'], + ARRAY['iOS only', 'Apple only', 'Limited features', 'Learning curve'], + 'Apple', + ARRAY['iOS Testing', 'Mobile Testing', 'UI Testing', 'End-to-end Testing', 'Apple Applications']), +('Espresso', 'e2e', ARRAY['Java', 'Kotlin'], 'full', true, + ARRAY['Android'], true, false, false, + ARRAY['Android testing', 'UI testing', 'End-to-end testing', 'Mobile testing'], + ARRAY['Google native', 'Good integration', 'Fast execution', 'Reliable'], + ARRAY['Android only', 'Google only', 'Limited features', 'Learning curve'], + 'Apache 2.0', + ARRAY['Android Testing', 'Mobile Testing', 'UI Testing', 'End-to-end Testing', 'Google Applications']), +('Postman', 'api', ARRAY['JavaScript', 'REST', 'GraphQL'], 'full', true, + ARRAY['All browsers'], false, true, false, + ARRAY['API testing', 'Integration testing', 'Documentation', 'Monitoring'], + ARRAY['User-friendly', 'Good UI', 'Collaboration', 'Comprehensive'], + ARRAY['Limited automation', 'Performance issues', 'Cost', 'Learning curve'], + 'Postman', + ARRAY['API Testing', 'Integration Testing', 'Documentation', 'Monitoring', 'REST Services']), +('Insomnia', 'api', ARRAY['JavaScript', 'REST', 'GraphQL'], 'full', true, + ARRAY['All browsers'], false, true, false, + ARRAY['API testing', 'Integration testing', 'Documentation', 'Debugging'], + ARRAY['Clean UI', 'Good performance', 'Open source', 'Extensible'], + ARRAY['Limited features', 'Basic automation', 'Learning curve', 'Limited collaboration'], + 'MIT', + ARRAY['API Testing', 'Integration Testing', 'Documentation', 'Debugging', 'REST Services']), +('SoapUI', 'api', ARRAY['Java', 'SOAP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, false, + ARRAY['API testing', 'SOAP testing', 'Web services testing', 'Integration testing'], + ARRAY['Comprehensive', 'Good for SOAP', 'Enterprise features', 'Good reporting'], + ARRAY['Java dependency', 'Complex setup', 'Performance issues', 'Cost'], + 'SoapUI', + ARRAY['API Testing', 'SOAP Testing', 'Web Services', 'Integration Testing', 'Enterprise Applications']), +('RestAssured', 'api', ARRAY['Java', 'REST', 'Spring'], 'full', true, + ARRAY['Java'], false, true, false, + ARRAY['API testing', 'REST testing', 'Integration testing', 'Java testing'], + ARRAY['Java native', 'Good syntax', 'Spring integration', 'Comprehensive'], + ARRAY['Java only', 'REST only', 'Setup complexity', 'Learning curve'], + 'Apache 2.0', + ARRAY['API Testing', 'REST Testing', 'Java Applications', 'Spring Apps', 'Integration Testing']), +('JUnit', 'unit', ARRAY['Java', 'Spring', 'Android'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Regression testing', 'Java testing'], + ARRAY['Java standard', 'Good ecosystem', 'Framework integration', 'Reliable'], + ARRAY['Java only', 'Basic features', 'Setup complexity', 'Learning curve'], + 'Eclipse Public License', + ARRAY['Java Development', 'Unit Testing', 'Integration Testing', 'Spring Applications', 'Android Apps']), +('TestNG', 'unit', ARRAY['Java', 'Spring', 'Selenium'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Functional testing', 'Data-driven testing'], + ARRAY['Advanced features', 'Good reporting', 'Data-driven', 'Flexible'], + ARRAY['Java only', 'Complex setup', 'Learning curve', 'Performance issues'], + 'Apache 2.0', + ARRAY['Java Development', 'Unit Testing', 'Integration Testing', 'Functional Testing', 'Data-driven Testing']), +('Mockito', 'mocking', ARRAY['Java', 'Spring', 'Android'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Mocking', 'Stubbing', 'Unit testing', 'Integration testing'], + ARRAY['Easy to use', 'Good syntax', 'Java native', 'Comprehensive'], + ARRAY['Java only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Java Testing', 'Mocking', 'Unit Testing', 'Integration Testing', 'Spring Applications']), +('PowerMock', 'mocking', ARRAY['Java', 'Spring', 'JUnit'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Mocking', 'Stubbing', 'Static methods', 'Private methods'], + ARRAY['Powerful mocking', 'Static methods', 'Private methods', 'JUnit integration'], + ARRAY['Java only', 'Complex setup', 'Learning curve', 'Performance issues'], + 'Apache 2.0', + ARRAY['Java Testing', 'Mocking', 'Unit Testing', 'Static Methods', 'Private Methods']), +('Hamcrest', 'assertion', ARRAY['Java', 'JUnit', 'TestNG'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Assertion library', 'Unit testing', 'Integration testing', 'Java testing'], + ARRAY['Readable syntax', 'Comprehensive', 'Extensible', 'Good documentation'], + ARRAY['Java only', 'Learning curve', 'Setup complexity', 'Limited features'], + 'BSD-3-Clause', + ARRAY['Java Testing', 'Assertion Library', 'Unit Testing', 'Integration Testing', 'Java Development']), +('AssertJ', 'assertion', ARRAY['Java', 'JUnit', 'Spring'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Assertion library', 'Unit testing', 'Integration testing', 'Java testing'], + ARRAY['Fluent API', 'Good syntax', 'Comprehensive', 'Java native'], + ARRAY['Java only', 'Learning curve', 'Setup complexity', 'Limited features'], + 'Apache 2.0', + ARRAY['Java Testing', 'Assertion Library', 'Unit Testing', 'Integration Testing', 'Java Development']), +('Selenide', 'e2e', ARRAY['Java', 'JavaScript', 'Selenium'], 'full', true, + ARRAY['Chrome', 'Firefox', 'Safari'], false, false, false, + ARRAY['End-to-end testing', 'Web testing', 'UI testing', 'Integration testing'], + ARRAY['Concise API', 'Good syntax', 'Selenium-based', 'Reliable'], + ARRAY['Java only', 'Limited features', 'Setup complexity', 'Learning curve'], + 'MIT', + ARRAY['Web Testing', 'End-to-end Testing', 'UI Testing', 'Java Applications', 'Selenium Testing']), +('Gatling', 'performance', ARRAY['Java', 'Scala', 'HTTP'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'Stress testing', 'HTTP testing'], + ARRAY['High performance', 'Good reporting', 'Scala native', 'Comprehensive'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Java dependency'], + 'Apache 2.0', + ARRAY['Performance Testing', 'Load Testing', 'Stress Testing', 'HTTP Services', 'Enterprise Applications']), +('JMeter', 'performance', ARRAY['Java', 'HTTP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'Stress testing', 'API testing'], + ARRAY['Comprehensive', 'Good UI', 'Protocol support', 'Extensible'], + ARRAY['Java dependency', 'Resource intensive', 'Complex setup', 'Learning curve'], + 'Apache 2.0', + ARRAY['Performance Testing', 'Load Testing', 'Stress Testing', 'API Testing', 'Enterprise Applications']), +('Locust', 'performance', ARRAY['Python', 'HTTP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'Stress testing', 'API testing'], + ARRAY['Python native', 'Easy to use', 'Good reporting', 'Scalable'], + ARRAY['Python only', 'Limited features', 'Setup complexity', 'Learning curve'], + 'MIT', + ARRAY['Performance Testing', 'Load Testing', 'Stress Testing', 'API Testing', 'Python Applications']), +('K6', 'performance', ARRAY['JavaScript', 'TypeScript', 'HTTP'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'Stress testing', 'API testing'], + ARRAY['JavaScript native', 'Good performance', 'Cloud integration', 'Modern'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Cost'], + 'AGPL-3.0', + ARRAY['Performance Testing', 'Load Testing', 'Stress Testing', 'API Testing', 'JavaScript Applications']), + +('Artillery', 'performance', ARRAY['JavaScript', 'Node.js', 'HTTP'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'Stress testing', 'API testing'], + ARRAY['JavaScript native', 'Easy setup', 'Good performance', 'Extensible'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Limited documentation'], + 'MPL-2.0', + ARRAY['Performance Testing', 'Load Testing', 'Stress Testing', 'API Testing', 'JavaScript Applications']), +('Tsung', 'performance', ARRAY['Erlang', 'HTTP', 'XMPP'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'Stress testing', 'Protocol testing'], + ARRAY['High performance', 'Protocol support', 'Scalable', 'Reliable'], + ARRAY['Erlang dependency', 'Complex setup', 'Learning curve', 'Limited UI'], + 'GPL-2.0', + ARRAY['Performance Testing', 'Load Testing', 'Stress Testing', 'Protocol Testing', 'Enterprise Applications']), +('Vegeta', 'performance', ARRAY['Go', 'HTTP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'API testing'], + ARRAY['Go native', 'Fast execution', 'Simple', 'Reliable'], + ARRAY['Limited features', 'Basic reporting', 'Learning curve', 'Go only'], + 'MIT', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'API Testing', 'Go Applications']), +('Fortio', 'performance', ARRAY['Go', 'HTTP', 'gRPC'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'gRPC testing'], + ARRAY['Go native', 'gRPC support', 'Good UI', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Go only'], + 'Apache 2.0', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'gRPC Testing', 'Go Applications']), +('Wrk', 'performance', ARRAY['C', 'HTTP', 'Lua'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'Benchmarking'], + ARRAY['High performance', 'Fast execution', 'Lua scripting', 'Reliable'], + ARRAY['Limited features', 'Basic UI', 'Learning curve', 'C dependency'], + 'Apache 2.0', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'Benchmarking', 'High-performance Systems']), +('Apache Bench', 'performance', ARRAY['C', 'HTTP', 'Apache'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'Benchmarking'], + ARRAY['Built-in', 'Simple', 'Reliable', 'Apache native'], + ARRAY['Limited features', 'Basic reporting', 'HTTP only', 'Apache only'], + 'Apache 2.0', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'Benchmarking', 'Apache Applications']), +('Siege', 'performance', ARRAY['C', 'HTTP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'Stress testing'], + ARRAY['Simple', 'Reliable', 'Good reporting', 'Configurable'], + ARRAY['Limited features', 'Basic UI', 'Learning curve', 'C dependency'], + 'GPL-3.0', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'Stress Testing', 'Web Applications']), +('Loader.io', 'performance', ARRAY['Cloud', 'HTTP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'Cloud testing'], + ARRAY['Cloud-based', 'Easy setup', 'Good reporting', 'No installation'], + ARRAY['Cost', 'Limited control', 'Internet dependency', 'Limited features'], + 'Loader.io', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'Cloud Testing', 'Web Applications']), +('BlazeMeter', 'performance', ARRAY['Cloud', 'HTTP', 'REST'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance testing', 'Load testing', 'HTTP testing', 'Cloud testing'], + ARRAY['Cloud-based', 'Comprehensive', 'Good reporting', 'JMeter integration'], + ARRAY['Cost', 'Complex setup', 'Internet dependency', 'Learning curve'], + 'BlazeMeter', + ARRAY['Performance Testing', 'Load Testing', 'HTTP Testing', 'Cloud Testing', 'Enterprise Applications']), +('New Relic', 'monitoring', ARRAY['Cloud', 'APM', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'APM', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real-time'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Internet dependency'], + 'New Relic', + ARRAY['Performance Monitoring', 'APM', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('Datadog', 'monitoring', ARRAY['Cloud', 'APM', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'APM', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Comprehensive', 'Good integration', 'Cloud-based', 'Real-time'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Internet dependency'], + 'Datadog', + ARRAY['Performance Monitoring', 'APM', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('Prometheus', 'monitoring', ARRAY['Go', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'Metrics collection', 'Cloud monitoring', 'Time series'], + ARRAY['Open source', 'Powerful', 'Extensible', 'Good ecosystem'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited UI'], + 'Apache 2.0', + ARRAY['Performance Monitoring', 'Metrics Collection', 'Cloud Monitoring', 'Time Series', 'Enterprise Applications']), +('Grafana', 'monitoring', ARRAY['Go', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'Visualization', 'Dashboarding', 'Cloud monitoring'], + ARRAY['Good UI', 'Extensible', 'Cloud-based', 'Comprehensive'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited features'], + 'Apache 2.0', + ARRAY['Performance Monitoring', 'Visualization', 'Dashboarding', 'Cloud Monitoring', 'Enterprise Applications']), +('Jaeger', 'monitoring', ARRAY['Go', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'Distributed tracing', 'Cloud monitoring', 'Microservices'], + ARRAY['Open source', 'Distributed tracing', 'Good integration', 'Cloud-based'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited UI'], + 'Apache 2.0', + ARRAY['Performance Monitoring', 'Distributed Tracing', 'Cloud Monitoring', 'Microservices', 'Enterprise Applications']), +('Zipkin', 'monitoring', ARRAY['Java', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'Distributed tracing', 'Cloud monitoring', 'Microservices'], + ARRAY['Open source', 'Simple', 'Good integration', 'Cloud-based'], + ARRAY['Limited features', 'Basic UI', 'Learning curve', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Performance Monitoring', 'Distributed Tracing', 'Cloud Monitoring', 'Microservices', 'Enterprise Applications']), +('Elastic APM', 'monitoring', ARRAY['Java', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'APM', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Comprehensive', 'Good integration', 'Cloud-based', 'Real-time'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Cost'], + 'Elastic License', + ARRAY['Performance Monitoring', 'APM', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('Sentry', 'monitoring', ARRAY['JavaScript', 'Python', 'Ruby'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Error monitoring', 'Performance monitoring', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Good UI', 'Real-time', 'Multi-language', 'Cloud-based'], + ARRAY['Cost', 'Limited features', 'Internet dependency', 'Learning curve'], + 'BSL-1.0', + ARRAY['Error Monitoring', 'Performance Monitoring', 'Application Monitoring', 'Cloud Monitoring', 'Multi-language Apps']), + +('Munin', 'monitoring', ARRAY['Perl', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'System monitoring', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Open source', 'Simple', 'Reliable', 'Good plugins'], + ARRAY['Perl dependency', 'Limited UI', 'Learning curve', 'Resource intensive'], + 'GPL-2.0', + ARRAY['Performance Monitoring', 'System Monitoring', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('Collectd', 'monitoring', ARRAY['C', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'System monitoring', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Open source', 'Lightweight', 'Reliable', 'Good plugins'], + ARRAY['C dependency', 'Limited UI', 'Learning curve', 'Complex setup'], + 'GPL-2.0', + ARRAY['Performance Monitoring', 'System Monitoring', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('Telegraf', 'monitoring', ARRAY['Go', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'System monitoring', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Open source', 'Lightweight', 'Reliable', 'Good plugins'], + ARRAY['Go dependency', 'Limited UI', 'Learning curve', 'Complex setup'], + 'MIT', + ARRAY['Performance Monitoring', 'System Monitoring', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('InfluxDB', 'monitoring', ARRAY['Go', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'Time series', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Open source', 'Fast', 'Reliable', 'Good ecosystem'], + ARRAY['Go dependency', 'Limited UI', 'Learning curve', 'Complex setup'], + 'MIT', + ARRAY['Performance Monitoring', 'Time Series', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('VictoriaMetrics', 'monitoring', ARRAY['Go', 'Cloud', 'Monitoring'], 'full', true, + ARRAY['All browsers'], false, true, true, + ARRAY['Performance monitoring', 'Time series', 'Application monitoring', 'Cloud monitoring'], + ARRAY['Open source', 'Fast', 'Reliable', 'Good ecosystem'], + ARRAY['Go dependency', 'Limited UI', 'Learning curve', 'Complex setup'], + 'Apache 2.0', + ARRAY['Performance Monitoring', 'Time Series', 'Application Monitoring', 'Cloud Monitoring', 'Enterprise Applications']), +('Cypress Dashboard', 'e2e', ARRAY['JavaScript', 'TypeScript', 'React'], 'full', true, + ARRAY['Chrome', 'Firefox', 'Edge'], false, true, false, + ARRAY['End-to-end testing', 'Test management', 'Visual testing', 'Test reporting'], + ARRAY['Good UI', 'Real-time', 'Cloud-based', 'Comprehensive'], + ARRAY['Cost', 'Limited features', 'Internet dependency', 'Learning curve'], + 'Cypress', + ARRAY['End-to-end Testing', 'Test Management', 'Visual Testing', 'Test Reporting', 'Cloud Testing']), +('BrowserStack', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'BrowserStack', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('Sauce Labs', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Sauce Labs', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('LambdaTest', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'LambdaTest', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('CrossBrowserTesting', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'CrossBrowserTesting', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('TestingBot', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'TestingBot', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('Perfecto', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Perfecto', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('Kobiton', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Kobiton', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('Experitest', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Experitest', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('HeadSpin', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Comprehensive', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'HeadSpin', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Enterprise Applications']), +('AWS Device Farm', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['AWS integration', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'AWS dependency', 'Limited control', 'Learning curve'], + 'AWS', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'AWS Applications']), +('Firebase Test Lab', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Firebase integration', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Firebase dependency', 'Limited control', 'Learning curve'], + 'Firebase', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Firebase Applications']), +('Google Cloud Testing', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Google Cloud integration', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Google Cloud dependency', 'Limited control', 'Learning curve'], + 'Google Cloud', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Google Cloud Applications']), +('Azure DevTest Labs', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Cross-browser testing', 'Mobile testing', 'Real device testing', 'Cloud testing'], + ARRAY['Azure integration', 'Good UI', 'Cloud-based', 'Real devices'], + ARRAY['Cost', 'Azure dependency', 'Limited control', 'Learning curve'], + 'Azure', + ARRAY['Cross-browser Testing', 'Mobile Testing', 'Real Device Testing', 'Cloud Testing', 'Azure Applications']), +('Tricentis Tosca', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Model-based', 'Enterprise features'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'Tricentis', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), + +('Micro Focus UFT', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Enterprise features', 'Reliable'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'Micro Focus', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), +('IBM Rational Functional Tester', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Enterprise features', 'Reliable'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'IBM', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), +('HP QuickTest Professional', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Enterprise features', 'Reliable'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'HP', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), +('TestComplete', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Enterprise features', 'Reliable'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'SmartBear', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), +('Ranorex', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Enterprise features', 'Reliable'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'Ranorex', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), +('Leapwork', 'e2e', ARRAY['Java', 'C#', 'JavaScript'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Comprehensive', 'Good UI', 'Enterprise features', 'Reliable'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'Leapwork', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'Enterprise Applications']), +('Autify', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['AI-powered', 'Good UI', 'Cloud-based', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Autify', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'AI Applications']), +('Mabl', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['AI-powered', 'Good UI', 'Cloud-based', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Mabl', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'AI Applications']), +('Functionize', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['AI-powered', 'Good UI', 'Cloud-based', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Functionize', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'AI Applications']), +('Testim', 'e2e', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['End-to-end testing', 'Test automation', 'Cross-browser testing', 'Mobile testing'], + ARRAY['AI-powered', 'Good UI', 'Cloud-based', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Testim', + ARRAY['End-to-end Testing', 'Test Automation', 'Cross-browser Testing', 'Mobile Testing', 'AI Applications']), +('Applitools', 'visual', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Visual testing', 'UI testing', 'Cross-browser testing', 'Mobile testing'], + ARRAY['AI-powered', 'Good UI', 'Cloud-based', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Applitools', + ARRAY['Visual Testing', 'UI Testing', 'Cross-browser Testing', 'Mobile Testing', 'AI Applications']), +('Percy', 'visual', ARRAY['JavaScript', 'Java', 'Python'], 'full', true, + ARRAY['All browsers'], true, true, false, + ARRAY['Visual testing', 'UI testing', 'Cross-browser testing', 'Mobile testing'], + ARRAY['Good UI', 'Cloud-based', 'Easy integration', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Percy', + ARRAY['Visual Testing', 'UI Testing', 'Cross-browser Testing', 'Mobile Testing', 'Cloud Applications']), +('BackstopJS', 'visual', ARRAY['JavaScript', 'Node.js'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Regression testing', 'Screenshot testing'], + ARRAY['Open source', 'Configurable', 'Good integration', 'Reliable'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited UI'], + 'MIT', + ARRAY['Visual Testing', 'UI Testing', 'Regression Testing', 'Screenshot Testing', 'JavaScript Applications']), +('Wraith', 'visual', ARRAY['Ruby', 'Cloud'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Regression testing', 'Screenshot testing'], + ARRAY['Open source', 'Simple', 'Configurable', 'Reliable'], + ARRAY['Ruby dependency', 'Limited features', 'Learning curve', 'Resource intensive'], + 'MIT', + ARRAY['Visual Testing', 'UI Testing', 'Regression Testing', 'Screenshot Testing', 'Ruby Applications']), +('Galen', 'visual', ARRAY['Java', 'JavaScript'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Layout testing', 'Regression testing'], + ARRAY['Open source', 'Layout testing', 'Good syntax', 'Reliable'], + ARRAY['Java dependency', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Visual Testing', 'UI Testing', 'Layout Testing', 'Regression Testing', 'Java Applications']), +('Spectre', 'visual', ARRAY['JavaScript', 'Node.js'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Regression testing', 'Screenshot testing'], + ARRAY['Open source', 'Simple', 'Configurable', 'Reliable'], + ARRAY['Limited features', 'Learning curve', 'Resource intensive', 'Limited UI'], + 'MIT', + ARRAY['Visual Testing', 'UI Testing', 'Regression Testing', 'Screenshot Testing', 'JavaScript Applications']), +('Happo', 'visual', ARRAY['JavaScript', 'Node.js'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Regression testing', 'Screenshot testing'], + ARRAY['Good UI', 'Cloud-based', 'Easy integration', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Happo', + ARRAY['Visual Testing', 'UI Testing', 'Regression Testing', 'Screenshot Testing', 'Cloud Applications']), +('Chromatic', 'visual', ARRAY['JavaScript', 'React', 'Vue'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Regression testing', 'Screenshot testing'], + ARRAY['Good UI', 'Cloud-based', 'Easy integration', 'Comprehensive'], + ARRAY['Cost', 'Internet dependency', 'Limited control', 'Learning curve'], + 'Chromatic', + ARRAY['Visual Testing', 'UI Testing', 'Regression Testing', 'Screenshot Testing', 'Cloud Applications']), + +('Storybook', 'visual', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Visual testing', 'UI testing', 'Component testing', 'Documentation'], + ARRAY['Good UI', 'Component-based', 'Documentation', 'Comprehensive'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited features'], + 'MIT', + ARRAY['Visual Testing', 'UI Testing', 'Component Testing', 'Documentation', 'JavaScript Applications']), +('React Testing Library', 'unit', ARRAY['JavaScript', 'React', 'TypeScript'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'React testing'], + ARRAY['React native', 'Good practices', 'Simple API', 'Reliable'], + ARRAY['React only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'React Testing', 'JavaScript Applications']), +('Vue Test Utils', 'unit', ARRAY['JavaScript', 'Vue', 'TypeScript'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Vue testing'], + ARRAY['Vue native', 'Good practices', 'Simple API', 'Reliable'], + ARRAY['Vue only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Vue Testing', 'JavaScript Applications']), +('Angular Testing', 'unit', ARRAY['JavaScript', 'Angular', 'TypeScript'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Angular testing'], + ARRAY['Angular native', 'Good practices', 'Simple API', 'Reliable'], + ARRAY['Angular only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Angular Testing', 'JavaScript Applications']), +('Svelte Testing', 'unit', ARRAY['JavaScript', 'Svelte', 'TypeScript'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Svelte testing'], + ARRAY['Svelte native', 'Good practices', 'Simple API', 'Reliable'], + ARRAY['Svelte only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Svelte Testing', 'JavaScript Applications']), +('Enzyme', 'unit', ARRAY['JavaScript', 'React', 'TypeScript'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'React testing'], + ARRAY['React native', 'Good API', 'Comprehensive', 'Reliable'], + ARRAY['React only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'React Testing', 'JavaScript Applications']), +('Testing Library', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'Good practices', 'Simple API', 'Reliable'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('Cypress Component Testing', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'Good UI', 'Real-time', 'Comprehensive'], + ARRAY['Cost', 'Limited features', 'Learning curve', 'Setup complexity'], + 'Cypress', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('Playwright Component Testing', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'Fast execution', 'Good API', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('TestCafe Component Testing', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'No WebDriver', 'Good API', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('WebdriverIO Component Testing', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'WebDriver standard', 'Good API', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('Nightwatch Component Testing', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'Selenium-based', 'Good API', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('Puppeteer Component Testing', 'unit', ARRAY['JavaScript', 'React', 'Vue', 'Angular'], 'full', true, + ARRAY['All browsers'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Framework testing'], + ARRAY['Multi-framework', 'Headless Chrome', 'Good API', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Framework Testing', 'JavaScript Applications']), +('Detox Component Testing', 'unit', ARRAY['JavaScript', 'React Native', 'TypeScript'], 'full', true, + ARRAY['Mobile'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'React Native testing'], + ARRAY['React Native native', 'Gray box testing', 'Good API', 'Comprehensive'], + ARRAY['React Native only', 'Limited features', 'Learning curve', 'Setup complexity'], + 'MIT', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'React Native Testing', 'Mobile Applications']), +('Appium Component Testing', 'unit', ARRAY['JavaScript', 'Java', 'Python', 'C#'], 'full', true, + ARRAY['Mobile'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Mobile testing'], + ARRAY['Multi-platform', 'Multi-language', 'Good API', 'Comprehensive'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Mobile Testing', 'Mobile Applications']), +('XCUITest Component Testing', 'unit', ARRAY['Swift', 'Objective-C'], 'full', true, + ARRAY['iOS'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'iOS testing'], + ARRAY['Apple native', 'Good integration', 'Fast execution', 'Reliable'], + ARRAY['iOS only', 'Apple only', 'Limited features', 'Learning curve'], + 'Apple', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'iOS Testing', 'Apple Applications']), +('Espresso Component Testing', 'unit', ARRAY['Java', 'Kotlin'], 'full', true, + ARRAY['Android'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Component testing', 'Android testing'], + ARRAY['Google native', 'Good integration', 'Fast execution', 'Reliable'], + ARRAY['Android only', 'Google only', 'Limited features', 'Learning curve'], + 'Apache 2.0', + ARRAY['Unit Testing', 'Integration Testing', 'Component Testing', 'Android Testing', 'Google Applications']), +('JUnit 5', 'unit', ARRAY['Java', 'Spring', 'Android'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'Regression testing', 'Java testing'], + ARRAY['Java standard', 'Modern features', 'Good ecosystem', 'Framework integration'], + ARRAY['Java only', 'Basic features', 'Setup complexity', 'Learning curve'], + 'Eclipse Public License', + ARRAY['Unit Testing', 'Integration Testing', 'Regression Testing', 'Java Testing', 'Java Applications']), +('Spock', 'unit', ARRAY['Java', 'Groovy', 'Spring'], 'full', true, + ARRAY['Java'], false, false, false, + ARRAY['Unit testing', 'Integration testing', 'BDD testing', 'Data-driven testing'], + ARRAY['Groovy syntax', 'Readable', 'Good features', 'Spring integration'], + ARRAY['Java only', 'Groovy dependency', 'Learning curve', 'Setup complexity'], + 'Apache 2.0', + ARRAY['Unit Testing', 'Integration Testing', 'BDD Testing', 'Data-driven Testing', 'Java Applications']); + + INSERT INTO mobile_technologies ( + name, platform_support, development_approach, language_base, performance_rating, + learning_curve, ui_native_feel, code_sharing_percentage, primary_use_cases, + strengths, weaknesses, license_type, domain +) VALUES +('React Native', ARRAY['ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 90, + ARRAY['Cross-platform mobile apps', 'Rapid prototyping', 'Code sharing with web', 'MVP development'], + ARRAY['Code reusability', 'Fast development', 'Large community', 'Hot reloading', 'Native module access'], + ARRAY['Performance limitations', 'Platform-specific bugs', 'Bridge overhead', 'Frequent updates'], + 'MIT', + ARRAY['E-commerce', 'Social Media', 'Startups', 'Prototyping', 'Cross-platform Apps']), +('Flutter', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'dart', 90, 'medium', 88, 95, + ARRAY['Cross-platform apps', 'High-performance mobile apps', 'Custom UI designs', 'Enterprise apps'], + ARRAY['Excellent performance', 'Single codebase', 'Custom widgets', 'Google backing', 'Fast rendering'], + ARRAY['Large app size', 'Limited third-party libraries', 'Dart learning curve', 'Newer ecosystem'], + 'BSD', + ARRAY['Enterprise Apps', 'Gaming', 'E-commerce', 'Custom UI Apps', 'Cross-platform Apps']), +('Ionic', ARRAY['ios', 'android', 'web'], 'hybrid', 'typescript', 75, 'easy', 70, 85, + ARRAY['Hybrid mobile apps', 'Progressive web apps', 'Rapid prototyping', 'Web-based mobile apps'], + ARRAY['Web technologies', 'Fast development', 'Single codebase', 'Large plugin ecosystem', 'Easy learning'], + ARRAY['Performance limitations', 'WebView dependency', 'Less native feel', 'Battery usage'], + 'MIT', + ARRAY['Prototyping', 'Small Business Apps', 'Progressive Web Apps', 'Startups', 'Content Management']), +('Swift (iOS)', ARRAY['ios'], 'native', 'swift', 98, 'medium', 100, 0, + ARRAY['iOS native apps', 'High-performance apps', 'Apple ecosystem integration', 'Complex mobile apps'], + ARRAY['Best iOS performance', 'Full platform access', 'Latest iOS features', 'Apple support', 'Excellent tooling'], + ARRAY['iOS only', 'Requires Mac', 'Separate Android development', 'Higher development cost'], + 'Apache 2.0', + ARRAY['Enterprise Apps', 'Gaming', 'Financial Services', 'Healthcare', 'iOS Native Apps']), +('Kotlin (Android)', ARRAY['android'], 'native', 'kotlin', 98, 'medium', 100, 0, + ARRAY['Android native apps', 'High-performance apps', 'Google services integration', 'Complex mobile apps'], + ARRAY['Best Android performance', 'Full platform access', 'Google backing', 'Java interoperability', 'Modern language'], + ARRAY['Android only', 'Separate iOS development', 'Higher development cost', 'Platform fragmentation'], + 'Apache 2.0', + ARRAY['Enterprise Apps', 'Gaming', 'E-commerce', 'Android Native Apps', 'Financial Services']), +('Xamarin', ARRAY['ios', 'android'], 'cross-platform', 'csharp', 85, 'medium', 85, 95, + ARRAY['Cross-platform apps', 'Enterprise mobile apps', 'Windows integration', 'Business apps'], + ARRAY['Microsoft backing', 'Native performance', 'C# ecosystem', 'Visual Studio integration', 'Full API access'], + ARRAY['Microsoft dependency', 'Larger app size', 'Complex setup', 'Limited community'], + 'MIT', + ARRAY['Enterprise Apps', 'Business Apps', 'Windows Integration', 'Cross-platform Apps', 'Financial Services']), +('Unity', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'csharp', 88, 'hard', 90, 85, + ARRAY['Mobile games', 'AR/VR apps', '3D applications', 'Interactive experiences'], + ARRAY['Excellent graphics', 'Cross-platform', 'Large asset store', 'Professional tools', 'Multi-platform'], + ARRAY['Large app size', 'Complex learning', 'Resource intensive', 'Cost for commercial use'], + 'Unity', + ARRAY['Gaming', 'AR/VR', '3D Applications', 'Interactive Experiences', 'Entertainment']), +('Unreal Engine', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'c++', 95, 'hard', 95, 80, + ARRAY['High-end games', 'AR/VR apps', '3D applications', 'Cinematic experiences'], + ARRAY['Best graphics', 'Professional tools', 'Blueprint visual scripting', 'High performance', 'Multi-platform'], + ARRAY['Very large app size', 'Steep learning curve', 'Resource intensive', 'High cost'], + 'Unreal', + ARRAY['AAA Gaming', 'AR/VR', '3D Applications', 'Cinematic Experiences', 'Entertainment']), +('NativeScript', ARRAY['ios', 'android'], 'cross-platform', 'javascript', 80, 'medium', 85, 90, + ARRAY['Cross-platform apps', 'JavaScript-based apps', 'Native performance', 'Web developer transition'], + ARRAY['Direct native access', 'JavaScript/TypeScript', 'No WebView', 'Angular support', 'Fast development'], + ARRAY['Smaller community', 'Limited plugins', 'Debugging complexity', 'Platform-specific issues'], + 'Apache 2.0', + ARRAY['Cross-platform Apps', 'JavaScript Apps', 'Enterprise Apps', 'Startups', 'Web Developer Transition']), +('PWA', ARRAY['ios', 'android', 'web'], 'hybrid', 'javascript', 70, 'easy', 60, 95, + ARRAY['Progressive web apps', 'Web-based mobile apps', 'Offline functionality', 'Cross-platform web apps'], + ARRAY['No app store', 'Instant updates', 'Web technologies', 'Low cost', 'Cross-platform'], + ARRAY['Limited native features', 'Browser dependency', 'Performance limitations', 'Platform restrictions'], + 'Open Web', + ARRAY['Web Apps', 'Progressive Web Apps', 'Startups', 'Content Management', 'E-commerce']), +('Capacitor', ARRAY['ios', 'android', 'web'], 'hybrid', 'typescript', 78, 'easy', 75, 88, + ARRAY['Hybrid mobile apps', 'Progressive web apps', 'Web-to-native apps', 'Cross-platform development'], + ARRAY['Modern web technologies', 'Native plugins', 'Easy deployment', 'Good documentation', 'Ionic integration'], + ARRAY['WebView dependency', 'Performance limitations', 'Limited native features', 'Battery usage'], + 'MIT', + ARRAY['Hybrid Apps', 'Progressive Web Apps', 'Startups', 'Content Management', 'Cross-platform Apps']), +('PhoneGap', ARRAY['ios', 'android', 'web'], 'hybrid', 'javascript', 72, 'easy', 65, 80, + ARRAY['Hybrid mobile apps', 'Web-based mobile apps', 'Rapid prototyping', 'Cross-platform web apps'], + ARRAY['Web technologies', 'Easy learning', 'Cross-platform', 'Adobe backing', 'Plugin ecosystem'], + ARRAY['Performance limitations', 'WebView dependency', 'Less native feel', 'Aging technology'], + 'Apache 2.0', + ARRAY['Hybrid Apps', 'Prototyping', 'Startups', 'Content Management', 'Cross-platform Apps']), +('Cordova', ARRAY['ios', 'android', 'web'], 'hybrid', 'javascript', 72, 'easy', 65, 80, + ARRAY['Hybrid mobile apps', 'Web-based mobile apps', 'Rapid prototyping', 'Cross-platform web apps'], + ARRAY['Open source', 'Web technologies', 'Plugin ecosystem', 'Cross-platform', 'Easy deployment'], + ARRAY['Performance limitations', 'WebView dependency', 'Less native feel', 'Limited native features'], + 'Apache 2.0', + ARRAY['Hybrid Apps', 'Prototyping', 'Startups', 'Content Management', 'Cross-platform Apps']), +('Expo', ARRAY['ios', 'android'], 'cross-platform', 'javascript', 82, 'easy', 78, 92, + ARRAY['React Native apps', 'Rapid development', 'Simplified deployment', 'Beginner-friendly apps'], + ARRAY['Easy setup', 'Managed workflow', 'Good documentation', 'Built-in services', 'Fast development'], + ARRAY['Limited native modules', 'Expo dependency', 'Performance overhead', 'Less flexibility'], + 'MIT', + ARRAY['React Native Apps', 'Startups', 'Prototyping', 'Beginner Projects', 'Cross-platform Apps']), +('React Native for Web', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'medium', 75, 95, + ARRAY['Web apps', 'Cross-platform apps', 'Code sharing', 'React-based applications'], + ARRAY['Code reusability', 'React ecosystem', 'Cross-platform', 'Single codebase', 'Fast development'], + ARRAY['Web limitations', 'Platform differences', 'Complex setup', 'Limited web features'], + 'MIT', + ARRAY['Web Apps', 'Cross-platform Apps', 'React Apps', 'Code Sharing', 'Enterprise Apps']), +('Maui', ARRAY['ios', 'android', 'windows'], 'cross-platform', 'csharp', 87, 'medium', 88, 95, + ARRAY['Cross-platform apps', 'Enterprise mobile apps', 'Windows integration', 'Business apps'], + ARRAY['Microsoft backing', 'Modern .NET', 'Single project', 'Hot reload', 'Full API access'], + ARRAY['Microsoft dependency', 'New technology', 'Limited community', 'Complex setup'], + 'MIT', + ARRAY['Enterprise Apps', 'Business Apps', 'Windows Integration', 'Cross-platform Apps', 'Financial Services']), +('Jetpack Compose', ARRAY['android'], 'native', 'kotlin', 96, 'medium', 100, 0, + ARRAY['Android native apps', 'Modern UI development', 'Declarative UI', 'Android apps'], + ARRAY['Google backing', 'Modern approach', 'Kotlin native', 'Excellent tooling', 'Fast development'], + ARRAY['Android only', 'New technology', 'Learning curve', 'Limited documentation'], + 'Apache 2.0', + ARRAY['Android Native Apps', 'Modern UI Apps', 'Enterprise Apps', 'Gaming', 'Google Applications']), +('SwiftUI', ARRAY['ios', 'macos'], 'native', 'swift', 96, 'medium', 100, 85, + ARRAY['iOS native apps', 'macOS apps', 'Declarative UI', 'Apple ecosystem apps'], + ARRAY['Apple backing', 'Modern approach', 'Swift native', 'Excellent tooling', 'Fast development'], + ARRAY['Apple only', 'New technology', 'Learning curve', 'Limited to Apple platforms'], + 'Apache 2.0', + ARRAY['iOS Native Apps', 'macOS Apps', 'Apple Ecosystem', 'Enterprise Apps', 'Apple Applications']), +('Objective-C (iOS)', ARRAY['ios'], 'native', 'objective-c', 95, 'hard', 100, 0, + ARRAY['Legacy iOS apps', 'iOS native apps', 'Apple ecosystem integration', 'Complex mobile apps'], + ARRAY['Mature technology', 'Full platform access', 'Apple support', 'Excellent performance', 'Legacy support'], + ARRAY['Outdated syntax', 'Steep learning curve', 'iOS only', 'Complex memory management'], + 'Apple', + ARRAY['Legacy Apps', 'iOS Native Apps', 'Enterprise Apps', 'Financial Services', 'Apple Applications']), + +('Java (Android)', ARRAY['android'], 'native', 'java', 95, 'medium', 100, 0, + ARRAY['Android native apps', 'Legacy Android apps', 'Enterprise mobile apps', 'Complex mobile apps'], + ARRAY['Mature technology', 'Large ecosystem', 'Full platform access', 'Google support', 'Excellent tooling'], + ARRAY['Verbose syntax', 'Legacy code', 'Android only', 'Memory management'], + 'GPL-2.0', + ARRAY['Legacy Apps', 'Android Native Apps', 'Enterprise Apps', 'Financial Services', 'Google Applications']), +('Kotlin Multiplatform', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'kotlin', 88, 'hard', 90, 80, + ARRAY['Cross-platform apps', 'Shared business logic', 'Multi-platform apps', 'Enterprise apps'], + ARRAY['Code sharing', 'Kotlin benefits', 'Multi-platform', 'Modern language', 'Google backing'], + ARRAY['Complex setup', 'Learning curve', 'Limited UI sharing', 'Platform-specific code'], + 'Apache 2.0', + ARRAY['Cross-platform Apps', 'Enterprise Apps', 'Multi-platform Apps', 'Business Logic', 'Financial Services']), +('Flutter for Web', ARRAY['web', 'ios', 'android'], 'cross-platform', 'dart', 85, 'medium', 80, 95, + ARRAY['Web apps', 'Cross-platform apps', 'Flutter-based web apps', 'Single codebase apps'], + ARRAY['Single codebase', 'Flutter ecosystem', 'Good performance', 'Modern web', 'Google backing'], + ARRAY['Web limitations', 'Large bundle size', 'Limited web features', 'SEO challenges'], + 'BSD', + ARRAY['Web Apps', 'Cross-platform Apps', 'Flutter Apps', 'Single Codebase', 'Enterprise Apps']), +('React Native for Desktop', ARRAY['windows', 'macos', 'linux'], 'cross-platform', 'javascript', 80, 'medium', 85, 90, + ARRAY['Desktop apps', 'Cross-platform desktop', 'React-based desktop', 'Enterprise desktop apps'], + ARRAY['Code reusability', 'React ecosystem', 'Cross-platform', 'Single codebase', 'Fast development'], + ARRAY['Desktop limitations', 'Platform differences', 'Complex setup', 'Limited desktop features'], + 'MIT', + ARRAY['Desktop Apps', 'Cross-platform Apps', 'React Apps', 'Enterprise Apps', 'Business Applications']), +('Electron', ARRAY['windows', 'macos', 'linux'], 'cross-platform', 'javascript', 75, 'easy', 70, 95, + ARRAY['Desktop apps', 'Cross-platform desktop', 'Web-based desktop', 'Enterprise desktop apps'], + ARRAY['Web technologies', 'Easy development', 'Cross-platform', 'Large ecosystem', 'Fast deployment'], + ARRAY['Resource intensive', 'Large app size', 'Performance limitations', 'Memory usage'], + 'MIT', + ARRAY['Desktop Apps', 'Cross-platform Apps', 'Web Apps', 'Enterprise Apps', 'Business Applications']), +('Tauri', ARRAY['windows', 'macos', 'linux'], 'cross-platform', 'rust', 85, 'medium', 80, 90, + ARRAY['Desktop apps', 'Cross-platform desktop', 'Lightweight desktop', 'Enterprise desktop apps'], + ARRAY['Lightweight', 'Fast performance', 'Rust benefits', 'Small app size', 'Modern approach'], + ARRAY['Rust learning curve', 'Limited ecosystem', 'New technology', 'Complex setup'], + 'MIT', + ARRAY['Desktop Apps', 'Cross-platform Apps', 'Lightweight Apps', 'Enterprise Apps', 'Business Applications']), +('Qt', ARRAY['windows', 'macos', 'linux', 'ios', 'android'], 'cross-platform', 'cpp', 90, 'hard', 95, 85, + ARRAY['Cross-platform apps', 'Desktop apps', 'Mobile apps', 'Enterprise apps', 'Embedded systems'], + ARRAY['Excellent performance', 'Cross-platform', 'Mature technology', 'Professional tools', 'Multi-platform'], + ARRAY['Steep learning curve', 'Complex setup', 'High cost', 'Resource intensive'], + 'LGPL', + ARRAY['Cross-platform Apps', 'Desktop Apps', 'Mobile Apps', 'Enterprise Apps', 'Embedded Systems']), +('Godot', ARRAY['windows', 'macos', 'linux', 'ios', 'android', 'web'], 'cross-platform', 'gdscript', 85, 'medium', 90, 90, + ARRAY['Mobile games', 'Desktop games', 'Web games', '2D/3D games', 'Indie games'], + ARRAY['Open source', 'Lightweight', 'Fast development', 'Good 2D support', 'Multi-platform'], + ARRAY['Limited 3D features', 'Smaller ecosystem', 'Newer technology', 'Learning curve'], + 'MIT', + ARRAY['Gaming', '2D Games', '3D Games', 'Indie Games', 'Cross-platform Games']), +('Defold', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'lua', 85, 'medium', 88, 90, + ARRAY['Mobile games', 'Desktop games', 'Web games', '2D games', 'Indie games'], + ARRAY['Open source', 'Fast performance', 'Good 2D support', 'Multi-platform', 'King backing'], + ARRAY['Lua dependency', 'Limited 3D features', 'Smaller ecosystem', 'Learning curve'], + 'Apache 2.0', + ARRAY['Gaming', '2D Games', 'Mobile Games', 'Indie Games', 'Cross-platform Games']), +('Cocos2d-x', ARRAY['ios', 'android', 'windows', 'macos', 'linux'], 'cross-platform', 'cpp', 85, 'hard', 90, 85, + ARRAY['Mobile games', 'Desktop games', '2D games', 'Cross-platform games', 'Indie games'], + ARRAY['Open source', 'Excellent 2D support', 'Fast performance', 'Multi-platform', 'Mature technology'], + ARRAY['C++ complexity', 'Limited 3D features', 'Steep learning curve', 'Resource intensive'], + 'MIT', + ARRAY['Gaming', '2D Games', 'Mobile Games', 'Cross-platform Games', 'Indie Games']), +('LibGDX', ARRAY['ios', 'android', 'windows', 'macos', 'linux'], 'cross-platform', 'java', 85, 'medium', 88, 90, + ARRAY['Mobile games', 'Desktop games', '2D/3D games', 'Cross-platform games', 'Indie games'], + ARRAY['Open source', 'Java ecosystem', 'Multi-platform', 'Good performance', 'Mature technology'], + ARRAY['Java dependency', 'Limited high-end graphics', 'Learning curve', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Gaming', '2D Games', '3D Games', 'Mobile Games', 'Cross-platform Games']), +('Phaser', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'easy', 75, 95, + ARRAY['Web games', 'Mobile games', '2D games', 'HTML5 games', 'Browser games'], + ARRAY['Web technologies', 'Easy learning', 'Fast development', 'Large ecosystem', 'Cross-platform'], + ARRAY['Web limitations', 'Performance limitations', 'Browser dependency', 'Limited 3D'], + 'MIT', + ARRAY['Web Games', '2D Games', 'HTML5 Games', 'Browser Games', 'Mobile Games']), +('Three.js', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web 3D graphics', '3D games', 'WebGL applications', 'Interactive 3D', 'Browser-based 3D'], + ARRAY['Web technologies', 'Excellent 3D support', 'Large ecosystem', 'Cross-platform', 'Fast development'], + ARRAY['Web limitations', 'Performance limitations', 'Browser dependency', 'Complex learning'], + 'MIT', + ARRAY['3D Graphics', 'Web Games', 'WebGL Apps', 'Interactive 3D', 'Browser Applications']), +('Babylon.js', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Web 3D graphics', '3D games', 'WebGL applications', 'Interactive 3D', 'Browser-based 3D'], + ARRAY['Web technologies', 'Excellent 3D support', 'Microsoft backing', 'Cross-platform', 'Professional tools'], + ARRAY['Web limitations', 'Performance limitations', 'Browser dependency', 'Complex learning'], + 'Apache 2.0', + ARRAY['3D Graphics', 'Web Games', 'WebGL Apps', 'Interactive 3D', 'Browser Applications']), +('A-Frame', ARRAY['web', 'ios', 'android', 'vr'], 'cross-platform', 'javascript', 80, 'easy', 75, 95, + ARRAY['Web VR', 'AR applications', '3D web experiences', 'Interactive 3D', 'Browser-based VR'], + ARRAY['Web technologies', 'Easy VR development', 'Cross-platform', 'HTML-based', 'Fast development'], + ARRAY['Web limitations', 'Performance limitations', 'Browser dependency', 'Limited features'], + 'MIT', + ARRAY['VR Applications', 'AR Applications', '3D Web', 'Interactive 3D', 'Browser Applications']), +('ARKit', ARRAY['ios'], 'native', 'swift', 95, 'hard', 100, 0, + ARRAY['iOS AR apps', 'Augmented reality', '3D applications', 'Interactive experiences'], + ARRAY['Apple backing', 'Excellent performance', 'Full iOS integration', 'Professional tools', 'Latest AR features'], + ARRAY['iOS only', 'Requires newer devices', 'Complex development', 'Limited to Apple ecosystem'], + 'Apple', + ARRAY['AR Applications', 'iOS Apps', '3D Applications', 'Interactive Experiences', 'Apple Applications']), +('ARCore', ARRAY['android'], 'native', 'java', 95, 'hard', 100, 0, + ARRAY['Android AR apps', 'Augmented reality', '3D applications', 'Interactive experiences'], + ARRAY['Google backing', 'Excellent performance', 'Full Android integration', 'Professional tools', 'Latest AR features'], + ARRAY['Android only', 'Requires newer devices', 'Complex development', 'Limited to Google ecosystem'], + 'Apache 2.0', + ARRAY['AR Applications', 'Android Apps', '3D Applications', 'Interactive Experiences', 'Google Applications']), +('Vuforia', ARRAY['ios', 'android'], 'cross-platform', 'csharp', 90, 'hard', 95, 85, + ARRAY['Cross-platform AR', 'Image recognition', 'Object tracking', 'Enterprise AR'], + ARRAY['Cross-platform', 'Excellent tracking', 'Enterprise features', 'Professional tools', 'Good documentation'], + ARRAY['High cost', 'Complex setup', 'Steep learning curve', 'Resource intensive'], + 'Vuforia', + ARRAY['AR Applications', 'Image Recognition', 'Object Tracking', 'Enterprise AR', 'Cross-platform Apps']), +('Unity AR Foundation', ARRAY['ios', 'android'], 'cross-platform', 'csharp', 92, 'hard', 95, 90, + ARRAY['Cross-platform AR', 'Unity-based AR', '3D AR applications', 'Interactive experiences'], + ARRAY['Unity ecosystem', 'Cross-platform', 'Excellent tools', 'Large asset store', 'Professional features'], + ARRAY['Unity dependency', 'Complex setup', 'Steep learning curve', 'Resource intensive'], + 'Unity', + ARRAY['AR Applications', 'Unity Apps', '3D Applications', 'Interactive Experiences', 'Cross-platform Apps']), + +('Unreal AR', ARRAY['ios', 'android'], 'cross-platform', 'c++', 95, 'hard', 98, 80, + ARRAY['High-end AR', '3D AR applications', 'Cinematic AR', 'Enterprise AR'], + ARRAY['Best graphics', 'Professional tools', 'Multi-platform', 'High performance', 'Blueprint scripting'], + ARRAY['Very large app size', 'Steep learning curve', 'Resource intensive', 'High cost'], + 'Unreal', + ARRAY['High-end AR', '3D Applications', 'Cinematic Experiences', 'Enterprise AR', 'Entertainment']), +('OpenCV', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'cpp', 90, 'hard', 85, 85, + ARRAY['Computer vision', 'Image processing', 'AR applications', 'Mobile vision apps'], + ARRAY['Open source', 'Excellent CV features', 'Multi-platform', 'Large ecosystem', 'Professional tools'], + ARRAY['Steep learning curve', 'Complex setup', 'Resource intensive', 'C++ complexity'], + 'BSD', + ARRAY['Computer Vision', 'Image Processing', 'AR Applications', 'Mobile Vision', 'Enterprise Apps']), +('TensorFlow Lite', ARRAY['ios', 'android', 'web'], 'cross-platform', 'python', 88, 'hard', 80, 85, + ARRAY['Mobile ML', 'On-device AI', 'ML applications', 'Edge computing'], + ARRAY['Google backing', 'On-device processing', 'Multi-platform', 'Large model support', 'Fast inference'], + ARRAY['Complex setup', 'Steep learning curve', 'Resource intensive', 'Limited model size'], + 'Apache 2.0', + ARRAY['Mobile ML', 'On-device AI', 'ML Applications', 'Edge Computing', 'AI Applications']), +('Core ML', ARRAY['ios'], 'native', 'swift', 92, 'medium', 100, 0, + ARRAY['iOS ML apps', 'On-device AI', 'ML applications', 'Apple ecosystem AI'], + ARRAY['Apple backing', 'Excellent performance', 'Full iOS integration', 'Easy deployment', 'Good tooling'], + ARRAY['iOS only', 'Limited to Apple ecosystem', 'Apple dependency', 'Limited model support'], + 'Apple', + ARRAY['iOS ML', 'On-device AI', 'ML Applications', 'Apple Ecosystem', 'AI Applications']), +('ML Kit', ARRAY['android'], 'native', 'java', 92, 'medium', 100, 0, + ARRAY['Android ML apps', 'On-device AI', 'ML applications', 'Google ecosystem AI'], + ARRAY['Google backing', 'Excellent performance', 'Full Android integration', 'Easy deployment', 'Good tooling'], + ARRAY['Android only', 'Limited to Google ecosystem', 'Google dependency', 'Limited model support'], + 'Apache 2.0', + ARRAY['Android ML', 'On-device AI', 'ML Applications', 'Google Ecosystem', 'AI Applications']), +('PyTorch Mobile', ARRAY['ios', 'android'], 'cross-platform', 'python', 85, 'hard', 80, 85, + ARRAY['Mobile ML', 'On-device AI', 'ML applications', 'Edge computing'], + ARRAY['Open source', 'Python ecosystem', 'Multi-platform', 'Dynamic graphs', 'Research-friendly'], + ARRAY['Steep learning curve', 'Complex setup', 'Resource intensive', 'Python dependency'], + 'BSD', + ARRAY['Mobile ML', 'On-device AI', 'ML Applications', 'Edge Computing', 'AI Applications']), +('Firebase ML', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 85, 'medium', 80, 90, + ARRAY['Mobile ML', 'Cloud AI', 'ML applications', 'Firebase ecosystem'], + ARRAY['Google backing', 'Cloud-based', 'Multi-platform', 'Easy integration', 'Good documentation'], + ARRAY['Internet dependency', 'Google dependency', 'Limited on-device features', 'Cost for scale'], + 'Firebase', + ARRAY['Mobile ML', 'Cloud AI', 'ML Applications', 'Firebase Ecosystem', 'AI Applications']), +('AWS Amplify', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 85, 'medium', 80, 90, + ARRAY['Mobile apps', 'Web apps', 'Full-stack apps', 'Serverless apps'], + ARRAY['AWS integration', 'Multi-platform', 'Full-stack features', 'Easy deployment', 'Good tooling'], + ARRAY['AWS dependency', 'Internet dependency', 'Cost for scale', 'Complex setup'], + 'Apache 2.0', + ARRAY['Mobile Apps', 'Web Apps', 'Full-stack Apps', 'Serverless Apps', 'AWS Applications']), +('Azure Mobile Apps', ARRAY['ios', 'android', 'web'], 'cross-platform', 'csharp', 85, 'medium', 80, 90, + ARRAY['Mobile apps', 'Web apps', 'Full-stack apps', 'Enterprise apps'], + ARRAY['Azure integration', 'Multi-platform', 'Full-stack features', 'Easy deployment', 'Good tooling'], + ARRAY['Azure dependency', 'Internet dependency', 'Cost for scale', 'Complex setup'], + 'Microsoft', + ARRAY['Mobile Apps', 'Web Apps', 'Full-stack Apps', 'Enterprise Apps', 'Azure Applications']), +('Google Cloud Mobile', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 85, 'medium', 80, 90, + ARRAY['Mobile apps', 'Web apps', 'Full-stack apps', 'Cloud-based apps'], + ARRAY['Google Cloud integration', 'Multi-platform', 'Full-stack features', 'Easy deployment', 'Good tooling'], + ARRAY['Google dependency', 'Internet dependency', 'Cost for scale', 'Complex setup'], + 'Google Cloud', + ARRAY['Mobile Apps', 'Web Apps', 'Full-stack Apps', 'Cloud Apps', 'Google Applications']), +('Realm Database', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 90, 'easy', 85, 95, + ARRAY['Mobile databases', 'Offline-first apps', 'Real-time sync', 'Cross-platform data'], + ARRAY['Excellent performance', 'Cross-platform', 'Real-time sync', 'Easy integration', 'Good tooling'], + ARRAY['Limited features', 'Learning curve', 'Setup complexity', 'Cost for enterprise'], + 'Apache 2.0', + ARRAY['Mobile Databases', 'Offline-first Apps', 'Real-time Sync', 'Cross-platform Data', 'Enterprise Apps']), +('SQLite', ARRAY['ios', 'android', 'web', 'desktop'], 'cross-platform', 'c', 95, 'easy', 90, 100, + ARRAY['Mobile databases', 'Embedded databases', 'Offline storage', 'Cross-platform data'], + ARRAY['Lightweight', 'Fast performance', 'Cross-platform', 'Reliable', 'No server needed'], + ARRAY['Limited features', 'No real-time sync', 'Basic SQL', 'Limited scalability'], + 'Public Domain', + ARRAY['Mobile Databases', 'Embedded Databases', 'Offline Storage', 'Cross-platform Data', 'Enterprise Apps']), +('Firebase Realtime Database', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 88, 'easy', 85, 95, + ARRAY['Real-time databases', 'Mobile apps', 'Web apps', 'Collaborative apps'], + ARRAY['Real-time sync', 'Google backing', 'Multi-platform', 'Easy integration', 'Good documentation'], + ARRAY['Internet dependency', 'Google dependency', 'Cost for scale', 'Limited querying'], + 'Firebase', + ARRAY['Real-time Databases', 'Mobile Apps', 'Web Apps', 'Collaborative Apps', 'Google Applications']), +('Firestore', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 90, 'easy', 85, 95, + ARRAY['NoSQL databases', 'Mobile apps', 'Web apps', 'Real-time apps'], + ARRAY['Real-time sync', 'Google backing', 'Multi-platform', 'Good querying', 'Scalable'], + ARRAY['Internet dependency', 'Google dependency', 'Cost for scale', 'Complex setup'], + 'Firebase', + ARRAY['NoSQL Databases', 'Mobile Apps', 'Web Apps', 'Real-time Apps', 'Google Applications']), +('MongoDB Realm', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['NoSQL databases', 'Mobile apps', 'Web apps', 'Real-time apps'], + ARRAY['Real-time sync', 'Multi-platform', 'Good querying', 'Scalable', 'Easy integration'], + ARRAY['Internet dependency', 'Cost for scale', 'Complex setup', 'Learning curve'], + 'MongoDB', + ARRAY['NoSQL Databases', 'Mobile Apps', 'Web Apps', 'Real-time Apps', 'Enterprise Apps']), +('Couchbase Lite', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['NoSQL databases', 'Mobile apps', 'Offline-first apps', 'Real-time sync'], + ARRAY['Real-time sync', 'Multi-platform', 'Good performance', 'Scalable', 'Enterprise features'], + ARRAY['Complex setup', 'Learning curve', 'Cost for enterprise', 'Limited community'], + 'Apache 2.0', + ARRAY['NoSQL Databases', 'Mobile Apps', 'Offline-first Apps', 'Real-time Sync', 'Enterprise Apps']), +('PouchDB', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 85, 'easy', 80, 95, + ARRAY['NoSQL databases', 'Mobile apps', 'Offline-first apps', 'Web apps'], + ARRAY['Offline-first', 'Multi-platform', 'Easy integration', 'CouchDB sync', 'Lightweight'], + ARRAY['Limited features', 'Performance limitations', 'Learning curve', 'Limited scalability'], + 'Apache 2.0', + ARRAY['NoSQL Databases', 'Mobile Apps', 'Offline-first Apps', 'Web Apps', 'Enterprise Apps']), +('IndexedDB', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'medium', 75, 95, + ARRAY['Web databases', 'Mobile web apps', 'Offline storage', 'Browser-based storage'], + ARRAY['Browser native', 'No installation', 'Good performance', 'Web standard', 'Easy access'], + ARRAY['Browser dependency', 'Limited features', 'Complex API', 'Limited storage'], + 'W3C', + ARRAY['Web Databases', 'Mobile Web Apps', 'Offline Storage', 'Browser Storage', 'Web Applications']), +('WebSQL', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 75, 'easy', 70, 95, + ARRAY['Web databases', 'Mobile web apps', 'Offline storage', 'Browser-based storage'], + ARRAY['SQL interface', 'Browser native', 'No installation', 'Easy access', 'Familiar syntax'], + ARRAY['Deprecated', 'Browser dependency', 'Limited features', 'Limited support'], + 'W3C', + ARRAY['Web Databases', 'Mobile Web Apps', 'Offline Storage', 'Browser Storage', 'Web Applications']), + +('Local Storage', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 70, 'easy', 65, 95, + ARRAY['Web storage', 'Mobile web apps', 'Offline storage', 'Browser-based storage'], + ARRAY['Simple API', 'Browser native', 'No installation', 'Easy access', 'Good for small data'], + ARRAY['Limited storage', 'Browser dependency', 'No querying', 'Limited features'], + 'W3C', + ARRAY['Web Storage', 'Mobile Web Apps', 'Offline Storage', 'Browser Storage', 'Web Applications']), +('Session Storage', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 70, 'easy', 65, 95, + ARRAY['Web storage', 'Mobile web apps', 'Temporary storage', 'Browser-based storage'], + ARRAY['Simple API', 'Browser native', 'No installation', 'Easy access', 'Session-based'], + ARRAY['Limited storage', 'Browser dependency', 'Temporary only', 'Limited features'], + 'W3C', + ARRAY['Web Storage', 'Mobile Web Apps', 'Temporary Storage', 'Browser Storage', 'Web Applications']), +('Cookies', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 65, 'easy', 60, 95, + ARRAY['Web storage', 'Mobile web apps', 'User tracking', 'Browser-based storage'], + ARRAY['Universal support', 'Simple API', 'Browser native', 'Easy access', 'HTTP integration'], + ARRAY['Limited storage', 'Security issues', 'Browser dependency', 'Performance impact'], + 'W3C', + ARRAY['Web Storage', 'Mobile Web Apps', 'User Tracking', 'Browser Storage', 'Web Applications']), +('Web Workers', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'medium', 75, 95, + ARRAY['Web processing', 'Mobile web apps', 'Background tasks', 'Browser-based processing'], + ARRAY['Background processing', 'Browser native', 'No UI blocking', 'Multi-threading', 'Good performance'], + ARRAY['Browser dependency', 'Limited features', 'Complex setup', 'Learning curve'], + 'W3C', + ARRAY['Web Processing', 'Mobile Web Apps', 'Background Tasks', 'Browser Processing', 'Web Applications']), +('Service Workers', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web processing', 'Mobile web apps', 'Offline functionality', 'PWA features'], + ARRAY['Offline support', 'Background sync', 'Push notifications', 'PWA features', 'Browser native'], + ARRAY['Browser dependency', 'Complex setup', 'Learning curve', 'Limited browser support'], + 'W3C', + ARRAY['Web Processing', 'Mobile Web Apps', 'Offline Functionality', 'PWA Features', 'Web Applications']), +('Push Notifications', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web notifications', 'Mobile notifications', 'User engagement', 'Browser-based notifications'], + ARRAY['User engagement', 'Browser native', 'Cross-platform', 'Easy integration', 'Good reach'], + ARRAY['Browser dependency', 'User permission required', 'Limited features', 'Privacy concerns'], + 'W3C', + ARRAY['Web Notifications', 'Mobile Notifications', 'User Engagement', 'Browser Notifications', 'Web Applications']), +('Geolocation API', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'easy', 80, 95, + ARRAY['Location services', 'Mobile web apps', 'GPS applications', 'Browser-based location'], + ARRAY['Browser native', 'Easy integration', 'Cross-platform', 'Good accuracy', 'User permission'], + ARRAY['Browser dependency', 'User permission required', 'Privacy concerns', 'Battery usage'], + 'W3C', + ARRAY['Location Services', 'Mobile Web Apps', 'GPS Applications', 'Browser Location', 'Web Applications']), +('Camera API', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'medium', 75, 95, + ARRAY['Camera applications', 'Mobile web apps', 'Photo capture', 'Browser-based camera'], + ARRAY['Browser native', 'Easy integration', 'Cross-platform', 'Good quality', 'User permission'], + ARRAY['Browser dependency', 'User permission required', 'Limited features', 'Privacy concerns'], + 'W3C', + ARRAY['Camera Applications', 'Mobile Web Apps', 'Photo Capture', 'Browser Camera', 'Web Applications']), +('Microphone API', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'medium', 75, 95, + ARRAY['Audio applications', 'Mobile web apps', 'Voice recording', 'Browser-based audio'], + ARRAY['Browser native', 'Easy integration', 'Cross-platform', 'Good quality', 'User permission'], + ARRAY['Browser dependency', 'User permission required', 'Limited features', 'Privacy concerns'], + 'W3C', + ARRAY['Audio Applications', 'Mobile Web Apps', 'Voice Recording', 'Browser Audio', 'Web Applications']), +('Bluetooth API', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 75, 'medium', 70, 95, + ARRAY['Bluetooth applications', 'Mobile web apps', 'IoT integration', 'Browser-based Bluetooth'], + ARRAY['Browser native', 'Easy integration', 'Cross-platform', 'IoT support', 'User permission'], + ARRAY['Browser dependency', 'User permission required', 'Limited features', 'Security concerns'], + 'W3C', + ARRAY['Bluetooth Applications', 'Mobile Web Apps', 'IoT Integration', 'Browser Bluetooth', 'Web Applications']), +('NFC API', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 75, 'medium', 70, 95, + ARRAY['NFC applications', 'Mobile web apps', 'Contactless payments', 'Browser-based NFC'], + ARRAY['Browser native', 'Easy integration', 'Cross-platform', 'Payment support', 'User permission'], + ARRAY['Browser dependency', 'User permission required', 'Limited features', 'Security concerns'], + 'W3C', + ARRAY['NFC Applications', 'Mobile Web Apps', 'Contactless Payments', 'Browser NFC', 'Web Applications']), +('WebRTC', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Video chat', 'Audio chat', 'Real-time communication', 'Browser-based communication'], + ARRAY['Browser native', 'Real-time communication', 'Cross-platform', 'No plugins', 'Good quality'], + ARRAY['Browser dependency', 'Complex setup', 'Learning curve', 'Security concerns'], + 'W3C', + ARRAY['Video Chat', 'Audio Chat', 'Real-time Communication', 'Browser Communication', 'Web Applications']), +('WebSockets', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Real-time apps', 'Live updates', 'Chat applications', 'Browser-based communication'], + ARRAY['Real-time communication', 'Browser native', 'Cross-platform', 'Low latency', 'Good performance'], + ARRAY['Browser dependency', 'Complex setup', 'Learning curve', 'Security concerns'], + 'W3C', + ARRAY['Real-time Apps', 'Live Updates', 'Chat Applications', 'Browser Communication', 'Web Applications']), +('WebAssembly', ARRAY['web', 'ios', 'android'], 'cross-platform', 'c', 90, 'hard', 85, 95, + ARRAY['High-performance web', 'Web games', 'Web applications', 'Browser-based computing'], + ARRAY['Near-native performance', 'Browser native', 'Multi-language support', 'Cross-platform', 'Good performance'], + ARRAY['Browser dependency', 'Complex setup', 'Learning curve', 'Limited debugging'], + 'W3C', + ARRAY['High-performance Web', 'Web Games', 'Web Applications', 'Browser Computing', 'Web Applications']), +('Progressive Web Apps', ARRAY['web', 'ios', 'android'], 'hybrid', 'javascript', 80, 'medium', 75, 95, + ARRAY['Web applications', 'Mobile applications', 'Offline functionality', 'Cross-platform apps'], + ARRAY['Cross-platform', 'No app store', 'Instant updates', 'Web technologies', 'Offline support'], + ARRAY['Limited native features', 'Browser dependency', 'Performance limitations', 'Platform restrictions'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Applications', 'Offline Functionality', 'Cross-platform Apps', 'Web Apps']), +('Hybrid Apps', ARRAY['ios', 'android', 'web'], 'hybrid', 'javascript', 75, 'easy', 70, 85, + ARRAY['Mobile applications', 'Web-based apps', 'Cross-platform apps', 'Rapid development'], + ARRAY['Cross-platform', 'Web technologies', 'Fast development', 'Single codebase', 'Easy deployment'], + ARRAY['Performance limitations', 'WebView dependency', 'Less native feel', 'Battery usage'], + 'Various', + ARRAY['Mobile Applications', 'Web-based Apps', 'Cross-platform Apps', 'Rapid Development', 'Enterprise Apps']), +('Native Apps', ARRAY['ios', 'android'], 'native', 'various', 98, 'hard', 100, 0, + ARRAY['Mobile applications', 'High-performance apps', 'Platform-specific apps', 'Enterprise apps'], + ARRAY['Best performance', 'Full platform access', 'Native features', 'Excellent UX', 'Platform optimization'], + ARRAY['Platform-specific', 'Higher cost', 'Longer development', 'Separate codebases'], + 'Various', + ARRAY['Mobile Applications', 'High-performance Apps', 'Platform-specific Apps', 'Enterprise Apps', 'Native Apps']), +('Cross-platform Apps', ARRAY['ios', 'android', 'web'], 'cross-platform', 'various', 85, 'medium', 85, 90, + ARRAY['Mobile applications', 'Web applications', 'Single codebase apps', 'Enterprise apps'], + ARRAY['Single codebase', 'Cost-effective', 'Faster development', 'Cross-platform', 'Good performance'], + ARRAY['Performance limitations', 'Platform-specific bugs', 'Limited native features', 'Complex setup'], + 'Various', + ARRAY['Mobile Applications', 'Web Applications', 'Single Codebase Apps', 'Enterprise Apps', 'Cross-platform Apps']), + +('Single Page Applications', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Interactive web apps', 'Cross-platform apps'], + ARRAY['Fast user experience', 'Cross-platform', 'Web technologies', 'Good performance', 'Modern UX'], + ARRAY['SEO challenges', 'Browser dependency', 'Complex setup', 'Learning curve'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Interactive Web Apps', 'Cross-platform Apps', 'Web Apps']), +('Multi-page Applications', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 80, 'easy', 75, 95, + ARRAY['Web applications', 'Mobile web apps', 'Content-based apps', 'Cross-platform apps'], + ARRAY['SEO friendly', 'Cross-platform', 'Web technologies', 'Easy navigation', 'Good for content'], + ARRAY['Slower navigation', 'Browser dependency', 'Less interactive', 'Loading times'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Content-based Apps', 'Cross-platform Apps', 'Web Apps']), +('Server-side Rendering', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'SEO-friendly apps', 'Cross-platform apps'], + ARRAY['SEO friendly', 'Fast initial load', 'Cross-platform', 'Good performance', 'Modern approach'], + ARRAY['Complex setup', 'Server dependency', 'Learning curve', 'Resource intensive'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'SEO-friendly Apps', 'Cross-platform Apps', 'Web Apps']), +('Static Site Generation', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 90, 'easy', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'Content sites', 'Cross-platform apps'], + ARRAY['Fast performance', 'SEO friendly', 'Cross-platform', 'Easy deployment', 'Good security'], + ARRAY['Limited dynamic content', 'Build time dependency', 'Learning curve', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Content Sites', 'Cross-platform Apps', 'Web Apps']), +('Jamstack', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'Modern web apps', 'Cross-platform apps'], + ARRAY['Fast performance', 'Good security', 'Cross-platform', 'Modern approach', 'Scalable'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Modern Web Apps', 'Cross-platform Apps', 'Web Apps']), +('Headless CMS', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Content management', 'Web applications', 'Mobile apps', 'Cross-platform apps'], + ARRAY['Content-focused', 'Cross-platform', 'API-driven', 'Flexible frontend', 'Good scalability'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Various', + ARRAY['Content Management', 'Web Applications', 'Mobile Apps', 'Cross-platform Apps', 'Enterprise Apps']), +('Content Management Systems', ARRAY['web', 'ios', 'android'], 'cross-platform', 'php', 80, 'easy', 75, 85, + ARRAY['Content management', 'Web applications', 'Blogs', 'Cross-platform apps'], + ARRAY['Easy content management', 'Cross-platform', 'Large ecosystem', 'Good documentation', 'Easy setup'], + ARRAY['Performance limitations', 'Security concerns', 'Learning curve', 'Setup complexity'], + 'Various', + ARRAY['Content Management', 'Web Applications', 'Blogs', 'Cross-platform Apps', 'Enterprise Apps']), +('E-commerce Platforms', ARRAY['web', 'ios', 'android'], 'cross-platform', 'php', 85, 'medium', 80, 85, + ARRAY['E-commerce', 'Web applications', 'Mobile commerce', 'Cross-platform apps'], + ARRAY['E-commerce features', 'Cross-platform', 'Large ecosystem', 'Good documentation', 'Payment integration'], + ARRAY['Performance limitations', 'Security concerns', 'Learning curve', 'Setup complexity'], + 'Various', + ARRAY['E-commerce', 'Web Applications', 'Mobile Commerce', 'Cross-platform Apps', 'Enterprise Apps']), +('Headless E-commerce', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['E-commerce', 'Web applications', 'Mobile commerce', 'Cross-platform apps'], + ARRAY['API-driven', 'Cross-platform', 'Flexible frontend', 'Good performance', 'Scalable'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Various', + ARRAY['E-commerce', 'Web Applications', 'Mobile Commerce', 'Cross-platform Apps', 'Enterprise Apps']), +('Mobile Backend as a Service', ARRAY['ios', 'android', 'web'], 'cross-platform', 'javascript', 85, 'easy', 80, 95, + ARRAY['Mobile backends', 'Web backends', 'Serverless apps', 'Cross-platform apps'], + ARRAY['No server management', 'Cross-platform', 'Easy integration', 'Good scalability', 'Fast development'], + ARRAY['Vendor dependency', 'Cost for scale', 'Limited customization', 'Internet dependency'], + 'Various', + ARRAY['Mobile Backends', 'Web Backends', 'Serverless Apps', 'Cross-platform Apps', 'Enterprise Apps']), +('Serverless Functions', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Web backends', 'Mobile backends', 'Serverless apps', 'Cross-platform apps'], + ARRAY['No server management', 'Cross-platform', 'Good scalability', 'Pay-per-use', 'Fast development'], + ARRAY['Vendor dependency', 'Cost for scale', 'Limited customization', 'Internet dependency'], + 'Various', + ARRAY['Web Backends', 'Mobile Backends', 'Serverless Apps', 'Cross-platform Apps', 'Enterprise Apps']), +('Container-based Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 90, 'hard', 85, 90, + ARRAY['Web applications', 'Mobile apps', 'Enterprise apps', 'Cross-platform apps'], + ARRAY['Consistent environment', 'Good scalability', 'Cross-platform', 'Easy deployment', 'Good isolation'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Setup complexity'], + 'Various', + ARRAY['Web Applications', 'Mobile Apps', 'Enterprise Apps', 'Cross-platform Apps', 'Container Apps']), +('Microservices Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'hard', 85, 90, + ARRAY['Web applications', 'Mobile apps', 'Enterprise apps', 'Cross-platform apps'], + ARRAY['Scalable architecture', 'Cross-platform', 'Independent deployment', 'Good performance', 'Flexible'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Setup complexity'], + 'Various', + ARRAY['Web Applications', 'Mobile Apps', 'Enterprise Apps', 'Cross-platform Apps', 'Microservices Apps']), +('Monolithic Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'easy', 80, 90, + ARRAY['Web applications', 'Mobile apps', 'Enterprise apps', 'Cross-platform apps'], + ARRAY['Simple architecture', 'Easy development', 'Cross-platform', 'Good performance', 'Easy deployment'], + ARRAY['Limited scalability', 'Complex maintenance', 'Single point of failure', 'Setup complexity'], + 'Various', + ARRAY['Web Applications', 'Mobile Apps', 'Enterprise Apps', 'Cross-platform Apps', 'Monolithic Apps']), +('API-first Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Web applications', 'Mobile apps', 'Enterprise apps', 'Cross-platform apps'], + ARRAY['API-driven', 'Cross-platform', 'Flexible frontend', 'Good scalability', 'Modern approach'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Various', + ARRAY['Web Applications', 'Mobile Apps', 'Enterprise Apps', 'Cross-platform Apps', 'API-first Apps']), +('GraphQL Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Web applications', 'Mobile apps', 'Enterprise apps', 'Cross-platform apps'], + ARRAY['Flexible queries', 'Cross-platform', 'Good performance', 'Modern approach', 'Developer friendly'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Apps', 'Enterprise Apps', 'Cross-platform Apps', 'GraphQL Apps']), +('REST API Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'easy', 80, 95, + ARRAY['Web applications', 'Mobile apps', 'Enterprise apps', 'Cross-platform apps'], + ARRAY['Standard approach', 'Cross-platform', 'Good performance', 'Easy integration', 'Well documented'], + ARRAY['Limited flexibility', 'Complex setup', 'Multiple requests', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Apps', 'Enterprise Apps', 'Cross-platform Apps', 'REST API Apps']), +('WebSocket Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Real-time apps', 'Chat applications', 'Live updates', 'Cross-platform apps'], + ARRAY['Real-time communication', 'Cross-platform', 'Good performance', 'Modern approach', 'Interactive'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Setup complexity'], + 'Open Web', + ARRAY['Real-time Apps', 'Chat Applications', 'Live Updates', 'Cross-platform Apps', 'WebSocket Apps']), +('WebRTC Apps', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Video chat', 'Audio chat', 'Real-time communication', 'Cross-platform apps'], + ARRAY['Real-time communication', 'Cross-platform', 'No plugins', 'Good quality', 'Browser native'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Setup complexity'], + 'Open Web', + ARRAY['Video Chat', 'Audio Chat', 'Real-time Communication', 'Cross-platform Apps', 'WebRTC Apps']), + +('Progressive Enhancement', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Accessible apps', 'Cross-platform apps'], + ARRAY['Accessibility focus', 'Cross-platform', 'Graceful degradation', 'Good performance', 'Inclusive'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Accessible Apps', 'Cross-platform Apps', 'Web Apps']), +('Mobile-first Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'easy', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'Responsive apps', 'Cross-platform apps'], + ARRAY['Mobile focus', 'Cross-platform', 'Good performance', 'Modern approach', 'User-friendly'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Responsive Apps', 'Cross-platform Apps', 'Web Apps']), +('Responsive Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Adaptive apps', 'Cross-platform apps'], + ARRAY['Multi-device support', 'Cross-platform', 'Good user experience', 'Modern approach', 'Flexible'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Adaptive Apps', 'Cross-platform Apps', 'Web Apps']), +('Adaptive Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'medium', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Device-specific apps', 'Cross-platform apps'], + ARRAY['Device-specific', 'Cross-platform', 'Good performance', 'Modern approach', 'User-friendly'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Device-specific Apps', 'Cross-platform Apps', 'Web Apps']), +('Touch-optimized Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'medium', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'Touch apps', 'Cross-platform apps'], + ARRAY['Touch-friendly', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intuitive'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Touch Apps', 'Cross-platform Apps', 'Web Apps']), +('Gesture-based Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Gesture apps', 'Cross-platform apps'], + ARRAY['Gesture support', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intuitive'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Gesture Apps', 'Cross-platform Apps', 'Web Apps']), +('Voice-activated Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Voice apps', 'Cross-platform apps'], + ARRAY['Voice support', 'Cross-platform', 'Good user experience', 'Modern approach', 'Accessible'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Voice Apps', 'Cross-platform Apps', 'Web Apps']), +('AI-powered Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'hard', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'AI apps', 'Cross-platform apps'], + ARRAY['AI features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intelligent'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'AI Apps', 'Cross-platform Apps', 'AI Applications']), +('Machine Learning Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'hard', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'ML apps', 'Cross-platform apps'], + ARRAY['ML features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intelligent'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'ML Apps', 'Cross-platform Apps', 'ML Applications']), +('Deep Learning Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 88, 'hard', 85, 95, + ARRAY['Web applications', 'Mobile web apps', 'Deep learning apps', 'Cross-platform apps'], + ARRAY['Deep learning features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intelligent'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Deep Learning Apps', 'Cross-platform Apps', 'Deep Learning Apps']), +('Computer Vision Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'CV apps', 'Cross-platform apps'], + ARRAY['CV features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intelligent'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'CV Apps', 'Cross-platform Apps', 'CV Applications']), +('Natural Language Processing Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'NLP apps', 'Cross-platform apps'], + ARRAY['NLP features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Intelligent'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'NLP Apps', 'Cross-platform Apps', 'NLP Applications']), +('Augmented Reality Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'AR apps', 'Cross-platform apps'], + ARRAY['AR features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Immersive'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'AR Apps', 'Cross-platform Apps', 'AR Applications']), +('Virtual Reality Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'VR apps', 'Cross-platform apps'], + ARRAY['VR features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Immersive'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'VR Apps', 'Cross-platform Apps', 'VR Applications']), +('Mixed Reality Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'MR apps', 'Cross-platform apps'], + ARRAY['MR features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Immersive'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'MR Apps', 'Cross-platform Apps', 'MR Applications']), +('IoT-enabled Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'IoT apps', 'Cross-platform apps'], + ARRAY['IoT features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Connected'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'IoT Apps', 'Cross-platform Apps', 'IoT Applications']), +('Blockchain-enabled Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Blockchain apps', 'Cross-platform apps'], + ARRAY['Blockchain features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Secure'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Blockchain Apps', 'Cross-platform Apps', 'Blockchain Applications']), +('Cryptocurrency-enabled Design', ARRAY['web', 'ios', 'android'], 'cross-platform', 'javascript', 85, 'hard', 80, 95, + ARRAY['Web applications', 'Mobile web apps', 'Cryptocurrency apps', 'Cross-platform apps'], + ARRAY['Cryptocurrency features', 'Cross-platform', 'Good user experience', 'Modern approach', 'Financial'], + ARRAY['Complex setup', 'Learning curve', 'Multiple technologies', 'Setup complexity'], + 'Open Web', + ARRAY['Web Applications', 'Mobile Web Apps', 'Cryptocurrency Apps', 'Cross-platform Apps', 'Cryptocurrency Applications']); + + INSERT INTO devops_technologies ( + name, category, complexity_level, scalability_support, cloud_native, enterprise_ready, + automation_capabilities, integration_options, primary_use_cases, strengths, weaknesses, + license_type, domain +) VALUES +('Docker', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container orchestration', 'Image building', 'Registry management', 'Multi-stage builds'], + ARRAY['Kubernetes', 'CI/CD pipelines', 'Cloud platforms', 'Monitoring tools'], + ARRAY['Application containerization', 'Development environments', 'Microservices deployment', 'CI/CD pipelines'], + ARRAY['Consistent environments', 'Easy deployment', 'Resource efficiency', 'Version control for infrastructure'], + ARRAY['Learning curve', 'Security considerations', 'Networking complexity', 'Storage management'], + 'Apache 2.0', + ARRAY['Microservices', 'CI/CD Pipelines', 'Cloud Infrastructure', 'Development Environments', 'Enterprise Deployments']), +('GitHub Actions', 'ci-cd', 'easy', 'good', true, true, + ARRAY['Workflow automation', 'Build and test', 'Deployment', 'Scheduled tasks'], + ARRAY['GitHub repositories', 'Cloud services', 'Third-party tools', 'Slack notifications'], + ARRAY['CI/CD pipelines', 'Automated testing', 'Deployment automation', 'Code quality checks'], + ARRAY['GitHub integration', 'Free for public repos', 'Easy setup', 'Large marketplace', 'YAML configuration'], + ARRAY['GitHub dependency', 'Limited minutes on free tier', 'Less advanced than Jenkins', 'Vendor lock-in'], + 'MIT', + ARRAY['CI/CD Pipelines', 'Startups', 'Open Source Projects', 'Web Development', 'SaaS Platforms']), +('Jenkins', 'ci-cd', 'hard', 'excellent', false, true, + ARRAY['Build automation', 'Testing integration', 'Deployment pipelines', 'Plugin ecosystem'], + ARRAY['Multiple SCMs', 'Cloud platforms', 'Testing frameworks', 'Notification systems'], + ARRAY['Enterprise CI/CD', 'Complex pipelines', 'Legacy system integration', 'Custom workflows'], + ARRAY['Highly customizable', 'Large plugin ecosystem', 'Self-hosted', 'Enterprise features', 'Open source'], + ARRAY['Complex setup', 'Maintenance overhead', 'Security management', 'Plugin dependencies'], + 'MIT', + ARRAY['Enterprise CI/CD', 'Legacy Systems', 'Complex Pipelines', 'Financial Services', 'Large-scale Deployments']), +('Kubernetes', 'orchestration', 'hard', 'excellent', true, true, + ARRAY['Container orchestration', 'Auto-scaling', 'Service discovery', 'Rolling deployments'], + ARRAY['Docker', 'Cloud providers', 'CI/CD tools', 'Monitoring solutions'], + ARRAY['Container orchestration', 'Microservices management', 'Auto-scaling applications', 'High availability systems'], + ARRAY['Industry standard', 'Powerful orchestration', 'Self-healing', 'Horizontal scaling', 'Cloud native'], + ARRAY['Steep learning curve', 'Operational complexity', 'Resource overhead', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Microservices', 'Cloud Infrastructure', 'High Availability Systems', 'Enterprise Deployments', 'Big Data']), +('Terraform', 'infrastructure', 'medium', 'excellent', true, true, + ARRAY['Infrastructure provisioning', 'State management', 'Resource planning', 'Multi-cloud support'], + ARRAY['AWS', 'Azure', 'GCP', 'Version control', 'CI/CD pipelines'], + ARRAY['Infrastructure as Code', 'Multi-cloud deployments', 'Resource management', 'Infrastructure automation'], + ARRAY['Multi-cloud support', 'Declarative syntax', 'State management', 'Plan before apply', 'Large provider ecosystem'], + ARRAY['State file management', 'Learning curve', 'Provider limitations', 'Version compatibility'], + 'MPL 2.0', + ARRAY['Cloud Infrastructure', 'Multi-cloud Deployments', 'Enterprise Infrastructure', 'DevOps Automation', 'Data Centers']), +('Zabbix', 'monitoring', 'hard', 'good', false, true, + ARRAY['System monitoring', 'Network monitoring', 'Alerting', 'Reporting'], + ARRAY['Network devices', 'Servers', 'Cloud platforms', 'Notification systems'], + ARRAY['System monitoring', 'Network monitoring', 'Enterprise IT', 'DevOps workflows'], + ARRAY['Comprehensive monitoring', 'Good documentation', 'Large community', 'Enterprise features', 'Reliable'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited modern features'], + 'GPLv2', + ARRAY['System Monitoring', 'Network Monitoring', 'Enterprise IT', 'DevOps Workflows', 'Large Organizations']), +('Datadog', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Application monitoring', 'Infrastructure monitoring', 'APM', 'Log management'], + ARRAY['Cloud platforms', 'Applications', 'Databases', 'Third-party tools'], + ARRAY['Application monitoring', 'Infrastructure monitoring', 'APM', 'DevOps workflows'], + ARRAY['Comprehensive platform', 'Good integration', 'Easy setup', 'Cloud native', 'Enterprise features'], + ARRAY['Cost', 'Vendor lock-in', 'Learning curve', 'Resource intensive'], + 'Commercial', + ARRAY['Application Monitoring', 'Infrastructure Monitoring', 'APM', 'DevOps Workflows', 'Enterprise Cloud']), +('New Relic', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Application monitoring', 'APM', 'Infrastructure monitoring', 'Browser monitoring'], + ARRAY['Cloud platforms', 'Applications', 'Databases', 'Third-party tools'], + ARRAY['Application monitoring', 'APM', 'Infrastructure monitoring', 'DevOps workflows'], + ARRAY['Comprehensive APM', 'Good integration', 'Easy setup', 'Cloud native', 'Enterprise features'], + ARRAY['Cost', 'Vendor lock-in', 'Learning curve', 'Resource intensive'], + 'Commercial', + ARRAY['Application Monitoring', 'APM', 'Infrastructure Monitoring', 'DevOps Workflows', 'Enterprise Cloud']), +('Splunk', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Log management', 'SIEM', 'Monitoring', 'Analytics'], + ARRAY['Cloud platforms', 'Applications', 'Network devices', 'Security tools'], + ARRAY['Log management', 'SIEM', 'Security monitoring', 'DevOps workflows'], + ARRAY['Powerful analytics', 'Good integration', 'Enterprise features', 'Large community', 'Comprehensive'], + ARRAY['Cost', 'Complex setup', 'Learning curve', 'Resource intensive'], + 'Commercial', + ARRAY['Log Management', 'SIEM', 'Security Monitoring', 'DevOps Workflows', 'Enterprise IT']), +('ELK Stack', 'monitoring', 'hard', 'excellent', true, true, + ARRAY['Log management', 'Search', 'Analytics', 'Visualization'], + ARRAY['Applications', 'Servers', 'Network devices', 'Cloud platforms'], + ARRAY['Log management', 'Search analytics', 'Monitoring', 'DevOps workflows'], + ARRAY['Open source', 'Powerful search', 'Good integration', 'Scalable', 'Flexible'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Multiple components'], + 'Apache 2.0', + ARRAY['Log Management', 'Search Analytics', 'Monitoring', 'DevOps Workflows', 'Enterprise IT']), +('Consul', 'service-discovery', 'medium', 'excellent', true, true, + ARRAY['Service discovery', 'Configuration management', 'Health checking', 'Key-value store'], + ARRAY['Kubernetes', 'Docker', 'Cloud platforms', 'Applications'], + ARRAY['Service discovery', 'Configuration management', 'Health checking', 'DevOps workflows'], + ARRAY['Service discovery', 'Good integration', 'Open source', 'Cloud native', 'Enterprise features'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'MPL 2.0', + ARRAY['Service Discovery', 'Configuration Management', 'Health Checking', 'DevOps Workflows', 'Microservices']), +('etcd', 'service-discovery', 'medium', 'excellent', true, true, + ARRAY['Key-value store', 'Service discovery', 'Configuration management', 'Distributed coordination'], + ARRAY['Kubernetes', 'Docker', 'Cloud platforms', 'Applications'], + ARRAY['Service discovery', 'Configuration management', 'Distributed coordination', 'DevOps workflows'], + ARRAY['Distributed coordination', 'Good integration', 'Open source', 'Cloud native', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Limited features'], + 'Apache 2.0', + ARRAY['Service Discovery', 'Configuration Management', 'Distributed Coordination', 'DevOps Workflows', 'Microservices']), +('ZooKeeper', 'service-discovery', 'hard', 'good', false, true, + ARRAY['Distributed coordination', 'Configuration management', 'Service discovery', 'Synchronization'], + ARRAY['Kafka', 'Hadoop', 'Cloud platforms', 'Applications'], + ARRAY['Distributed coordination', 'Configuration management', 'Service discovery', 'DevOps workflows'], + ARRAY['Mature ecosystem', 'Good documentation', 'Large community', 'Reliable', 'Enterprise features'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Limited modern features'], + 'Apache 2.0', + ARRAY['Distributed Coordination', 'Configuration Management', 'Service Discovery', 'DevOps Workflows', 'Big Data']), +('Vault', 'security', 'medium', 'excellent', true, true, + ARRAY['Secret management', 'Encryption', 'Identity management', 'Key rotation'], + ARRAY['Cloud platforms', 'Applications', 'Databases', 'DevOps tools'], + ARRAY['Secret management', 'Encryption', 'Identity management', 'DevOps workflows'], + ARRAY['Secret management', 'Good integration', 'Open source', 'Cloud native', 'Enterprise features'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'MPL 2.0', + ARRAY['Secret Management', 'Encryption', 'Identity Management', 'DevOps Workflows', 'Enterprise Security']), +('Keycloak', 'security', 'medium', 'excellent', true, true, + ARRAY['Identity management', 'Single sign-on', 'Authentication', 'Authorization'], + ARRAY['Applications', 'Cloud platforms', 'Databases', 'DevOps tools'], + ARRAY['Identity management', 'Single sign-on', 'Authentication', 'DevOps workflows'], + ARRAY['Open source', 'Good integration', 'Standards compliant', 'Cloud native', 'Enterprise features'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Identity Management', 'Single Sign-on', 'Authentication', 'DevOps Workflows', 'Enterprise Security']), +('Okta', 'security', 'easy', 'excellent', true, true, + ARRAY['Identity management', 'Single sign-on', 'Multi-factor authentication', 'User management'], + ARRAY['Applications', 'Cloud platforms', 'Databases', 'DevOps tools'], + ARRAY['Identity management', 'Single sign-on', 'Multi-factor authentication', 'DevOps workflows'], + ARRAY['Comprehensive platform', 'Good integration', 'Easy setup', 'Cloud native', 'Enterprise features'], + ARRAY['Cost', 'Vendor lock-in', 'Learning curve', 'Resource intensive'], + 'Commercial', + ARRAY['Identity Management', 'Single Sign-on', 'Multi-factor Authentication', 'DevOps Workflows', 'Enterprise Security']), +('Auth0', 'security', 'easy', 'excellent', true, true, + ARRAY['Authentication', 'Authorization', 'Single sign-on', 'User management'], + ARRAY['Applications', 'Cloud platforms', 'Databases', 'DevOps tools'], + ARRAY['Authentication', 'Authorization', 'Single sign-on', 'DevOps workflows'], + ARRAY['Comprehensive platform', 'Good integration', 'Easy setup', 'Cloud native', 'Enterprise features'], + ARRAY['Cost', 'Vendor lock-in', 'Learning curve', 'Resource intensive'], + 'Commercial', + ARRAY['Authentication', 'Authorization', 'Single Sign-on', 'DevOps Workflows', 'Enterprise Security']), +('Let''s Encrypt', 'security', 'easy', 'excellent', true, true, + ARRAY['Certificate management', 'SSL/TLS', 'Automation', 'Renewal'], + ARRAY['Web servers', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Certificate management', 'SSL/TLS', 'Automation', 'DevOps workflows'], + ARRAY['Free certificates', 'Automation', 'Good integration', 'Open source', 'Easy to use'], + ARRAY['Limited features', 'Renewal requirements', 'Rate limits', 'Learning curve'], + 'Apache 2.0', + ARRAY['Certificate Management', 'SSL/TLS', 'Automation', 'DevOps Workflows', 'Web Security']), +('Certbot', 'security', 'easy', 'good', true, true, + ARRAY['Certificate management', 'SSL/TLS', 'Automation', 'Renewal'], + ARRAY['Web servers', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Certificate management', 'SSL/TLS', 'Automation', 'DevOps workflows'], + ARRAY['Free tool', 'Automation', 'Good integration', 'Open source', 'Easy to use'], + ARRAY['Limited features', 'Renewal requirements', 'Learning curve', 'Manual setup'], + 'Apache 2.0', + ARRAY['Certificate Management', 'SSL/TLS', 'Automation', 'DevOps Workflows', 'Web Security']), +('OpenSSL', 'security', 'hard', 'good', false, true, + ARRAY['Certificate management', 'SSL/TLS', 'Encryption', 'Key management'], + ARRAY['Web servers', 'Applications', 'Cloud platforms', 'DevOps tools'], + ARRAY['Certificate management', 'SSL/TLS', 'Encryption', 'DevOps workflows'], + ARRAY['Comprehensive tool', 'Open source', 'Good documentation', 'Large community', 'Reliable'], + ARRAY['Complex usage', 'Learning curve', 'Security risks', 'Manual configuration'], + 'Apache 2.0', + ARRAY['Certificate Management', 'SSL/TLS', 'Encryption', 'DevOps Workflows', 'Web Security']), +('Helm', 'deployment', 'medium', 'excellent', true, true, + ARRAY['Package management', 'Deployment', 'Configuration management', 'Release management'], + ARRAY['Kubernetes', 'Cloud platforms', 'CI/CD tools', 'DevOps tools'], + ARRAY['Package management', 'Deployment', 'Configuration management', 'DevOps workflows'], + ARRAY['Kubernetes packages', 'Good integration', 'Open source', 'Cloud native', 'Easy to use'], + ARRAY['Learning curve', 'Complex charts', 'Dependency management', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Package Management', 'Deployment', 'Configuration Management', 'DevOps Workflows', 'Kubernetes']), +('Istio', 'deployment', 'hard', 'excellent', true, true, + ARRAY['Service mesh', 'Traffic management', 'Security', 'Observability'], + ARRAY['Kubernetes', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Service mesh', 'Traffic management', 'Security', 'DevOps workflows'], + ARRAY['Service mesh', 'Good integration', 'Open source', 'Cloud native', 'Enterprise features'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Service Mesh', 'Traffic Management', 'Security', 'DevOps Workflows', 'Microservices']), +('Linkerd', 'deployment', 'hard', 'excellent', true, true, + ARRAY['Service mesh', 'Traffic management', 'Security', 'Observability'], + ARRAY['Kubernetes', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Service mesh', 'Traffic management', 'Security', 'DevOps workflows'], + ARRAY['Service mesh', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Service Mesh', 'Traffic Management', 'Security', 'DevOps Workflows', 'Microservices']), +('Argo CD', 'deployment', 'medium', 'excellent', true, true, + ARRAY['GitOps', 'Deployment', 'Synchronization', 'Rollback'], + ARRAY['Kubernetes', 'Git repositories', 'Cloud platforms', 'DevOps tools'], + ARRAY['GitOps', 'Deployment', 'Synchronization', 'DevOps workflows'], + ARRAY['GitOps workflow', 'Good integration', 'Open source', 'Cloud native', 'Easy to use'], + ARRAY['Learning curve', 'Complex setup', 'Git dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['GitOps', 'Deployment', 'Synchronization', 'DevOps Workflows', 'Kubernetes']), +('Flux', 'deployment', 'medium', 'excellent', true, true, + ARRAY['GitOps', 'Deployment', 'Synchronization', 'Automation'], + ARRAY['Kubernetes', 'Git repositories', 'Cloud platforms', 'DevOps tools'], + ARRAY['GitOps', 'Deployment', 'Synchronization', 'DevOps workflows'], + ARRAY['GitOps workflow', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Git dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['GitOps', 'Deployment', 'Synchronization', 'DevOps Workflows', 'Kubernetes']), + +('Spinnaker', 'deployment', 'hard', 'excellent', true, true, + ARRAY['Multi-cloud deployment', 'Canary deployments', 'Pipeline management', 'Rollback'], + ARRAY['Cloud platforms', 'CI/CD tools', 'Kubernetes', 'Docker'], + ARRAY['Multi-cloud deployment', 'Canary deployments', 'Pipeline management', 'DevOps workflows'], + ARRAY['Multi-cloud support', 'Canary deployments', 'Good integration', 'Enterprise features', 'Reliable'], + ARRAY['Complex setup', 'Learning curve', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Multi-cloud Deployment', 'Canary Deployments', 'Pipeline Management', 'DevOps Workflows', 'Enterprise Cloud']), +('Argo Rollouts', 'deployment', 'medium', 'excellent', true, true, + ARRAY['Progressive delivery', 'Canary deployments', 'Blue-green deployments', 'Rollback'], + ARRAY['Kubernetes', 'CI/CD tools', 'Cloud platforms', 'DevOps tools'], + ARRAY['Progressive delivery', 'Canary deployments', 'Blue-green deployments', 'DevOps workflows'], + ARRAY['Progressive delivery', 'Good integration', 'Open source', 'Cloud native', 'Easy to use'], + ARRAY['Learning curve', 'Complex setup', 'Kubernetes dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Progressive Delivery', 'Canary Deployments', 'Blue-green Deployments', 'DevOps Workflows', 'Kubernetes']), +('Flagger', 'deployment', 'medium', 'excellent', true, true, + ARRAY['Progressive delivery', 'Canary deployments', 'A/B testing', 'Rollback'], + ARRAY['Kubernetes', 'CI/CD tools', 'Cloud platforms', 'DevOps tools'], + ARRAY['Progressive delivery', 'Canary deployments', 'A/B testing', 'DevOps workflows'], + ARRAY['Progressive delivery', 'Good integration', 'Open source', 'Cloud native', 'Easy to use'], + ARRAY['Learning curve', 'Complex setup', 'Kubernetes dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Progressive Delivery', 'Canary Deployments', 'A/B Testing', 'DevOps Workflows', 'Kubernetes']), +('Keel', 'deployment', 'medium', 'excellent', true, true, + ARRAY['Automated deployment', 'GitOps', 'Continuous deployment', 'Rollback'], + ARRAY['Kubernetes', 'Git repositories', 'Cloud platforms', 'DevOps tools'], + ARRAY['Automated deployment', 'GitOps', 'Continuous deployment', 'DevOps workflows'], + ARRAY['Automated deployment', 'Good integration', 'Open source', 'Cloud native', 'Easy to use'], + ARRAY['Learning curve', 'Complex setup', 'Kubernetes dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Automated Deployment', 'GitOps', 'Continuous Deployment', 'DevOps Workflows', 'Kubernetes']), +('Tekton', 'ci-cd', 'medium', 'excellent', true, true, + ARRAY['CI/CD pipelines', 'Build automation', 'Testing', 'Deployment'], + ARRAY['Kubernetes', 'Git repositories', 'Cloud platforms', 'DevOps tools'], + ARRAY['CI/CD pipelines', 'Build automation', 'Testing', 'DevOps workflows'], + ARRAY['Cloud native', 'Good integration', 'Open source', 'Kubernetes native', 'Flexible'], + ARRAY['Learning curve', 'Complex setup', 'Kubernetes dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['CI/CD Pipelines', 'Build Automation', 'Testing', 'DevOps Workflows', 'Kubernetes']), +('Buildah', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container building', 'Image management', 'Registry management', 'Multi-stage builds'], + ARRAY['Docker', 'Kubernetes', 'Cloud platforms', 'CI/CD tools'], + ARRAY['Container building', 'Image management', 'Registry management', 'DevOps workflows'], + ARRAY['Container building', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Docker compatibility', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Container Building', 'Image Management', 'Registry Management', 'DevOps Workflows', 'Containerization']), +('Podman', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container management', 'Image building', 'Pod management', 'Multi-stage builds'], + ARRAY['Docker', 'Kubernetes', 'Cloud platforms', 'CI/CD tools'], + ARRAY['Container management', 'Image building', 'Pod management', 'DevOps workflows'], + ARRAY['Daemonless', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Docker compatibility', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Container Management', 'Image Building', 'Pod Management', 'DevOps Workflows', 'Containerization']), +('Skopeo', 'containerization', 'medium', 'good', true, true, + ARRAY['Image management', 'Registry management', 'Image copying', 'Image inspection'], + ARRAY['Docker', 'Kubernetes', 'Cloud platforms', 'CI/CD tools'], + ARRAY['Image management', 'Registry management', 'Image copying', 'DevOps workflows'], + ARRAY['Image management', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Image Management', 'Registry Management', 'Image Copying', 'DevOps Workflows', 'Containerization']), +('CRI-O', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container runtime', 'Kubernetes integration', 'Image management', 'Pod management'], + ARRAY['Kubernetes', 'Cloud platforms', 'CI/CD tools', 'DevOps tools'], + ARRAY['Container runtime', 'Kubernetes integration', 'Image management', 'DevOps workflows'], + ARRAY['Kubernetes native', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Kubernetes dependency', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Container Runtime', 'Kubernetes Integration', 'Image Management', 'DevOps Workflows', 'Containerization']), +('containerd', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container runtime', 'Image management', 'Pod management', 'Storage management'], + ARRAY['Docker', 'Kubernetes', 'Cloud platforms', 'CI/CD tools'], + ARRAY['Container runtime', 'Image management', 'Pod management', 'DevOps workflows'], + ARRAY['Container runtime', 'Good integration', 'Open source', 'Cloud native', 'Lightweight'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Container Runtime', 'Image Management', 'Pod Management', 'DevOps Workflows', 'Containerization']), +('rkt', 'containerization', 'medium', 'good', false, true, + ARRAY['Container runtime', 'Image management', 'Pod management', 'Security'], + ARRAY['Cloud platforms', 'CI/CD tools', 'DevOps tools', 'Security tools'], + ARRAY['Container runtime', 'Image management', 'Pod management', 'DevOps workflows'], + ARRAY['Security focus', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Limited adoption', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Container Runtime', 'Image Management', 'Pod Management', 'DevOps Workflows', 'Containerization']), +('Harbor', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container registry', 'Image management', 'Security scanning', 'Vulnerability management'], + ARRAY['Docker', 'Kubernetes', 'Cloud platforms', 'CI/CD tools'], + ARRAY['Container registry', 'Image management', 'Security scanning', 'DevOps workflows'], + ARRAY['Container registry', 'Good integration', 'Open source', 'Cloud native', 'Enterprise features'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Container Registry', 'Image Management', 'Security Scanning', 'DevOps Workflows', 'Containerization']), +('Quay', 'containerization', 'medium', 'excellent', true, true, + ARRAY['Container registry', 'Image management', 'Security scanning', 'Vulnerability management'], + ARRAY['Docker', 'Kubernetes', 'Cloud platforms', 'CI/CD tools'], + ARRAY['Container registry', 'Image management', 'Security scanning', 'DevOps workflows'], + ARRAY['Container registry', 'Good integration', 'Enterprise features', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Resource intensive', 'Configuration complexity'], + 'Commercial', + ARRAY['Container Registry', 'Image Management', 'Security Scanning', 'DevOps Workflows', 'Containerization']), +('ECR', 'containerization', 'easy', 'excellent', true, true, + ARRAY['Container registry', 'Image management', 'Security scanning', 'Vulnerability management'], + ARRAY['AWS', 'Docker', 'Kubernetes', 'CI/CD tools'], + ARRAY['Container registry', 'Image management', 'Security scanning', 'DevOps workflows'], + ARRAY['AWS integration', 'Managed service', 'Good security', 'Reliable', 'Easy to use'], + ARRAY['AWS dependency', 'Cost', 'Limited features', 'Learning curve'], + 'Commercial', + ARRAY['Container Registry', 'Image Management', 'Security Scanning', 'DevOps Workflows', 'AWS Cloud']), +('GCR', 'containerization', 'easy', 'excellent', true, true, + ARRAY['Container registry', 'Image management', 'Security scanning', 'Vulnerability management'], + ARRAY['GCP', 'Docker', 'Kubernetes', 'CI/CD tools'], + ARRAY['Container registry', 'Image management', 'Security scanning', 'DevOps workflows'], + ARRAY['GCP integration', 'Managed service', 'Good security', 'Reliable', 'Easy to use'], + ARRAY['GCP dependency', 'Cost', 'Limited features', 'Learning curve'], + 'Commercial', + ARRAY['Container Registry', 'Image Management', 'Security Scanning', 'DevOps Workflows', 'GCP Cloud']), +('ACR', 'containerization', 'easy', 'excellent', true, true, + ARRAY['Container registry', 'Image management', 'Security scanning', 'Vulnerability management'], + ARRAY['Azure', 'Docker', 'Kubernetes', 'CI/CD tools'], + ARRAY['Container registry', 'Image management', 'Security scanning', 'DevOps workflows'], + ARRAY['Azure integration', 'Managed service', 'Good security', 'Reliable', 'Easy to use'], + ARRAY['Azure dependency', 'Cost', 'Limited features', 'Learning curve'], + 'Commercial', + ARRAY['Container Registry', 'Image Management', 'Security Scanning', 'DevOps Workflows', 'Azure Cloud']), +('Nexus', 'package-management', 'medium', 'excellent', true, true, + ARRAY['Package repository', 'Artifact management', 'Proxy repository', 'Security scanning'], + ARRAY['Maven', 'npm', 'Docker', 'CI/CD tools'], + ARRAY['Package repository', 'Artifact management', 'Proxy repository', 'DevOps workflows'], + ARRAY['Package repository', 'Good integration', 'Open source', 'Enterprise features', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'EPL', + ARRAY['Package Repository', 'Artifact Management', 'Proxy Repository', 'DevOps Workflows', 'Enterprise IT']), +('Artifactory', 'package-management', 'medium', 'excellent', true, true, + ARRAY['Package repository', 'Artifact management', 'Proxy repository', 'Security scanning'], + ARRAY['Maven', 'npm', 'Docker', 'CI/CD tools'], + ARRAY['Package repository', 'Artifact management', 'Proxy repository', 'DevOps workflows'], + ARRAY['Package repository', 'Good integration', 'Enterprise features', 'Reliable', 'Comprehensive'], + ARRAY['Cost', 'Learning curve', 'Resource intensive', 'Configuration complexity'], + 'Commercial', + ARRAY['Package Repository', 'Artifact Management', 'Proxy Repository', 'DevOps Workflows', 'Enterprise IT']), + +('JFrog Distribution', 'package-management', 'medium', 'excellent', true, true, + ARRAY['Package distribution', 'Artifact management', 'Release management', 'Security scanning'], + ARRAY['Artifactory', 'CI/CD tools', 'Cloud platforms', 'DevOps tools'], + ARRAY['Package distribution', 'Artifact management', 'Release management', 'DevOps workflows'], + ARRAY['Package distribution', 'Good integration', 'Enterprise features', 'Reliable', 'Comprehensive'], + ARRAY['Cost', 'Learning curve', 'Resource intensive', 'Configuration complexity'], + 'Commercial', + ARRAY['Package Distribution', 'Artifact Management', 'Release Management', 'DevOps Workflows', 'Enterprise IT']), +('Maven', 'package-management', 'medium', 'good', false, true, + ARRAY['Build automation', 'Dependency management', 'Package management', 'Project management'], + ARRAY['Java', 'CI/CD tools', 'IDEs', 'DevOps tools'], + ARRAY['Build automation', 'Dependency management', 'Package management', 'DevOps workflows'], + ARRAY['Java ecosystem', 'Good documentation', 'Large community', 'Reliable', 'Comprehensive'], + ARRAY['Learning curve', 'Complex configuration', 'Performance issues', 'XML complexity'], + 'Apache 2.0', + ARRAY['Build Automation', 'Dependency Management', 'Package Management', 'DevOps Workflows', 'Java Development']), +('Gradle', 'package-management', 'medium', 'excellent', false, true, + ARRAY['Build automation', 'Dependency management', 'Package management', 'Project management'], + ARRAY['Java', 'CI/CD tools', 'IDEs', 'DevOps tools'], + ARRAY['Build automation', 'Dependency management', 'Package management', 'DevOps workflows'], + ARRAY['Java ecosystem', 'Good performance', 'Flexible configuration', 'Large community', 'Modern'], + ARRAY['Learning curve', 'Complex configuration', 'Performance issues', 'Groovy dependency'], + 'Apache 2.0', + ARRAY['Build Automation', 'Dependency Management', 'Package Management', 'DevOps Workflows', 'Java Development']), +('npm', 'package-management', 'easy', 'excellent', false, true, + ARRAY['Package management', 'Dependency management', 'Scripting', 'Version management'], + ARRAY['JavaScript', 'CI/CD tools', 'IDEs', 'DevOps tools'], + ARRAY['Package management', 'Dependency management', 'Scripting', 'DevOps workflows'], + ARRAY['JavaScript ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Popular'], + ARRAY['Security issues', 'Dependency hell', 'Performance issues', 'Version conflicts'], + 'Artistic', + ARRAY['Package Management', 'Dependency Management', 'Scripting', 'DevOps Workflows', 'JavaScript Development']), +('Yarn', 'package-management', 'easy', 'excellent', false, true, + ARRAY['Package management', 'Dependency management', 'Scripting', 'Version management'], + ARRAY['JavaScript', 'CI/CD tools', 'IDEs', 'DevOps tools'], + ARRAY['Package management', 'Dependency management', 'Scripting', 'DevOps workflows'], + ARRAY['JavaScript ecosystem', 'Fast performance', 'Reliable', 'Good documentation', 'Popular'], + ARRAY['Learning curve', 'Compatibility issues', 'Performance issues', 'Version conflicts'], + 'BSD', + ARRAY['Package Management', 'Dependency Management', 'Scripting', 'DevOps Workflows', 'JavaScript Development']), +('pip', 'package-management', 'easy', 'good', false, true, + ARRAY['Package management', 'Dependency management', 'Virtual environments', 'Version management'], + ARRAY['Python', 'CI/CD tools', 'IDEs', 'DevOps tools'], + ARRAY['Package management', 'Dependency management', 'Virtual environments', 'DevOps workflows'], + ARRAY['Python ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Popular'], + ARRAY['Security issues', 'Dependency hell', 'Performance issues', 'Version conflicts'], + 'MIT', + ARRAY['Package Management', 'Dependency Management', 'Virtual Environments', 'DevOps Workflows', 'Python Development']), +('conda', 'package-management', 'medium', 'excellent', false, true, + ARRAY['Package management', 'Dependency management', 'Environment management', 'Version management'], + ARRAY['Python', 'Data Science', 'CI/CD tools', 'DevOps tools'], + ARRAY['Package management', 'Dependency management', 'Environment management', 'DevOps workflows'], + ARRAY['Data Science', 'Environment management', 'Large repository', 'Good documentation', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Performance issues'], + 'BSD', + ARRAY['Package Management', 'Dependency Management', 'Environment Management', 'DevOps Workflows', 'Data Science']), +('NuGet', 'package-management', 'easy', 'good', false, true, + ARRAY['Package management', 'Dependency management', 'Version management', 'Publishing'], + ARRAY['.NET', 'CI/CD tools', 'IDEs', 'DevOps tools'], + ARRAY['Package management', 'Dependency management', 'Version management', 'DevOps workflows'], + ARRAY['.NET ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Popular'], + ARRAY['Security issues', 'Dependency hell', 'Performance issues', 'Version conflicts'], + 'Apache 2.0', + ARRAY['Package Management', 'Dependency Management', 'Version Management', 'DevOps Workflows', '.NET Development']), +('Chocolatey', 'package-management', 'easy', 'good', false, true, + ARRAY['Package management', 'Software deployment', 'Configuration management', 'Automation'], + ARRAY['Windows', 'CI/CD tools', 'DevOps tools', 'System administration'], + ARRAY['Package management', 'Software deployment', 'Configuration management', 'DevOps workflows'], + ARRAY['Windows ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Popular'], + ARRAY['Windows only', 'Security issues', 'Performance issues', 'Version conflicts'], + 'Apache 2.0', + ARRAY['Package Management', 'Software Deployment', 'Configuration Management', 'DevOps Workflows', 'Windows Administration']), +('Homebrew', 'package-management', 'easy', 'good', false, true, + ARRAY['Package management', 'Software deployment', 'Configuration management', 'Automation'], + ARRAY['macOS', 'CI/CD tools', 'DevOps tools', 'System administration'], + ARRAY['Package management', 'Software deployment', 'Configuration management', 'DevOps workflows'], + ARRAY['macOS ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Popular'], + ARRAY['macOS only', 'Security issues', 'Performance issues', 'Version conflicts'], + 'BSD', + ARRAY['Package Management', 'Software Deployment', 'Configuration Management', 'DevOps Workflows', 'macOS Administration']), +('Apt', 'package-management', 'easy', 'good', false, true, + ARRAY['Package management', 'Software deployment', 'Configuration management', 'Automation'], + ARRAY['Debian', 'Ubuntu', 'CI/CD tools', 'DevOps tools'], + ARRAY['Package management', 'Software deployment', 'Configuration management', 'DevOps workflows'], + ARRAY['Debian ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Reliable'], + ARRAY['Debian only', 'Security issues', 'Performance issues', 'Version conflicts'], + 'GPL', + ARRAY['Package Management', 'Software Deployment', 'Configuration Management', 'DevOps Workflows', 'Debian Administration']), +('Yum', 'package-management', 'easy', 'good', false, true, + ARRAY['Package management', 'Software deployment', 'Configuration management', 'Automation'], + ARRAY['Red Hat', 'CentOS', 'CI/CD tools', 'DevOps tools'], + ARRAY['Package management', 'Software deployment', 'Configuration management', 'DevOps workflows'], + ARRAY['Red Hat ecosystem', 'Easy to use', 'Large repository', 'Good documentation', 'Reliable'], + ARRAY['Red Hat only', 'Security issues', 'Performance issues', 'Version conflicts'], + 'GPL', + ARRAY['Package Management', 'Software Deployment', 'Configuration Management', 'DevOps Workflows', 'Red Hat Administration']), +('Snyk', 'security', 'easy', 'excellent', true, true, + ARRAY['Security scanning', 'Vulnerability management', 'Dependency analysis', 'Compliance'], + ARRAY['CI/CD tools', 'Code repositories', 'Cloud platforms', 'DevOps tools'], + ARRAY['Security scanning', 'Vulnerability management', 'Dependency analysis', 'DevOps workflows'], + ARRAY['Security scanning', 'Good integration', 'Easy to use', 'Comprehensive', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Limited features on free tier', 'Vendor lock-in'], + 'Commercial', + ARRAY['Security Scanning', 'Vulnerability Management', 'Dependency Analysis', 'DevOps Workflows', 'Application Security']), +('SonarQube', 'security', 'medium', 'excellent', true, true, + ARRAY['Code quality', 'Security scanning', 'Vulnerability management', 'Compliance'], + ARRAY['CI/CD tools', 'Code repositories', 'IDEs', 'DevOps tools'], + ARRAY['Code quality', 'Security scanning', 'Vulnerability management', 'DevOps workflows'], + ARRAY['Code quality', 'Good integration', 'Open source', 'Comprehensive', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'LGPL', + ARRAY['Code Quality', 'Security Scanning', 'Vulnerability Management', 'DevOps Workflows', 'Application Security']), +('Checkmarx', 'security', 'medium', 'excellent', true, true, + ARRAY['Security scanning', 'Vulnerability management', 'Static analysis', 'Compliance'], + ARRAY['CI/CD tools', 'Code repositories', 'IDEs', 'DevOps tools'], + ARRAY['Security scanning', 'Vulnerability management', 'Static analysis', 'DevOps workflows'], + ARRAY['Security scanning', 'Good integration', 'Enterprise features', 'Comprehensive', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Complex setup', 'Vendor lock-in'], + 'Commercial', + ARRAY['Security Scanning', 'Vulnerability Management', 'Static Analysis', 'DevOps Workflows', 'Application Security']), +('Veracode', 'security', 'medium', 'excellent', true, true, + ARRAY['Security scanning', 'Vulnerability management', 'Static analysis', 'Compliance'], + ARRAY['CI/CD tools', 'Code repositories', 'IDEs', 'DevOps tools'], + ARRAY['Security scanning', 'Vulnerability management', 'Static analysis', 'DevOps workflows'], + ARRAY['Security scanning', 'Good integration', 'Enterprise features', 'Comprehensive', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Complex setup', 'Vendor lock-in'], + 'Commercial', + ARRAY['Security Scanning', 'Vulnerability Management', 'Static Analysis', 'DevOps Workflows', 'Application Security']), +('OWASP ZAP', 'security', 'medium', 'good', false, true, + ARRAY['Security scanning', 'Vulnerability management', 'Web application testing', 'Penetration testing'], + ARRAY['Web applications', 'CI/CD tools', 'DevOps tools', 'Security tools'], + ARRAY['Security scanning', 'Vulnerability management', 'Web application testing', 'DevOps workflows'], + ARRAY['Security scanning', 'Good integration', 'Open source', 'Comprehensive', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Security Scanning', 'Vulnerability Management', 'Web Application Testing', 'DevOps Workflows', 'Web Security']), +('Burp Suite', 'security', 'medium', 'good', false, true, + ARRAY['Security scanning', 'Vulnerability management', 'Web application testing', 'Penetration testing'], + ARRAY['Web applications', 'CI/CD tools', 'DevOps tools', 'Security tools'], + ARRAY['Security scanning', 'Vulnerability management', 'Web application testing', 'DevOps workflows'], + ARRAY['Security scanning', 'Good integration', 'Comprehensive', 'Reliable', 'Professional'], + ARRAY['Cost', 'Learning curve', 'Complex setup', 'Resource intensive'], + 'Commercial', + ARRAY['Security Scanning', 'Vulnerability Management', 'Web Application Testing', 'DevOps Workflows', 'Web Security']), + +('Nessus', 'security', 'medium', 'excellent', false, true, + ARRAY['Vulnerability scanning', 'Security assessment', 'Compliance checking', 'Risk management'], + ARRAY['Network devices', 'Servers', 'Applications', 'Cloud platforms'], + ARRAY['Vulnerability scanning', 'Security assessment', 'Compliance checking', 'DevOps workflows'], + ARRAY['Comprehensive scanning', 'Good documentation', 'Large database', 'Reliable', 'Professional'], + ARRAY['Cost', 'Learning curve', 'Resource intensive', 'Limited free version'], + 'Commercial', + ARRAY['Vulnerability Scanning', 'Security Assessment', 'Compliance Checking', 'DevOps Workflows', 'Enterprise Security']), +('OpenVAS', 'security', 'medium', 'good', false, true, + ARRAY['Vulnerability scanning', 'Security assessment', 'Compliance checking', 'Risk management'], + ARRAY['Network devices', 'Servers', 'Applications', 'Cloud platforms'], + ARRAY['Vulnerability scanning', 'Security assessment', 'Compliance checking', 'DevOps workflows'], + ARRAY['Open source', 'Comprehensive scanning', 'Good documentation', 'Reliable', 'Professional'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Limited support'], + 'GPLv2', + ARRAY['Vulnerability Scanning', 'Security Assessment', 'Compliance Checking', 'DevOps Workflows', 'Enterprise Security']), +('Metasploit', 'security', 'hard', 'good', false, true, + ARRAY['Penetration testing', 'Vulnerability assessment', 'Exploit development', 'Security research'], + ARRAY['Network devices', 'Servers', 'Applications', 'Security tools'], + ARRAY['Penetration testing', 'Vulnerability assessment', 'Security research', 'DevOps workflows'], + ARRAY['Penetration testing', 'Good documentation', 'Large database', 'Reliable', 'Professional'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Security risks'], + 'BSD', + ARRAY['Penetration Testing', 'Vulnerability Assessment', 'Security Research', 'DevOps Workflows', 'Enterprise Security']), +('Wireshark', 'monitoring', 'medium', 'good', false, true, + ARRAY['Network analysis', 'Protocol analysis', 'Packet capture', 'Troubleshooting'], + ARRAY['Network devices', 'Servers', 'Applications', 'Security tools'], + ARRAY['Network analysis', 'Protocol analysis', 'Troubleshooting', 'DevOps workflows'], + ARRAY['Network analysis', 'Good documentation', 'Large community', 'Reliable', 'Professional'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Limited automation'], + 'GPLv2', + ARRAY['Network Analysis', 'Protocol Analysis', 'Troubleshooting', 'DevOps Workflows', 'Network Security']), +('tcpdump', 'monitoring', 'hard', 'good', false, true, + ARRAY['Network analysis', 'Protocol analysis', 'Packet capture', 'Troubleshooting'], + ARRAY['Network devices', 'Servers', 'Applications', 'Security tools'], + ARRAY['Network analysis', 'Protocol analysis', 'Troubleshooting', 'DevOps workflows'], + ARRAY['Network analysis', 'Lightweight', 'Reliable', 'Professional', 'Command-line'], + ARRAY['Learning curve', 'Complex usage', 'Limited features', 'No GUI'], + 'BSD', + ARRAY['Network Analysis', 'Protocol Analysis', 'Troubleshooting', 'DevOps Workflows', 'Network Security']), +('Netdata', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['System monitoring', 'Performance monitoring', 'Real-time monitoring', 'Alerting'], + ARRAY['Servers', 'Applications', 'Databases', 'Cloud platforms'], + ARRAY['System monitoring', 'Performance monitoring', 'Real-time monitoring', 'DevOps workflows'], + ARRAY['Real-time monitoring', 'Easy to use', 'Lightweight', 'Good documentation', 'Open source'], + ARRAY['Learning curve', 'Resource intensive', 'Limited historical data', 'Configuration complexity'], + 'GPLv3', + ARRAY['System Monitoring', 'Performance Monitoring', 'Real-time Monitoring', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Telegraf', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Metrics collection', 'Data aggregation', 'Plugin system', 'Data processing'], + ARRAY['Servers', 'Applications', 'Databases', 'Cloud platforms'], + ARRAY['Metrics collection', 'Data aggregation', 'Data processing', 'DevOps workflows'], + ARRAY['Metrics collection', 'Good integration', 'Open source', 'Lightweight', 'Flexible'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'MIT', + ARRAY['Metrics Collection', 'Data Aggregation', 'Data Processing', 'DevOps Workflows', 'Infrastructure Monitoring']), +('InfluxDB', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Time series database', 'Metrics storage', 'Data analysis', 'Query processing'], + ARRAY['Monitoring tools', 'Applications', 'Cloud platforms', 'DevOps tools'], + ARRAY['Time series database', 'Metrics storage', 'Data analysis', 'DevOps workflows'], + ARRAY['Time series database', 'Good performance', 'Open source', 'Reliable', 'Scalable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'MIT', + ARRAY['Time Series Database', 'Metrics Storage', 'Data Analysis', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Grafana Loki', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Log aggregation', 'Log management', 'Log analysis', 'Log querying'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Log aggregation', 'Log management', 'Log analysis', 'DevOps workflows'], + ARRAY['Log aggregation', 'Good integration', 'Open source', 'Lightweight', 'Scalable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Log Aggregation', 'Log Management', 'Log Analysis', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Fluentd', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Log collection', 'Log aggregation', 'Log processing', 'Log forwarding'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Log collection', 'Log aggregation', 'Log processing', 'DevOps workflows'], + ARRAY['Log collection', 'Good integration', 'Open source', 'Lightweight', 'Flexible'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Log Collection', 'Log Aggregation', 'Log Processing', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Logstash', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Log collection', 'Log aggregation', 'Log processing', 'Log forwarding'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Log collection', 'Log aggregation', 'Log processing', 'DevOps workflows'], + ARRAY['Log collection', 'Good integration', 'Open source', 'Flexible', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Log Collection', 'Log Aggregation', 'Log Processing', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Filebeat', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Log collection', 'Log forwarding', 'Log shipping', 'Log monitoring'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Log collection', 'Log forwarding', 'Log shipping', 'DevOps workflows'], + ARRAY['Log collection', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Log Collection', 'Log Forwarding', 'Log Shipping', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Metricbeat', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Metrics collection', 'Metrics forwarding', 'Metrics shipping', 'Metrics monitoring'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Metrics collection', 'Metrics forwarding', 'Metrics shipping', 'DevOps workflows'], + ARRAY['Metrics collection', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Metrics Collection', 'Metrics Forwarding', 'Metrics Shipping', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Heartbeat', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Uptime monitoring', 'Health checking', 'Availability monitoring', 'Alerting'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Uptime monitoring', 'Health checking', 'Availability monitoring', 'DevOps workflows'], + ARRAY['Uptime monitoring', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Uptime Monitoring', 'Health Checking', 'Availability Monitoring', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Auditbeat', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Audit logging', 'Security monitoring', 'Compliance monitoring', 'Event tracking'], + ARRAY['Applications', 'Servers', 'Cloud platforms', 'DevOps tools'], + ARRAY['Audit logging', 'Security monitoring', 'Compliance monitoring', 'DevOps workflows'], + ARRAY['Audit logging', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Audit Logging', 'Security Monitoring', 'Compliance Monitoring', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Packetbeat', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Network monitoring', 'Packet capture', 'Protocol analysis', 'Network performance'], + ARRAY['Network devices', 'Servers', 'Applications', 'Cloud platforms'], + ARRAY['Network monitoring', 'Packet capture', 'Protocol analysis', 'DevOps workflows'], + ARRAY['Network monitoring', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Network Monitoring', 'Packet Capture', 'Protocol Analysis', 'DevOps Workflows', 'Infrastructure Monitoring']), +('Winlogbeat', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Windows log collection', 'Event log monitoring', 'Security monitoring', 'Compliance monitoring'], + ARRAY['Windows servers', 'Applications', 'Cloud platforms', 'DevOps tools'], + ARRAY['Windows log collection', 'Event log monitoring', 'Security monitoring', 'DevOps workflows'], + ARRAY['Windows logs', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Windows only', 'Learning curve', 'Limited features', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Windows Log Collection', 'Event Log Monitoring', 'Security Monitoring', 'DevOps Workflows', 'Windows Infrastructure']), + +('Jaeger', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'Request tracing'], + ARRAY['Microservices', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'DevOps workflows'], + ARRAY['Distributed tracing', 'Good integration', 'Open source', 'Cloud native', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Distributed Tracing', 'Performance Monitoring', 'Service Monitoring', 'DevOps Workflows', 'Microservices']), +('Zipkin', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'Request tracing'], + ARRAY['Microservices', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'DevOps workflows'], + ARRAY['Distributed tracing', 'Good integration', 'Open source', 'Lightweight', 'Reliable'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Distributed Tracing', 'Performance Monitoring', 'Service Monitoring', 'DevOps Workflows', 'Microservices']), +('OpenTelemetry', 'monitoring', 'medium', 'excellent', true, true, + ARRAY['Distributed tracing', 'Metrics collection', 'Log collection', 'Observability'], + ARRAY['Microservices', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Distributed tracing', 'Metrics collection', 'Log collection', 'DevOps workflows'], + ARRAY['Observability', 'Good integration', 'Open source', 'Cloud native', 'Standardized'], + ARRAY['Learning curve', 'Complex setup', 'Resource intensive', 'Configuration complexity'], + 'Apache 2.0', + ARRAY['Distributed Tracing', 'Metrics Collection', 'Log Collection', 'DevOps Workflows', 'Microservices']), +('Honeycomb', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'Observability'], + ARRAY['Microservices', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'DevOps workflows'], + ARRAY['Observability', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Distributed Tracing', 'Performance Monitoring', 'Service Monitoring', 'DevOps Workflows', 'Microservices']), +('Lightstep', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'Observability'], + ARRAY['Microservices', 'Cloud platforms', 'Applications', 'DevOps tools'], + ARRAY['Distributed tracing', 'Performance monitoring', 'Service monitoring', 'DevOps workflows'], + ARRAY['Observability', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Distributed Tracing', 'Performance Monitoring', 'Service Monitoring', 'DevOps Workflows', 'Microservices']), +('AWS CloudWatch', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Cloud monitoring', 'Application monitoring', 'Log management', 'Metrics collection'], + ARRAY['AWS services', 'Applications', 'Cloud platforms', 'DevOps tools'], + ARRAY['Cloud monitoring', 'Application monitoring', 'Log management', 'DevOps workflows'], + ARRAY['AWS integration', 'Managed service', 'Good documentation', 'Reliable', 'Comprehensive'], + ARRAY['AWS dependency', 'Cost', 'Learning curve', 'Limited flexibility'], + 'Commercial', + ARRAY['Cloud Monitoring', 'Application Monitoring', 'Log Management', 'DevOps Workflows', 'AWS Cloud']), +('Azure Monitor', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Cloud monitoring', 'Application monitoring', 'Log management', 'Metrics collection'], + ARRAY['Azure services', 'Applications', 'Cloud platforms', 'DevOps tools'], + ARRAY['Cloud monitoring', 'Application monitoring', 'Log management', 'DevOps workflows'], + ARRAY['Azure integration', 'Managed service', 'Good documentation', 'Reliable', 'Comprehensive'], + ARRAY['Azure dependency', 'Cost', 'Learning curve', 'Limited flexibility'], + 'Commercial', + ARRAY['Cloud Monitoring', 'Application Monitoring', 'Log Management', 'DevOps Workflows', 'Azure Cloud']), +('Google Cloud Monitoring', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Cloud monitoring', 'Application monitoring', 'Log management', 'Metrics collection'], + ARRAY['GCP services', 'Applications', 'Cloud platforms', 'DevOps tools'], + ARRAY['Cloud monitoring', 'Application monitoring', 'Log management', 'DevOps workflows'], + ARRAY['GCP integration', 'Managed service', 'Good documentation', 'Reliable', 'Comprehensive'], + ARRAY['GCP dependency', 'Cost', 'Learning curve', 'Limited flexibility'], + 'Commercial', + ARRAY['Cloud Monitoring', 'Application Monitoring', 'Log Management', 'DevOps Workflows', 'GCP Cloud']), +('Datadog APM', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'Error tracking'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'DevOps workflows'], + ARRAY['APM features', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Application Monitoring', 'Performance Monitoring', 'Distributed Tracing', 'DevOps Workflows', 'Application Performance']), +('New Relic APM', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'Error tracking'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'DevOps workflows'], + ARRAY['APM features', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Application Monitoring', 'Performance Monitoring', 'Distributed Tracing', 'DevOps Workflows', 'Application Performance']), +('Dynatrace', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'AI-powered insights'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'DevOps workflows'], + ARRAY['AI-powered', 'Good integration', 'Easy to use', 'Cloud native', 'Comprehensive'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Application Monitoring', 'Performance Monitoring', 'Distributed Tracing', 'DevOps Workflows', 'Application Performance']), +('AppDynamics', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'Business metrics'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Application monitoring', 'Performance monitoring', 'Distributed tracing', 'DevOps workflows'], + ARRAY['Business metrics', 'Good integration', 'Easy to use', 'Cloud native', 'Comprehensive'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Application Monitoring', 'Performance Monitoring', 'Distributed Tracing', 'DevOps Workflows', 'Application Performance']), +('Raygun', 'monitoring', 'easy', 'good', true, true, + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'Real user monitoring'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'DevOps workflows'], + ARRAY['Error tracking', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Error Monitoring', 'Performance Monitoring', 'User Monitoring', 'DevOps Workflows', 'Application Performance']), +('Sentry', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'Real user monitoring'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'DevOps workflows'], + ARRAY['Error tracking', 'Good integration', 'Open source', 'Cloud native', 'Reliable'], + ARRAY['Learning curve', 'Resource intensive', 'Limited features on free tier', 'Configuration complexity'], + 'BSD', + ARRAY['Error Monitoring', 'Performance Monitoring', 'User Monitoring', 'DevOps Workflows', 'Application Performance']), +('Rollbar', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'Real user monitoring'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'DevOps workflows'], + ARRAY['Error tracking', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Error Monitoring', 'Performance Monitoring', 'User Monitoring', 'DevOps Workflows', 'Application Performance']), +('Bugsnag', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'Real user monitoring'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'DevOps workflows'], + ARRAY['Error tracking', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Error Monitoring', 'Performance Monitoring', 'User Monitoring', 'DevOps Workflows', 'Application Performance']), +('Airbrake', 'monitoring', 'easy', 'excellent', true, true, + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'Real user monitoring'], + ARRAY['Applications', 'Cloud platforms', 'DevOps tools', 'Monitoring tools'], + ARRAY['Error monitoring', 'Performance monitoring', 'User monitoring', 'DevOps workflows'], + ARRAY['Error tracking', 'Good integration', 'Easy to use', 'Cloud native', 'Reliable'], + ARRAY['Cost', 'Learning curve', 'Vendor lock-in', 'Limited features on free tier'], + 'Commercial', + ARRAY['Error Monitoring', 'Performance Monitoring', 'User Monitoring', 'DevOps Workflows', 'Application Performance']); + + + INSERT INTO ai_ml_technologies ( + name, ml_type, language_support, gpu_acceleration, cloud_integration, pretrained_models, + ease_of_deployment, model_accuracy_potential, primary_use_cases, strengths, weaknesses, + license_type, domain +) VALUES +('TensorFlow', 'deep-learning', ARRAY['python', 'javascript', 'c++', 'java'], true, true, true, 75, 95, + ARRAY['Deep learning models', 'Computer vision', 'Natural language processing', 'Recommendation systems'], + ARRAY['Industry standard', 'Google backing', 'Production ready', 'Large ecosystem', 'TensorBoard visualization'], + ARRAY['Steep learning curve', 'Complex API', 'Large memory usage', 'Verbose code'], + 'Apache 2.0', + ARRAY['Computer Vision', 'Natural Language Processing', 'Recommendation Systems', 'Enterprise AI', 'Data Analytics']), +('PyTorch', 'deep-learning', ARRAY['python', 'c++'], true, true, true, 80, 95, + ARRAY['Research projects', 'Computer vision', 'NLP models', 'Prototype development'], + ARRAY['Pythonic API', 'Dynamic graphs', 'Research friendly', 'Strong community', 'Easy debugging'], + ARRAY['Less production ready', 'Smaller ecosystem', 'Memory intensive', 'Facebook dependency'], + 'BSD', + ARRAY['Research AI', 'Computer Vision', 'Natural Language Processing', 'Prototyping', 'Academic Projects']), +('Scikit-learn', 'machine-learning', ARRAY['python'], false, false, true, 90, 85, + ARRAY['Classification', 'Regression', 'Clustering', 'Data preprocessing', 'Model evaluation'], + ARRAY['Easy to use', 'Well documented', 'Consistent API', 'Wide algorithm coverage', 'Great for beginners'], + ARRAY['No deep learning', 'No GPU support', 'Limited scalability', 'Python only'], + 'BSD', + ARRAY['Data Analytics', 'Business Intelligence', 'Predictive Modeling', 'Education', 'Small-scale ML']), +('Hugging Face', 'nlp', ARRAY['python', 'javascript'], true, true, true, 85, 92, + ARRAY['Text generation', 'Sentiment analysis', 'Language translation', 'Question answering'], + ARRAY['Pre-trained models', 'Easy to use', 'Large model hub', 'Community driven', 'Transformer focus'], + ARRAY['NLP focused', 'Model size limitations', 'Internet dependency', 'Limited customization'], + 'Apache 2.0', + ARRAY['Natural Language Processing', 'Chatbots', 'Content Generation', 'Customer Support', 'Language Translation']), +('OpenAI API', 'nlp', ARRAY['python', 'javascript', 'curl'], false, true, true, 95, 98, + ARRAY['Text generation', 'Code completion', 'Chatbots', 'Content creation', 'Language understanding'], + ARRAY['State-of-the-art models', 'Easy integration', 'No training required', 'Excellent documentation', 'Regular updates'], + ARRAY['API costs', 'Data privacy concerns', 'Rate limits', 'External dependency', 'Limited customization'], + 'Proprietary', + ARRAY['Chatbots', 'Content Creation', 'Customer Support', 'Code Automation', 'SaaS Applications']), +('Keras', 'deep-learning', ARRAY['python', 'r'], true, true, true, 85, 90, + ARRAY['Neural networks', 'Deep learning', 'Computer vision', 'Natural language processing'], + ARRAY['User-friendly API', 'Modular design', 'Easy prototyping', 'Good documentation', 'TensorFlow backend'], + ARRAY['Limited flexibility', 'Abstraction overhead', 'TensorFlow dependency', 'Less control'], + 'MIT', + ARRAY['Deep Learning', 'Computer Vision', 'Natural Language Processing', 'Education', 'Rapid Prototyping']), +('MXNet', 'deep-learning', ARRAY['python', 'c++', 'java', 'scala', 'julia'], true, true, true, 70, 92, + ARRAY['Deep learning', 'Computer vision', 'Natural language processing', 'Recommendation systems'], + ARRAY['Lightweight', 'Scalable', 'Multi-language', 'Good performance', 'Amazon backing'], + ARRAY['Smaller community', 'Limited documentation', 'Less popular', 'Complex setup'], + 'Apache 2.0', + ARRAY['Deep Learning', 'Computer Vision', 'Natural Language Processing', 'Enterprise AI', 'Cloud Computing']), +('Caffe', 'deep-learning', ARRAY['python', 'c++', 'matlab'], true, false, true, 60, 88, + ARRAY['Computer vision', 'Image processing', 'Deep learning', 'Convolutional networks'], + ARRAY['Fast performance', 'Good for vision', 'C++ core', 'Research oriented', 'Model zoo'], + ARRAY['Limited flexibility', 'Python wrapper', 'Steep learning curve', 'Less maintained'], + 'BSD', + ARRAY['Computer Vision', 'Image Processing', 'Deep Learning', 'Research Projects', 'Academic Applications']), +('Theano', 'deep-learning', ARRAY['python'], true, false, true, 65, 85, + ARRAY['Deep learning', 'Neural networks', 'Mathematical optimization', 'Research'], + ARRAY['Mathematical foundation', 'Good performance', 'Research oriented', 'Flexible', 'Optimization focused'], + ARRAY['Deprecated', 'Limited support', 'Complex API', 'Steep learning curve'], + 'BSD', + ARRAY['Deep Learning', 'Neural Networks', 'Mathematical Optimization', 'Research Projects', 'Academic Applications']), +('CNTK', 'deep-learning', ARRAY['python', 'c++', 'c#'], true, true, true, 65, 90, + ARRAY['Deep learning', 'Computer vision', 'Speech recognition', 'Natural language processing'], + ARRAY['Microsoft backing', 'Good performance', 'Production ready', 'Multi-language', 'Enterprise features'], + ARRAY['Complex API', 'Limited community', 'Steep learning curve', 'Less popular'], + 'MIT', + ARRAY['Deep Learning', 'Computer Vision', 'Speech Recognition', 'Enterprise AI', 'Microsoft Ecosystem']), +('XGBoost', 'machine-learning', ARRAY['python', 'r', 'java', 'c++', 'julia'], true, true, true, 80, 92, + ARRAY['Gradient boosting', 'Classification', 'Regression', 'Kaggle competitions', 'Data mining'], + ARRAY['High performance', 'Regularization', 'Missing value handling', 'Cross-platform', 'Popular in competitions'], + ARRAY['Complex parameters', 'Memory intensive', 'Overfitting risk', 'Steep learning curve'], + 'Apache 2.0', + ARRAY['Gradient Boosting', 'Classification', 'Regression', 'Data Mining', 'Competitive Analytics']), +('LightGBM', 'machine-learning', ARRAY['python', 'r', 'c++'], true, true, true, 75, 90, + ARRAY['Gradient boosting', 'Classification', 'Regression', 'Large datasets', 'Data mining'], + ARRAY['Fast training', 'Memory efficient', 'Good accuracy', 'Microsoft backing', 'Scalable'], + ARRAY['Complex parameters', 'Overfitting risk', 'Limited documentation', 'Steep learning curve'], + 'MIT', + ARRAY['Gradient Boosting', 'Classification', 'Regression', 'Large Datasets', 'Data Mining']), +('CatBoost', 'machine-learning', ARRAY['python', 'r', 'c++', 'java'], true, true, true, 75, 88, + ARRAY['Gradient boosting', 'Classification', 'Regression', 'Categorical features', 'Data mining'], + ARRAY['Categorical handling', 'Good accuracy', 'Yandex backing', 'Robust', 'Easy to use'], + ARRAY['Complex parameters', 'Memory intensive', 'Overfitting risk', 'Limited community'], + 'Apache 2.0', + ARRAY['Gradient Boosting', 'Classification', 'Regression', 'Categorical Features', 'Data Mining']), +('Random Forest', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 85, 82, + ARRAY['Classification', 'Regression', 'Feature selection', 'Ensemble learning', 'Data mining'], + ARRAY['Easy to use', 'Robust', 'Feature importance', 'No overfitting', 'Good accuracy'], + ARRAY['Black box', 'Memory intensive', 'Slow prediction', 'Limited interpretability'], + 'BSD', + ARRAY['Classification', 'Regression', 'Feature Selection', 'Ensemble Learning', 'Data Mining']), +('Decision Trees', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 90, 75, + ARRAY['Classification', 'Regression', 'Feature selection', 'Decision making', 'Data mining'], + ARRAY['Easy to interpret', 'Fast training', 'No preprocessing', 'Visualizable', 'Simple'], + ARRAY['Overfitting', 'Unstable', 'Limited complexity', 'Poor accuracy'], + 'BSD', + ARRAY['Classification', 'Regression', 'Feature Selection', 'Decision Making', 'Data Mining']), +('SVM', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 70, 85, + ARRAY['Classification', 'Regression', 'Outlier detection', 'Pattern recognition', 'Data mining'], + ARRAY['Effective in high dimensions', 'Memory efficient', 'Versatile', 'Good accuracy', 'Well studied'], + ARRAY['Complex parameters', 'Slow training', 'Black box', 'Sensitive to parameters'], + 'BSD', + ARRAY['Classification', 'Regression', 'Outlier Detection', 'Pattern Recognition', 'Data Mining']), +('K-Means', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 85, 70, + ARRAY['Clustering', 'Segmentation', 'Data mining', 'Pattern recognition', 'Unsupervised learning'], + ARRAY['Simple', 'Fast', 'Scalable', 'Easy to understand', 'Widely used'], + ARRAY['Sensitive to initialization', 'Fixed clusters', 'Outlier sensitive', 'Limited complexity'], + 'BSD', + ARRAY['Clustering', 'Segmentation', 'Data Mining', 'Pattern Recognition', 'Unsupervised Learning']), +('DBSCAN', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 75, 75, + ARRAY['Clustering', 'Outlier detection', 'Density-based clustering', 'Data mining', 'Pattern recognition'], + ARRAY['No cluster number', 'Outlier detection', 'Density based', 'Arbitrary shapes', 'Robust'], + ARRAY['Parameter sensitive', 'Slow performance', 'Complex implementation', 'Memory intensive'], + 'BSD', + ARRAY['Clustering', 'Outlier Detection', 'Density-based Clustering', 'Data Mining', 'Pattern Recognition']), +('Hierarchical Clustering', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 80, 72, + ARRAY['Clustering', 'Hierarchical analysis', 'Data mining', 'Pattern recognition', 'Unsupervised learning'], + ARRAY['Hierarchical structure', 'No cluster number', 'Visualizable', 'Flexible', 'Interpretable'], + ARRAY['Slow performance', 'Memory intensive', 'Complex implementation', 'Sensitive to noise'], + 'BSD', + ARRAY['Clustering', 'Hierarchical Analysis', 'Data Mining', 'Pattern Recognition', 'Unsupervised Learning']), + +('PCA', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 85, 70, + ARRAY['Dimensionality reduction', 'Feature extraction', 'Data visualization', 'Data preprocessing'], + ARRAY['Simple', 'Fast', 'Widely used', 'Good documentation', 'Interpretable'], + ARRAY['Linear only', 'Information loss', 'Parameter sensitive', 'Limited complexity'], + 'BSD', + ARRAY['Dimensionality Reduction', 'Feature Extraction', 'Data Visualization', 'Data Preprocessing', 'Data Analytics']), +('t-SNE', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 75, 80, + ARRAY['Dimensionality reduction', 'Data visualization', 'Feature extraction', 'Data exploration'], + ARRAY['Non-linear', 'Good visualization', 'Preserves structure', 'Widely used', 'Effective'], + ARRAY['Slow performance', 'Parameter sensitive', 'Stochastic', 'Memory intensive'], + 'BSD', + ARRAY['Dimensionality Reduction', 'Data Visualization', 'Feature Extraction', 'Data Exploration', 'Data Analytics']), +('UMAP', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 80, 85, + ARRAY['Dimensionality reduction', 'Data visualization', 'Feature extraction', 'Data exploration'], + ARRAY['Fast performance', 'Good visualization', 'Preserves structure', 'Scalable', 'Modern'], + ARRAY['Parameter sensitive', 'Complex implementation', 'Limited documentation', 'New technology'], + 'BSD', + ARRAY['Dimensionality Reduction', 'Data Visualization', 'Feature Extraction', 'Data Exploration', 'Data Analytics']), +('LDA', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 80, 75, + ARRAY['Topic modeling', 'Text analysis', 'Document classification', 'Feature extraction'], + ARRAY['Topic modeling', 'Interpretable', 'Unsupervised', 'Text focused', 'Widely used'], + ARRAY['Linear assumption', 'Parameter sensitive', 'Limited complexity', 'Text specific'], + 'BSD', + ARRAY['Topic Modeling', 'Text Analysis', 'Document Classification', 'Feature Extraction', 'Text Analytics']), +('NMF', 'machine-learning', ARRAY['python', 'r', 'java', 'c++'], false, false, true, 75, 72, + ARRAY['Topic modeling', 'Dimensionality reduction', 'Feature extraction', 'Data mining'], + ARRAY['Non-negative', 'Interpretable', 'Unsupervised', 'Flexible', 'Widely used'], + ARRAY['Parameter sensitive', 'Limited complexity', 'Slow performance', 'Memory intensive'], + 'BSD', + ARRAY['Topic Modeling', 'Dimensionality Reduction', 'Feature Extraction', 'Data Mining', 'Text Analytics']), +('Gensim', 'nlp', ARRAY['python'], false, false, true, 80, 80, + ARRAY['Topic modeling', 'Word embeddings', 'Document similarity', 'Text analysis'], + ARRAY['Topic modeling', 'Word embeddings', 'Document similarity', 'Text focused', 'Easy to use'], + ARRAY['Limited deep learning', 'Python only', 'Small community', 'Limited documentation'], + 'LGPL', + ARRAY['Topic Modeling', 'Word Embeddings', 'Document Similarity', 'Text Analysis', 'Text Analytics']), +('spaCy', 'nlp', ARRAY['python'], false, false, true, 85, 85, + ARRAY['Text processing', 'Named entity recognition', 'Dependency parsing', 'Text classification'], + ARRAY['Industrial strength', 'Fast performance', 'Pre-trained models', 'Good documentation', 'Production ready'], + ARRAY['Limited deep learning', 'Python only', 'Memory intensive', 'Complex setup'], + 'MIT', + ARRAY['Text Processing', 'Named Entity Recognition', 'Dependency Parsing', 'Text Classification', 'NLP Applications']), +('NLTK', 'nlp', ARRAY['python'], false, false, true, 90, 75, + ARRAY['Text processing', 'Tokenization', 'Stemming', 'Lemmatization', 'Text analysis'], + ARRAY['Comprehensive', 'Educational', 'Well documented', 'Large corpus', 'Easy to learn'], + ARRAY['Slow performance', 'Academic focus', 'Limited production use', 'Memory intensive'], + 'Apache 2.0', + ARRAY['Text Processing', 'Tokenization', 'Stemming', 'Lemmatization', 'Text Analytics', 'Education']), +('Stanford NLP', 'nlp', ARRAY['java'], false, false, true, 70, 88, + ARRAY['Text processing', 'Named entity recognition', 'Dependency parsing', 'Coreference resolution'], + ARRAY['High accuracy', 'Comprehensive', 'Research oriented', 'Well documented', 'Academic backing'], + ARRAY['Java only', 'Slow performance', 'Complex setup', 'Limited community'], + 'GPL', + ARRAY['Text Processing', 'Named Entity Recognition', 'Dependency Parsing', 'Coreference Resolution', 'Research NLP']), +('OpenNLP', 'nlp', ARRAY['java'], false, false, true, 75, 80, + ARRAY['Text processing', 'Named entity recognition', 'Tokenization', 'Sentence detection'], + ARRAY['Open source', 'Java based', 'Machine learning', 'Production ready', 'Well documented'], + ARRAY['Java only', 'Limited features', 'Slow performance', 'Small community'], + 'Apache 2.0', + ARRAY['Text Processing', 'Named Entity Recognition', 'Tokenization', 'Sentence Detection', 'Java NLP']), +('Apache OpenNLP', 'nlp', ARRAY['java'], false, false, true, 75, 80, + ARRAY['Text processing', 'Named entity recognition', 'Tokenization', 'Sentence detection'], + ARRAY['Apache backing', 'Open source', 'Machine learning', 'Production ready', 'Well documented'], + ARRAY['Java only', 'Limited features', 'Slow performance', 'Small community'], + 'Apache 2.0', + ARRAY['Text Processing', 'Named Entity Recognition', 'Tokenization', 'Sentence Detection', 'Apache NLP']), +('CoreNLP', 'nlp', ARRAY['java'], false, false, true, 70, 88, + ARRAY['Text processing', 'Named entity recognition', 'Dependency parsing', 'Coreference resolution'], + ARRAY['Stanford backing', 'High accuracy', 'Comprehensive', 'Research oriented', 'Well documented'], + ARRAY['Java only', 'Slow performance', 'Complex setup', 'Limited community'], + 'GPL', + ARRAY['Text Processing', 'Named Entity Recognition', 'Dependency Parsing', 'Coreference Resolution', 'Stanford NLP']), +('BERT', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 95, + ARRAY['Text classification', 'Question answering', 'Named entity recognition', 'Text generation'], + ARRAY['State-of-the-art', 'Pre-trained', 'Transfer learning', 'Google backing', 'Versatile'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Classification', 'Question Answering', 'Named Entity Recognition', 'Text Generation', 'Advanced NLP']), +('GPT', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 96, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['State-of-the-art', 'Large scale', 'OpenAI backing', 'Versatile', 'Creative'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference', 'Costly'], + 'MIT', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Advanced NLP']), +('T5', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 94, + ARRAY['Text generation', 'Translation', 'Summarization', 'Question answering'], + ARRAY['Text-to-text', 'Versatile', 'Google backing', 'Pre-trained', 'State-of-the-art'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Translation', 'Summarization', 'Question Answering', 'Advanced NLP']), +('RoBERTa', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 94, + ARRAY['Text classification', 'Question answering', 'Named entity recognition', 'Text generation'], + ARRAY['Optimized BERT', 'High accuracy', 'Facebook backing', 'Pre-trained', 'Robust'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Classification', 'Question Answering', 'Named Entity Recognition', 'Text Generation', 'Advanced NLP']), +('DistilBERT', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 85, 90, + ARRAY['Text classification', 'Question answering', 'Named entity recognition', 'Text generation'], + ARRAY['Lightweight BERT', 'Fast inference', 'Good accuracy', 'Hugging Face', 'Production ready'], + ARRAY['Less accurate', 'Limited features', 'Resource intensive', 'Complex setup'], + 'Apache 2.0', + ARRAY['Text Classification', 'Question Answering', 'Named Entity Recognition', 'Text Generation', 'Lightweight NLP']), +('ALBERT', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 92, + ARRAY['Text classification', 'Question answering', 'Named entity recognition', 'Text generation'], + ARRAY['Parameter efficient', 'Good accuracy', 'Google backing', 'Pre-trained', 'Lightweight'], + ARRAY['Complex training', 'Resource intensive', 'Limited features', 'Complex setup'], + 'Apache 2.0', + ARRAY['Text Classification', 'Question Answering', 'Named Entity Recognition', 'Text Generation', 'Efficient NLP']), + +('ELECTRA', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 92, + ARRAY['Text classification', 'Question answering', 'Named entity recognition', 'Text generation'], + ARRAY['Efficient training', 'High accuracy', 'Google backing', 'Pre-trained', 'Innovative'], + ARRAY['Complex training', 'Resource intensive', 'Limited features', 'Complex setup'], + 'Apache 2.0', + ARRAY['Text Classification', 'Question Answering', 'Named Entity Recognition', 'Text Generation', 'Efficient NLP']), +('XLNet', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 93, + ARRAY['Text classification', 'Question answering', 'Named entity recognition', 'Text generation'], + ARRAY['Autoregressive', 'High accuracy', 'CMU backing', 'Pre-trained', 'State-of-the-art'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Classification', 'Question Answering', 'Named Entity Recognition', 'Text Generation', 'Advanced NLP']), +('GPT-2', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 90, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Pre-trained', 'OpenAI backing', 'Versatile', 'Creative'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'MIT', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Open Source NLP']), +('GPT-3', 'nlp', ARRAY['python', 'javascript', 'curl'], false, true, true, 90, 97, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['State-of-the-art', 'Large scale', 'OpenAI backing', 'Versatile', 'Creative'], + ARRAY['API only', 'Expensive', 'Rate limits', 'External dependency'], + 'Proprietary', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Enterprise NLP']), +('GPT-4', 'nlp', ARRAY['python', 'javascript', 'curl'], false, true, true, 95, 98, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['State-of-the-art', 'Large scale', 'OpenAI backing', 'Versatile', 'Creative'], + ARRAY['API only', 'Very expensive', 'Rate limits', 'External dependency'], + 'Proprietary', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Enterprise NLP']), +('Claude', 'nlp', ARRAY['python', 'javascript', 'curl'], false, true, true, 90, 97, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['State-of-the-art', 'Large scale', 'Anthropic backing', 'Safe AI', 'Versatile'], + ARRAY['API only', 'Expensive', 'Rate limits', 'External dependency'], + 'Proprietary', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Safe AI']), +('Llama', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 94, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Large scale', 'Meta backing', 'Versatile', 'Creative'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Open Source NLP']), +('Llama 2', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 95, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Large scale', 'Meta backing', 'Versatile', 'Improved'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Open Source NLP']), +('Mistral', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 93, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Efficient', 'Mistral AI backing', 'Versatile', 'Fast'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'New technology'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Efficient NLP']), +('Mixtral', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 94, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Mixture of experts', 'Mistral AI backing', 'Versatile', 'High quality'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'New technology'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Advanced NLP']), +('Falcon', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 92, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Efficient', 'TII backing', 'Versatile', 'Fast'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'New technology'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Open Source NLP']), +('BLOOM', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 90, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Multilingual', 'BigScience backing', 'Versatile', 'Large scale'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Multilingual NLP']), +('GPT-NeoX', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 90, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Large scale', 'EleutherAI backing', 'Versatile', 'Community driven'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Open Source NLP']), +('OPT', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 90, + ARRAY['Text generation', 'Language modeling', 'Chatbots', 'Content creation'], + ARRAY['Open source', 'Meta backing', 'Versatile', 'Pre-trained', 'Accessible'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Language Modeling', 'Chatbots', 'Content Creation', 'Open Source NLP']), +('BART', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 92, + ARRAY['Text generation', 'Summarization', 'Translation', 'Question answering'], + ARRAY['Denoising autoencoder', 'High accuracy', 'Facebook backing', 'Pre-trained', 'Versatile'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Slow inference'], + 'Apache 2.0', + ARRAY['Text Generation', 'Summarization', 'Translation', 'Question Answering', 'Advanced NLP']), +('Pegasus', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 90, + ARRAY['Text generation', 'Summarization', 'Abstractive summarization', 'Content creation'], + ARRAY['Summarization focused', 'High quality', 'Google backing', 'Pre-trained', 'Specialized'], + ARRAY['Large model', 'Resource intensive', 'Complex setup', 'Limited scope'], + 'Apache 2.0', + ARRAY['Text Generation', 'Summarization', 'Abstractive Summarization', 'Content Creation', 'Specialized NLP']), +('T5-small', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 85, 85, + ARRAY['Text generation', 'Translation', 'Summarization', 'Question answering'], + ARRAY['Lightweight', 'Fast inference', 'Google backing', 'Pre-trained', 'Accessible'], + ARRAY['Less accurate', 'Limited features', 'Resource intensive', 'Complex setup'], + 'Apache 2.0', + ARRAY['Text Generation', 'Translation', 'Summarization', 'Question Answering', 'Lightweight NLP']), +('T5-base', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 88, + ARRAY['Text generation', 'Translation', 'Summarization', 'Question answering'], + ARRAY['Balanced', 'Good accuracy', 'Google backing', 'Pre-trained', 'Versatile'], + ARRAY['Resource intensive', 'Complex setup', 'Slow inference', 'Large model'], + 'Apache 2.0', + ARRAY['Text Generation', 'Translation', 'Summarization', 'Question Answering', 'Balanced NLP']), +('T5-large', 'nlp', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 92, + ARRAY['Text generation', 'Translation', 'Summarization', 'Question answering'], + ARRAY['High accuracy', 'Large scale', 'Google backing', 'Pre-trained', 'State-of-the-art'], + ARRAY['Very resource intensive', 'Complex setup', 'Very slow inference', 'Very large model'], + 'Apache 2.0', + ARRAY['Text Generation', 'Translation', 'Summarization', 'Question Answering', 'Large Scale NLP']), + +('YOLO', 'computer-vision', ARRAY['python', 'c++', 'javascript'], true, true, true, 75, 90, + ARRAY['Object detection', 'Real-time detection', 'Image processing', 'Computer vision'], + ARRAY['Real-time', 'High accuracy', 'Single pass', 'Widely used', 'Open source'], + ARRAY['Complex training', 'Resource intensive', 'Limited to detection', 'Parameter sensitive'], + 'GPL', + ARRAY['Object Detection', 'Real-time Detection', 'Image Processing', 'Computer Vision', 'Real-time Vision']), +('SSD', 'computer-vision', ARRAY['python', 'c++', 'javascript'], true, true, true, 80, 88, + ARRAY['Object detection', 'Real-time detection', 'Image processing', 'Computer vision'], + ARRAY['Real-time', 'Multi-scale', 'Good accuracy', 'Widely used', 'Open source'], + ARRAY['Complex training', 'Resource intensive', 'Limited to detection', 'Parameter sensitive'], + 'Apache 2.0', + ARRAY['Object Detection', 'Real-time Detection', 'Image Processing', 'Computer Vision', 'Multi-scale Vision']), +('Faster R-CNN', 'computer-vision', ARRAY['python', 'c++', 'javascript'], true, true, true, 70, 92, + ARRAY['Object detection', 'Image processing', 'Computer vision', 'Feature extraction'], + ARRAY['High accuracy', 'Region proposal', 'Widely used', 'Open source', 'Research oriented'], + ARRAY['Slow inference', 'Complex training', 'Resource intensive', 'Parameter sensitive'], + 'MIT', + ARRAY['Object Detection', 'Image Processing', 'Computer Vision', 'Feature Extraction', 'High Accuracy Vision']), +('Mask R-CNN', 'computer-vision', ARRAY['python', 'c++', 'javascript'], true, true, true, 65, 93, + ARRAY['Object detection', 'Instance segmentation', 'Image processing', 'Computer vision'], + ARRAY['Instance segmentation', 'High accuracy', 'Facebook backing', 'Open source', 'Research oriented'], + ARRAY['Very slow inference', 'Very complex training', 'Very resource intensive', 'Parameter sensitive'], + 'Apache 2.0', + ARRAY['Object Detection', 'Instance Segmentation', 'Image Processing', 'Computer Vision', 'Segmentation Vision']), +('ResNet', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 85, 90, + ARRAY['Image classification', 'Feature extraction', 'Transfer learning', 'Computer vision'], + ARRAY['Deep architecture', 'Residual connections', 'High accuracy', 'Microsoft backing', 'Widely used'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Parameter sensitive'], + 'MIT', + ARRAY['Image Classification', 'Feature Extraction', 'Transfer Learning', 'Computer Vision', 'Deep Vision']), +('VGG', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 85, 88, + ARRAY['Image classification', 'Feature extraction', 'Transfer learning', 'Computer vision'], + ARRAY['Simple architecture', 'Good accuracy', 'Oxford backing', 'Widely used', 'Standard benchmark'], + ARRAY['Large parameters', 'Resource intensive', 'Slow training', 'Parameter sensitive'], + 'MIT', + ARRAY['Image Classification', 'Feature Extraction', 'Transfer Learning', 'Computer Vision', 'Standard Vision']), +('Inception', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 90, + ARRAY['Image classification', 'Feature extraction', 'Transfer learning', 'Computer vision'], + ARRAY['Inception modules', 'Efficient', 'Google backing', 'High accuracy', 'Widely used'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Parameter sensitive'], + 'Apache 2.0', + ARRAY['Image Classification', 'Feature Extraction', 'Transfer Learning', 'Computer Vision', 'Efficient Vision']), +('MobileNet', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 90, 82, + ARRAY['Image classification', 'Mobile vision', 'Edge computing', 'Computer vision'], + ARRAY['Lightweight', 'Fast inference', 'Google backing', 'Mobile optimized', 'Efficient'], + ARRAY['Less accurate', 'Limited complexity', 'Resource intensive', 'Parameter sensitive'], + 'Apache 2.0', + ARRAY['Image Classification', 'Mobile Vision', 'Edge Computing', 'Computer Vision', 'Lightweight Vision']), +('EfficientNet', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 85, 88, + ARRAY['Image classification', 'Feature extraction', 'Transfer learning', 'Computer vision'], + ARRAY['Efficient scaling', 'Good accuracy', 'Google backing', 'Balanced', 'Widely used'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Parameter sensitive'], + 'Apache 2.0', + ARRAY['Image Classification', 'Feature Extraction', 'Transfer Learning', 'Computer Vision', 'Efficient Vision']), +('DenseNet', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 88, + ARRAY['Image classification', 'Feature extraction', 'Transfer learning', 'Computer vision'], + ARRAY['Dense connections', 'Parameter efficient', 'Good accuracy', 'Facebook backing', 'Widely used'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Parameter sensitive'], + 'BSD', + ARRAY['Image Classification', 'Feature Extraction', 'Transfer Learning', 'Computer Vision', 'Efficient Vision']), +('AlexNet', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 90, 80, + ARRAY['Image classification', 'Feature extraction', 'Transfer learning', 'Computer vision'], + ARRAY['Pioneering', 'Simple architecture', 'Good accuracy', 'Toronto backing', 'Historical'], + ARRAY['Outdated', 'Large parameters', 'Resource intensive', 'Limited complexity'], + 'BSD', + ARRAY['Image Classification', 'Feature Extraction', 'Transfer Learning', 'Computer Vision', 'Historical Vision']), +('LeNet', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 95, 75, + ARRAY['Image classification', 'Handwritten digits', 'Feature extraction', 'Computer vision'], + ARRAY['Pioneering', 'Very simple', 'Lightweight', 'Fast inference', 'Educational'], + ARRAY['Very outdated', 'Very limited', 'Low accuracy', 'Simple architecture'], + 'BSD', + ARRAY['Image Classification', 'Handwritten Digits', 'Feature Extraction', 'Computer Vision', 'Educational Vision']), +('U-Net', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 90, + ARRAY['Image segmentation', 'Medical imaging', 'Biomedical vision', 'Computer vision'], + ARRAY['U-shaped architecture', 'Good for segmentation', 'Biomedical focus', 'Open source', 'Widely used'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Limited to segmentation'], + 'MIT', + ARRAY['Image Segmentation', 'Medical Imaging', 'Biomedical Vision', 'Computer Vision', 'Segmentation Vision']), +('DeepLab', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 88, + ARRAY['Image segmentation', 'Semantic segmentation', 'Computer vision', 'Image processing'], + ARRAY['Semantic segmentation', 'High accuracy', 'Google backing', 'Open source', 'Research oriented'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Limited to segmentation'], + 'Apache 2.0', + ARRAY['Image Segmentation', 'Semantic Segmentation', 'Computer Vision', 'Image Processing', 'Segmentation Vision']), +('FCN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 85, + ARRAY['Image segmentation', 'Semantic segmentation', 'Computer vision', 'Image processing'], + ARRAY['Fully convolutional', 'Good for segmentation', 'Pioneering', 'Open source', 'Widely used'], + ARRAY['Complex architecture', 'Resource intensive', 'Slow training', 'Limited to segmentation'], + 'BSD', + ARRAY['Image Segmentation', 'Semantic Segmentation', 'Computer Vision', 'Image Processing', 'Segmentation Vision']), +('StyleGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 65, 92, + ARRAY['Image generation', 'Style transfer', 'Art generation', 'Computer vision'], + ARRAY['Style-based', 'High quality', 'NVIDIA backing', 'Open source', 'Creative'], + ARRAY['Very complex', 'Very resource intensive', 'Very slow training', 'Limited applications'], + 'CC-BY-NC', + ARRAY['Image Generation', 'Style Transfer', 'Art Generation', 'Computer Vision', 'Generative Vision']), +('CycleGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 88, + ARRAY['Image generation', 'Style transfer', 'Domain adaptation', 'Computer vision'], + ARRAY['Cycle consistency', 'No paired data', 'Good quality', 'Open source', 'Versatile'], + ARRAY['Complex training', 'Resource intensive', 'Slow training', 'Unstable results'], + 'Apache 2.0', + ARRAY['Image Generation', 'Style Transfer', 'Domain Adaptation', 'Computer Vision', 'Generative Vision']), +('Pix2Pix', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 85, + ARRAY['Image generation', 'Image translation', 'Style transfer', 'Computer vision'], + ARRAY['Paired data', 'Good quality', 'Open source', 'Versatile', 'Reliable'], + ARRAY['Requires paired data', 'Complex training', 'Resource intensive', 'Slow training'], + 'Apache 2.0', + ARRAY['Image Generation', 'Image Translation', 'Style Transfer', 'Computer Vision', 'Generative Vision']), +('DCGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 80, 82, + ARRAY['Image generation', 'Art generation', 'Creative AI', 'Computer vision'], + ARRAY['Deep convolutional', 'Good quality', 'Pioneering', 'Open source', 'Widely used'], + ARRAY['Complex training', 'Resource intensive', 'Slow training', 'Unstable results'], + 'MIT', + ARRAY['Image Generation', 'Art Generation', 'Creative AI', 'Computer Vision', 'Generative Vision']), + +('ProGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 88, + ARRAY['Image generation', 'Progressive growing', 'Art generation', 'Computer vision'], + ARRAY['Progressive growing', 'High quality', 'NVIDIA backing', 'Open source', 'Stable'], + ARRAY['Complex training', 'Resource intensive', 'Slow training', 'Limited applications'], + 'CC-BY-NC', + ARRAY['Image Generation', 'Progressive Growing', 'Art Generation', 'Computer Vision', 'Generative Vision']), +('BigGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 65, 94, + ARRAY['Image generation', 'Large scale generation', 'Art generation', 'Computer vision'], + ARRAY['Large scale', 'High quality', 'Google backing', 'Open source', 'State-of-the-art'], + ARRAY['Very complex', 'Very resource intensive', 'Very slow training', 'Limited applications'], + 'Apache 2.0', + ARRAY['Image Generation', 'Large Scale Generation', 'Art Generation', 'Computer Vision', 'Advanced Vision']), +('SAGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 88, + ARRAY['Image generation', 'Self-attention', 'Art generation', 'Computer vision'], + ARRAY['Self-attention', 'Good quality', 'Open source', 'Innovative', 'Widely used'], + ARRAY['Complex training', 'Resource intensive', 'Slow training', 'Unstable results'], + 'MIT', + ARRAY['Image Generation', 'Self-attention', 'Art Generation', 'Computer Vision', 'Attention Vision']), +('StarGAN', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 85, + ARRAY['Image generation', 'Multi-domain translation', 'Style transfer', 'Computer vision'], + ARRAY['Multi-domain', 'Good quality', 'Open source', 'Versatile', 'Efficient'], + ARRAY['Complex training', 'Resource intensive', 'Slow training', 'Limited domains'], + 'Apache 2.0', + ARRAY['Image Generation', 'Multi-domain Translation', 'Style Transfer', 'Computer Vision', 'Multi-domain Vision']), +('NeRF', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 60, 92, + ARRAY['3D reconstruction', 'Novel view synthesis', '3D vision', 'Computer vision'], + ARRAY['Neural radiance fields', 'High quality', 'Innovative', 'Open source', 'Research oriented'], + ARRAY['Very complex', 'Very resource intensive', 'Very slow training', 'Limited applications'], + 'MIT', + ARRAY['3D Reconstruction', 'Novel View Synthesis', '3D Vision', 'Computer Vision', '3D Vision']), +('OpenCV', 'computer-vision', ARRAY['python', 'c++', 'java', 'javascript'], true, false, true, 85, 80, + ARRAY['Image processing', 'Computer vision', 'Feature detection', 'Real-time vision'], + ARRAY['Comprehensive', 'Real-time', 'Multi-language', 'Well documented', 'Industry standard'], + ARRAY['Limited deep learning', 'Complex API', 'Steep learning curve', 'Memory intensive'], + 'BSD', + ARRAY['Image Processing', 'Computer Vision', 'Feature Detection', 'Real-time Vision', 'Industrial Vision']), +('Dlib', 'computer-vision', ARRAY['python', 'c++'], false, false, true, 80, 82, + ARRAY['Face detection', 'Facial recognition', 'Feature extraction', 'Computer vision'], + ARRAY['Face focused', 'High accuracy', 'Well documented', 'Easy to use', 'Reliable'], + ARRAY['Limited scope', 'C++ focused', 'Limited deep learning', 'Small community'], + 'Boost', + ARRAY['Face Detection', 'Facial Recognition', 'Feature Extraction', 'Computer Vision', 'Face Vision']), +('MediaPipe', 'computer-vision', ARRAY['python', 'javascript', 'c++'], true, true, true, 85, 85, + ARRAY['Real-time vision', 'Mobile vision', 'Face detection', 'Hand tracking'], + ARRAY['Real-time', 'Mobile optimized', 'Google backing', 'Pre-built models', 'Easy to use'], + ARRAY['Limited customization', 'Google dependency', 'Limited deep learning', 'Resource intensive'], + 'Apache 2.0', + ARRAY['Real-time Vision', 'Mobile Vision', 'Face Detection', 'Hand Tracking', 'Mobile Vision']), +('Detectron2', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 75, 90, + ARRAY['Object detection', 'Instance segmentation', 'Computer vision', 'Research'], + ARRAY['Facebook backing', 'High quality', 'Modular', 'Research oriented', 'State-of-the-art'], + ARRAY['Complex setup', 'Resource intensive', 'Steep learning curve', 'Limited production use'], + 'Apache 2.0', + ARRAY['Object Detection', 'Instance Segmentation', 'Computer Vision', 'Research', 'Research Vision']), +('MMDetection', 'computer-vision', ARRAY['python', 'tensorflow', 'pytorch'], true, true, true, 70, 90, + ARRAY['Object detection', 'Instance segmentation', 'Computer vision', 'Research'], + ARRAY['Comprehensive', 'High quality', 'Open source', 'Research oriented', 'Modular'], + ARRAY['Complex setup', 'Resource intensive', 'Steep learning curve', 'Limited production use'], + 'Apache 2.0', + ARRAY['Object Detection', 'Instance Segmentation', 'Computer Vision', 'Research', 'Research Vision']), +('Albumentations', 'computer-vision', ARRAY['python'], false, false, true, 90, 75, + ARRAY['Image augmentation', 'Data preprocessing', 'Computer vision', 'ML pipelines'], + ARRAY['Fast augmentation', 'Comprehensive', 'Well documented', 'Easy to use', 'Production ready'], + ARRAY['Limited to augmentation', 'Python only', 'Limited deep learning', 'Small scope'], + 'MIT', + ARRAY['Image Augmentation', 'Data Preprocessing', 'Computer Vision', 'ML Pipelines', 'Data Augmentation']), +('Imgaug', 'computer-vision', ARRAY['python'], false, false, true, 85, 75, + ARRAY['Image augmentation', 'Data preprocessing', 'Computer vision', 'ML pipelines'], + ARRAY['Comprehensive', 'Flexible', 'Well documented', 'Easy to use', 'Production ready'], + ARRAY['Limited to augmentation', 'Python only', 'Limited deep learning', 'Small scope'], + 'MIT', + ARRAY['Image Augmentation', 'Data Preprocessing', 'Computer Vision', 'ML Pipelines', 'Data Augmentation']), +('Kornia', 'computer-vision', ARRAY['python', 'c++'], true, false, true, 80, 80, + ARRAY['Image processing', 'Computer vision', 'Differentiable operations', 'Deep learning'], + ARRAY['Differentiable', 'GPU accelerated', 'PyTorch integration', 'Comprehensive', 'Research oriented'], + ARRAY['PyTorch dependency', 'Complex API', 'Steep learning curve', 'Limited documentation'], + 'Apache 2.0', + ARRAY['Image Processing', 'Computer Vision', 'Differentiable Operations', 'Deep Learning', 'Research Vision']), +('TensorFlow Lite', 'deep-learning', ARRAY['python', 'java', 'c++', 'javascript'], false, true, true, 90, 85, + ARRAY['Mobile ML', 'Edge computing', 'Model deployment', 'Embedded systems'], + ARRAY['Mobile optimized', 'Google backing', 'Production ready', 'Multi-platform', 'Efficient'], + ARRAY['Limited models', 'Google dependency', 'Limited flexibility', 'Complex conversion'], + 'Apache 2.0', + ARRAY['Mobile ML', 'Edge Computing', 'Model Deployment', 'Embedded Systems', 'Mobile AI']), +('ONNX', 'deep-learning', ARRAY['python', 'c++', 'java', 'javascript'], false, true, true, 85, 88, + ARRAY['Model deployment', 'Cross-platform', 'Model optimization', 'ML interoperability'], + ARRAY['Cross-platform', 'Open standard', 'Multi-framework', 'Optimized', 'Production ready'], + ARRAY['Complex conversion', 'Limited features', 'Steep learning curve', 'Limited debugging'], + 'MIT', + ARRAY['Model Deployment', 'Cross-platform', 'Model Optimization', 'ML Interoperability', 'Model Deployment']), +('TensorFlow.js', 'deep-learning', ARRAY['javascript', 'python', 'typescript'], false, true, true, 85, 82, + ARRAY['Web ML', 'Browser deployment', 'Client-side ML', 'Web applications'], + ARRAY['Browser based', 'Google backing', 'Easy integration', 'Web optimized', 'Production ready'], + ARRAY['Limited models', 'Browser limitations', 'Performance constraints', 'JavaScript dependency'], + 'Apache 2.0', + ARRAY['Web ML', 'Browser Deployment', 'Client-side ML', 'Web Applications', 'Web AI']), +('PyTorch Lightning', 'deep-learning', ARRAY['python'], true, true, true, 85, 90, + ARRAY['Deep learning', 'Research', 'Production', 'Model training'], + ARRAY['Simplified training', 'Good abstractions', 'Research oriented', 'Production ready', 'Popular'], + ARRAY['Abstraction overhead', 'Limited flexibility', 'Steep learning curve', 'PyTorch dependency'], + 'Apache 2.0', + ARRAY['Deep Learning', 'Research', 'Production', 'Model Training', 'Research AI']), +('Hugging Face Transformers', 'nlp', ARRAY['python', 'javascript', 'rust'], true, true, true, 85, 95, + ARRAY['NLP models', 'Transformers', 'Pre-trained models', 'Text processing'], + ARRAY['Comprehensive', 'Easy to use', 'Large model hub', 'Community driven', 'State-of-the-art'], + ARRAY['Large dependencies', 'Resource intensive', 'Complex API', 'Steep learning curve'], + 'Apache 2.0', + ARRAY['NLP Models', 'Transformers', 'Pre-trained Models', 'Text Processing', 'Advanced NLP']), +('Sentence Transformers', 'nlp', ARRAY['python'], true, true, true, 85, 88, + ARRAY['Sentence embeddings', 'Semantic search', 'Text similarity', 'NLP applications'], + ARRAY['Sentence focused', 'Easy to use', 'Good performance', 'Well documented', 'Popular'], + ARRAY['Limited scope', 'Resource intensive', 'Complex setup', 'Limited customization'], + 'Apache 2.0', + ARRAY['Sentence Embeddings', 'Semantic Search', 'Text Similarity', 'NLP Applications', 'Text Analytics']), + +('LangChain', 'nlp', ARRAY['python', 'javascript'], false, true, true, 85, 85, + ARRAY['LLM applications', 'Chain building', 'Agent development', 'NLP pipelines'], + ARRAY['Chain composition', 'Agent framework', 'Multi-LLM support', 'Well documented', 'Popular'], + ARRAY['Complex framework', 'Steep learning curve', 'Abstraction overhead', 'Limited production use'], + 'MIT', + ARRAY['LLM Applications', 'Chain Building', 'Agent Development', 'NLP Pipelines', 'Agent AI']), +('LlamaIndex', 'nlp', ARRAY['python', 'javascript'], false, true, true, 85, 85, + ARRAY['LLM applications', 'Data indexing', 'Retrieval augmentation', 'NLP pipelines'], + ARRAY['Data indexing', 'Retrieval focused', 'Multi-LLM support', 'Well documented', 'Popular'], + ARRAY['Complex framework', 'Steep learning curve', 'Abstraction overhead', 'Limited production use'], + 'MIT', + ARRAY['LLM Applications', 'Data Indexing', 'Retrieval Augmentation', 'NLP Pipelines', 'Retrieval AI']), +('Haystack', 'nlp', ARRAY['python'], false, true, true, 80, 85, + ARRAY['Question answering', 'Document search', 'NLP pipelines', 'Information retrieval'], + ARRAY['QA focused', 'Document processing', 'Modular', 'Well documented', 'Production ready'], + ARRAY['Limited scope', 'Python only', 'Complex setup', 'Steep learning curve'], + 'Apache 2.0', + ARRAY['Question Answering', 'Document Search', 'NLP Pipelines', 'Information Retrieval', 'Search AI']), +('FAISS', 'machine-learning', ARRAY['python', 'c++'], true, false, true, 80, 85, + ARRAY['Similarity search', 'Vector search', 'Embedding search', 'Information retrieval'], + ARRAY['Fast search', 'Scalable', 'Facebook backing', 'Well documented', 'Production ready'], + ARRAY['Limited to search', 'Complex setup', 'Memory intensive', 'Limited features'], + 'MIT', + ARRAY['Similarity Search', 'Vector Search', 'Embedding Search', 'Information Retrieval', 'Search AI']), +('Annoy', 'machine-learning', ARRAY['python', 'c++', 'java'], false, false, true, 85, 80, + ARRAY['Similarity search', 'Vector search', 'Embedding search', 'Information retrieval'], + ARRAY['Fast search', 'Memory efficient', 'Spotify backing', 'Easy to use', 'Production ready'], + ARRAY['Limited to search', 'Limited features', 'Small community', 'Limited documentation'], + 'Apache 2.0', + ARRAY['Similarity Search', 'Vector Search', 'Embedding Search', 'Information Retrieval', 'Search AI']), +('Milvus', 'machine-learning', ARRAY['python', 'c++', 'java', 'go'], true, true, true, 75, 88, + ARRAY['Similarity search', 'Vector database', 'Embedding search', 'Information retrieval'], + ARRAY['Vector database', 'Scalable', 'Cloud native', 'Multi-language', 'Production ready'], + ARRAY['Complex setup', 'Resource intensive', 'Steep learning curve', 'Limited features'], + 'Apache 2.0', + ARRAY['Similarity Search', 'Vector Database', 'Embedding Search', 'Information Retrieval', 'Vector Database']), +('Pinecone', 'machine-learning', ARRAY['python', 'javascript', 'curl'], false, true, true, 90, 90, + ARRAY['Similarity search', 'Vector database', 'Embedding search', 'Information retrieval'], + ARRAY['Managed service', 'Easy to use', 'Scalable', 'Fast performance', 'Production ready'], + ARRAY['Proprietary', 'Costly', 'External dependency', 'Limited control'], + 'Proprietary', + ARRAY['Similarity Search', 'Vector Database', 'Embedding Search', 'Information Retrieval', 'Managed AI']), +('Weaviate', 'machine-learning', ARRAY['python', 'javascript', 'go', 'java'], true, true, true, 80, 88, + ARRAY['Similarity search', 'Vector database', 'Embedding search', 'Information retrieval'], + ARRAY['GraphQL API', 'Multi-modal', 'Cloud native', 'Open source', 'Production ready'], + ARRAY['Complex setup', 'Resource intensive', 'Steep learning curve', 'Limited features'], + 'BSD', + ARRAY['Similarity Search', 'Vector Database', 'Embedding Search', 'Information Retrieval', 'Vector Database']), +('Chroma', 'machine-learning', ARRAY['python', 'javascript'], false, true, true, 90, 82, + ARRAY['Similarity search', 'Vector database', 'Embedding search', 'Information retrieval'], + ARRAY['Easy to use', 'Lightweight', 'Open source', 'Python focused', 'Production ready'], + ARRAY['Limited scalability', 'Limited features', 'Small community', 'Limited documentation'], + 'Apache 2.0', + ARRAY['Similarity Search', 'Vector Database', 'Embedding Search', 'Information Retrieval', 'Lightweight AI']), +('Qdrant', 'machine-learning', ARRAY['python', 'rust', 'go', 'java'], true, true, true, 80, 88, + ARRAY['Similarity search', 'Vector database', 'Embedding search', 'Information retrieval'], + ARRAY['Rust based', 'Fast performance', 'Cloud native', 'Open source', 'Production ready'], + ARRAY['Complex setup', 'Resource intensive', 'Steep learning curve', 'Limited features'], + 'Apache 2.0', + ARRAY['Similarity Search', 'Vector Database', 'Embedding Search', 'Information Retrieval', 'Vector Database']), +('Redis', 'machine-learning', ARRAY['python', 'javascript', 'java', 'c++'], false, true, true, 85, 80, + ARRAY['Vector search', 'Caching', 'Real-time search', 'Information retrieval'], + ARRAY['Fast performance', 'Scalable', 'Production ready', 'Multi-language', 'Well established'], + ARRAY['Limited ML features', 'Complex setup', 'Resource intensive', 'Steep learning curve'], + 'BSD', + ARRAY['Vector Search', 'Caching', 'Real-time Search', 'Information Retrieval', 'Real-time AI']), +('Elasticsearch', 'machine-learning', ARRAY['python', 'javascript', 'java'], false, true, true, 75, 82, + ARRAY['Vector search', 'Full-text search', 'Information retrieval', 'Data analytics'], + ARRAY['Full-text search', 'Scalable', 'Production ready', 'Multi-language', 'Well established'], + ARRAY['Limited ML features', 'Complex setup', 'Resource intensive', 'Steep learning curve'], + 'ELv2', + ARRAY['Vector Search', 'Full-text Search', 'Information Retrieval', 'Data Analytics', 'Search AI']), +('OpenSearch', 'machine-learning', ARRAY['python', 'javascript', 'java'], false, true, true, 75, 82, + ARRAY['Vector search', 'Full-text search', 'Information retrieval', 'Data analytics'], + ARRAY['Open source', 'Full-text search', 'Scalable', 'Production ready', 'Multi-language'], + ARRAY['Limited ML features', 'Complex setup', 'Resource intensive', 'Steep learning curve'], + 'Apache 2.0', + ARRAY['Vector Search', 'Full-text Search', 'Information Retrieval', 'Data Analytics', 'Open Source AI']), +('Typesense', 'machine-learning', ARRAY['python', 'javascript', 'go'], false, true, true, 85, 80, + ARRAY['Vector search', 'Full-text search', 'Information retrieval', 'Data analytics'], + ARRAY['Fast performance', 'Easy to use', 'Open source', 'Cloud native', 'Production ready'], + ARRAY['Limited features', 'Small community', 'Limited documentation', 'Limited scalability'], + 'Apache 2.0', + ARRAY['Vector Search', 'Full-text Search', 'Information Retrieval', 'Data Analytics', 'Fast Search']), +('Meilisearch', 'machine-learning', ARRAY['python', 'javascript', 'go', 'rust'], false, true, true, 90, 78, + ARRAY['Vector search', 'Full-text search', 'Information retrieval', 'Data analytics'], + ARRAY['Very fast', 'Easy to use', 'Open source', 'Lightweight', 'Production ready'], + ARRAY['Limited ML features', 'Limited features', 'Small community', 'Limited scalability'], + 'MIT', + ARRAY['Vector Search', 'Full-text Search', 'Information Retrieval', 'Data Analytics', 'Fast Search']), +('Solr', 'machine-learning', ARRAY['python', 'javascript', 'java'], false, true, true, 70, 80, + ARRAY['Vector search', 'Full-text search', 'Information retrieval', 'Data analytics'], + ARRAY['Enterprise grade', 'Scalable', 'Production ready', 'Well established', 'Feature rich'], + ARRAY['Complex setup', 'Resource intensive', 'Steep learning curve', 'Java focused'], + 'Apache 2.0', + ARRAY['Vector Search', 'Full-text Search', 'Information Retrieval', 'Data Analytics', 'Enterprise Search']), +('Whoosh', 'machine-learning', ARRAY['python'], false, false, true, 90, 75, + ARRAY['Vector search', 'Full-text search', 'Information retrieval', 'Data analytics'], + ARRAY['Pure Python', 'Easy to use', 'Lightweight', 'Well documented', 'Good for prototyping'], + ARRAY['Limited scalability', 'Limited features', 'Python only', 'Limited production use'], + 'BSD', + ARRAY['Vector Search', 'Full-text Search', 'Information Retrieval', 'Data Analytics', 'Lightweight Search']), +('Pympler', 'machine-learning', ARRAY['python'], false, false, true, 85, 75, + ARRAY['Memory profiling', 'Data analysis', 'Performance monitoring', 'ML optimization'], + ARRAY['Memory focused', 'Easy to use', 'Well documented', 'Python focused', 'Lightweight'], + ARRAY['Limited scope', 'Python only', 'Limited features', 'Small community'], + 'Apache 2.0', + ARRAY['Memory Profiling', 'Data Analysis', 'Performance Monitoring', 'ML Optimization', 'Performance AI']), +('Memory Profiler', 'machine-learning', ARRAY['python'], false, false, true, 90, 75, + ARRAY['Memory profiling', 'Data analysis', 'Performance monitoring', 'ML optimization'], + ARRAY['Easy to use', 'Well documented', 'Python focused', 'Lightweight', 'Production ready'], + ARRAY['Limited scope', 'Python only', 'Limited features', 'Small community'], + 'BSD', + ARRAY['Memory Profiling', 'Data Analysis', 'Performance Monitoring', 'ML Optimization', 'Performance AI']); + +-- ===================================================== +-- DATA INSERTION - TECH PRICING +-- ===================================================== + +INSERT INTO tech_pricing (tech_name, tech_category, price_tier_id, development_cost_usd, monthly_operational_cost_usd, license_cost_usd, training_cost_usd, maintenance_cost_percentage, cost_per_user_usd, min_cpu_cores, min_ram_gb, min_storage_gb, total_cost_of_ownership_score, price_performance_ratio) VALUES + +-- Frontend Technologies Pricing +('React', 'frontend', 1, 200, 0, 0, 100, 10, 0, 0.5, 1, 5, 95, 90), +('Vue.js', 'frontend', 1, 150, 0, 0, 50, 8, 0, 0.5, 1, 5, 98, 95), +('Angular', 'frontend', 2, 400, 0, 0, 300, 15, 0, 1, 2, 10, 85, 80), +('Svelte', 'frontend', 1, 180, 0, 0, 80, 8, 0, 0.25, 0.5, 3, 92, 95), +('Next.js', 'frontend', 2, 300, 20, 0, 150, 12, 0, 1, 2, 10, 88, 85), + +-- Backend Technologies Pricing +('Node.js', 'backend', 1, 150, 10, 0, 80, 8, 0, 0.5, 1, 5, 92, 88), +('Express.js', 'backend', 1, 100, 5, 0, 40, 5, 0, 0.5, 1, 5, 95, 92), +('Django', 'backend', 2, 300, 15, 0, 200, 12, 0, 1, 2, 10, 88, 85), +('FastAPI', 'backend', 1, 180, 12, 0, 60, 8, 0, 0.5, 1, 8, 90, 90), +('Spring Boot', 'backend', 3, 500, 25, 0, 400, 18, 0, 2, 4, 20, 82, 78), + +-- Database Technologies Pricing +('PostgreSQL', 'database', 1, 100, 15, 0, 120, 10, 0.001, 1, 2, 20, 90, 88), +('MongoDB', 'database', 2, 150, 30, 0, 100, 12, 0.002, 1, 2, 15, 85, 82), +('Redis', 'database', 1, 80, 20, 0, 60, 8, 0.0001, 0.5, 1, 5, 92, 90), +('SQLite', 'database', 1, 50, 0, 0, 20, 3, 0, 0.25, 0.5, 2, 98, 95), +('MySQL', 'database', 1, 80, 12, 0, 80, 8, 0.001, 1, 1, 10, 88, 85), + +-- Cloud Technologies Pricing +('AWS', 'cloud', 3, 200, 150, 0, 300, 15, 0.05, 2, 4, 50, 85, 82), +('Vercel', 'cloud', 1, 50, 20, 0, 30, 5, 0.02, 0.5, 1, 10, 90, 88), +('DigitalOcean', 'cloud', 2, 100, 50, 0, 50, 8, 0.03, 1, 2, 25, 88, 85), +('Railway', 'cloud', 1, 80, 25, 0, 40, 6, 0.01, 0.5, 1, 10, 92, 90), +('Netlify', 'cloud', 1, 40, 15, 0, 25, 4, 0.01, 0.5, 1, 5, 95, 92), + +-- Testing Technologies Pricing +('Jest', 'testing', 1, 100, 0, 0, 50, 5, 0, 0.5, 1, 3, 95, 92), +('Cypress', 'testing', 2, 200, 0, 0, 100, 8, 0, 1, 2, 8, 88, 85), +('Playwright', 'testing', 2, 180, 0, 0, 120, 10, 0, 1, 2, 10, 85, 82), +('Selenium', 'testing', 3, 300, 0, 0, 200, 15, 0, 2, 3, 15, 80, 78); + +-- ===================================================== +-- DATA INSERTION - MOBILE TECHNOLOGIES +-- ===================================================== + + +-- ===================================================== +-- DATA INSERTION - DEVOPS TECHNOLOGIES +-- ===================================================== + + + +-- ===================================================== +-- DATA INSERTION - MOBILE AND DEVOPS PRICING +-- ===================================================== + +INSERT INTO tech_pricing (tech_name, tech_category, price_tier_id, development_cost_usd, monthly_operational_cost_usd, license_cost_usd, training_cost_usd, maintenance_cost_percentage, cost_per_user_usd, min_cpu_cores, min_ram_gb, min_storage_gb, total_cost_of_ownership_score, price_performance_ratio) VALUES + +-- Mobile Technologies Pricing +('React Native', 'mobile', 2, 400, 0, 0, 200, 12, 0, 1, 2, 10, 88, 85), +('Flutter', 'mobile', 2, 450, 0, 0, 250, 15, 0, 1, 2, 12, 85, 82), +('Ionic', 'mobile', 1, 250, 0, 0, 100, 8, 0, 0.5, 1, 8, 92, 88), +('Swift (iOS)', 'mobile', 3, 800, 99, 0, 400, 20, 0, 2, 4, 20, 75, 70), +('Kotlin (Android)', 'mobile', 3, 750, 25, 0, 350, 18, 0, 2, 4, 20, 78, 72), + +-- DevOps Technologies Pricing +('Docker', 'devops', 1, 150, 0, 0, 120, 8, 0, 0.5, 1, 5, 90, 88), +('GitHub Actions', 'devops', 1, 100, 20, 0, 60, 5, 0, 0.25, 0.5, 2, 95, 92), +('Jenkins', 'devops', 2, 200, 50, 0, 300, 15, 0, 1, 2, 10, 82, 80), +('Kubernetes', 'devops', 3, 500, 100, 0, 600, 25, 0, 2, 4, 30, 78, 75), +('Terraform', 'devops', 2, 300, 0, 0, 250, 12, 0, 1, 2, 8, 85, 82), + +-- AI/ML Technologies Pricing +('TensorFlow', 'ai-ml', 2, 300, 50, 0, 400, 18, 0, 1, 4, 20, 80, 78), +('PyTorch', 'ai-ml', 2, 280, 45, 0, 350, 16, 0, 1, 4, 20, 82, 80), +('Scikit-learn', 'ai-ml', 1, 150, 0, 0, 100, 8, 0, 0.5, 2, 10, 95, 92), +('Hugging Face', 'ai-ml', 2, 200, 30, 0, 150, 10, 0.01, 1, 3, 15, 88, 85), +('OpenAI API', 'ai-ml', 3, 100, 200, 0, 50, 5, 0.05, 0.5, 1, 5, 75, 70); + +-- ===================================================== +-- DATA INSERTION - PRICE-BASED TECH STACKS +-- ===================================================== + +INSERT INTO price_based_stacks (stack_name, price_tier_id, total_monthly_cost_usd, total_setup_cost_usd, frontend_tech, backend_tech, database_tech, cloud_tech, testing_tech, mobile_tech, devops_tech, ai_ml_tech, suitable_project_scales, team_size_range, development_time_months, maintenance_complexity, scalability_ceiling, recommended_domains, success_rate_percentage, user_satisfaction_score, description, pros, cons) VALUES + +-- Micro Budget Stacks (5-25 USD) +('Ultra Budget Starter', 1, 15.00, 500.00, 'React', 'Node.js', 'SQLite', 'Netlify', 'Jest', NULL, 'GitHub Actions', NULL, + ARRAY['Personal projects', 'Learning'], '1-2', 2, 'low', 'small', + ARRAY['Portfolio websites', 'Small blogs', 'Learning projects', 'Personal tools'], + 88, 85, 'Perfect for beginners and personal projects with minimal hosting costs', + ARRAY['Extremely low cost', 'Great for learning', 'Simple deployment', 'Good performance for small projects'], + ARRAY['Limited scalability', 'No mobile support', 'Basic features only', 'Single developer focused']), + +('Free Tier Full Stack', 1, 20.00, 650.00, 'Vue.js', 'Express.js', 'PostgreSQL', 'Railway', 'Jest', NULL, 'GitHub Actions', NULL, + ARRAY['MVPs', 'Small projects'], '1-3', 3, 'low', 'small', + ARRAY['Startup MVPs', 'Small business websites', 'API development', 'Prototype applications'], + 85, 82, 'Complete full-stack solution using free tiers and minimal paid services', + ARRAY['Full database support', 'Real backend capabilities', 'Easy deployment', 'Cost-effective'], + ARRAY['Limited resources', 'Basic monitoring', 'No mobile app', 'Scaling limitations']), + +('Minimal VPS Stack', 1, 12.00, 400.00, 'Svelte', 'Express.js', 'SQLite', 'DigitalOcean', 'Jest', NULL, 'Docker', NULL, + ARRAY['Personal projects', 'Learning'], '1-2', 2, 'low', 'small', + ARRAY['Personal websites', 'Small tools', 'Learning projects', 'Prototypes'], + 82, 80, 'Ultra-minimal stack for absolute beginners with VPS hosting', + ARRAY['Lowest possible cost', 'Simple setup', 'Good for learning', 'VPS control'], + ARRAY['Manual server management', 'Limited support', 'Basic features only', 'No auto-scaling']), + +('Static Site Stack', 1, 8.00, 200.00, 'Next.js', 'Serverless', 'SQLite', 'Vercel', 'Jest', NULL, 'GitHub Actions', NULL, + ARRAY['Personal projects', 'Learning'], '1-2', 1, 'low', 'small', + ARRAY['Portfolio sites', 'Blogs', 'Landing pages', 'Documentation sites'], + 90, 88, 'Static site generation with serverless backend functions', + ARRAY['Very low cost', 'Fast performance', 'Easy deployment', 'Great for content'], + ARRAY['Limited dynamic features', 'No real-time capabilities', 'Static content only', 'Limited backend']), + +-- Startup Budget Stacks (25.01-100 USD) +('Startup MVP Stack', 2, 75.00, 1200.00, 'Next.js', 'FastAPI', 'PostgreSQL', 'Vercel', 'Cypress', 'React Native', 'GitHub Actions', NULL, + ARRAY['MVPs', 'Small to medium'], '2-5', 4, 'medium', 'medium', + ARRAY['Tech startups', 'SaaS products', 'E-commerce platforms', 'Content platforms'], + 90, 88, 'Modern stack perfect for startups building cross-platform products', + ARRAY['Full-stack solution', 'Mobile app included', 'Good performance', 'Modern tech stack', 'Scalable foundation'], + ARRAY['Higher learning curve', 'Multiple technologies to manage', 'Limited AI capabilities', 'Monthly costs add up']), + +('Node.js Monorepo', 2, 85.00, 1000.00, 'React', 'Node.js', 'MongoDB', 'DigitalOcean', 'Jest', 'React Native', 'Docker', NULL, + ARRAY['Small to medium'], '3-6', 5, 'medium', 'medium', + ARRAY['Social platforms', 'Real-time applications', 'Content management', 'Collaborative tools'], + 87, 85, 'JavaScript-everywhere approach with shared code between web and mobile', + ARRAY['Unified language', 'Code sharing', 'Strong ecosystem', 'Cost-effective hosting', 'Container ready'], + ARRAY['JavaScript limitations', 'NoSQL complexity', 'Performance ceiling', 'Single language dependency']), + +('Budget E-commerce', 2, 45.00, 800.00, 'Vue.js', 'Express.js', 'PostgreSQL', 'DigitalOcean', 'Jest', 'Ionic', 'GitHub Actions', NULL, + ARRAY['Small to medium'], '2-4', 3, 'low', 'medium', + ARRAY['E-commerce', 'Online stores', 'Marketplaces', 'Retail platforms'], + 89, 87, 'Cost-effective e-commerce solution with mobile app support', + ARRAY['E-commerce ready', 'Mobile app included', 'Good performance', 'Cost-effective', 'Easy to scale'], + ARRAY['Limited advanced features', 'Basic payment integration', 'Manual scaling', 'Limited analytics']), + +('Lean SaaS Stack', 2, 65.00, 900.00, 'React', 'Django', 'PostgreSQL', 'Railway', 'Cypress', NULL, 'Docker', 'Scikit-learn', + ARRAY['Small to medium'], '2-5', 4, 'medium', 'medium', + ARRAY['SaaS platforms', 'Web applications', 'Business tools', 'Data-driven apps'], + 88, 86, 'Lean SaaS stack with basic AI capabilities and good scalability', + ARRAY['AI capabilities', 'Good performance', 'Scalable', 'Cost-effective', 'Python ecosystem'], + ARRAY['Limited AI features', 'Python performance', 'Learning curve', 'Manual deployment']), + +-- Small Business Stacks +('Professional Business Stack', 3, 180.00, 2000.00, 'Angular', 'Django', 'PostgreSQL', 'DigitalOcean', 'Playwright', 'Flutter', 'Jenkins', 'Scikit-learn', + ARRAY['Medium'], '4-8', 6, 'medium', 'large', + ARRAY['Enterprise applications', 'Data-driven platforms', 'Business automation', 'Customer portals'], + 92, 90, 'Robust stack for established businesses needing reliable, scalable solutions', + ARRAY['Enterprise-grade', 'Strong typing', 'Excellent data handling', 'Cross-platform mobile', 'ML capabilities', 'Reliable infrastructure'], + ARRAY['Higher complexity', 'Longer development time', 'Steeper learning curve', 'More infrastructure management']), + +('Modern SaaS Stack', 3, 220.00, 2500.00, 'React', 'FastAPI', 'PostgreSQL', 'AWS', 'Cypress', 'React Native', 'Terraform', 'Hugging Face', + ARRAY['Medium to large'], '5-10', 7, 'high', 'large', + ARRAY['SaaS platforms', 'AI-powered applications', 'Data analytics', 'API-first products'], + 89, 87, 'Modern stack with cloud-native architecture and AI integration', + ARRAY['Cloud-native', 'AI capabilities', 'High performance', 'Infrastructure as code', 'Excellent scalability', 'Modern tech stack'], + ARRAY['High complexity', 'AWS learning curve', 'Higher operational costs', 'Multiple moving parts', 'Requires DevOps expertise']), + +-- Growth Stage Stacks +('Scale-Ready Platform', 4, 450.00, 4000.00, 'Next.js', 'Spring Boot', 'PostgreSQL', 'AWS', 'Selenium', 'Flutter', 'Kubernetes', 'TensorFlow', + ARRAY['Large'], '8-15', 9, 'high', 'enterprise', + ARRAY['Enterprise platforms', 'High-traffic applications', 'Complex business logic', 'AI-driven solutions'], + 94, 92, 'Enterprise-grade stack designed for high-scale applications with advanced features', + ARRAY['Enterprise reliability', 'High performance', 'Advanced AI/ML', 'Excellent scalability', 'Comprehensive testing', 'Production-ready'], + ARRAY['Very high complexity', 'Expensive to run', 'Requires expert team', 'Long development cycles', 'High maintenance overhead']), + +-- Scale-Up Stacks +('Enterprise Powerhouse', 5, 800.00, 6000.00, 'Angular', 'Spring Boot', 'PostgreSQL', 'AWS', 'Selenium', 'Flutter', 'Kubernetes', 'TensorFlow', + ARRAY['Enterprise'], '10-20', 12, 'high', 'enterprise', + ARRAY['Large enterprises', 'Mission-critical applications', 'Complex workflows', 'Advanced analytics'], + 96, 94, 'Ultimate enterprise stack with maximum reliability, performance, and feature completeness', + ARRAY['Maximum reliability', 'Enterprise features', 'Comprehensive solution', 'Expert support', 'Battle-tested components', 'Future-proof'], + ARRAY['Very expensive', 'Extreme complexity', 'Long time to market', 'Requires large expert team', 'High operational overhead']); + +-- ===================================================== +-- DATA INSERTION - STACK RECOMMENDATIONS +-- ===================================================== + +INSERT INTO stack_recommendations (price_tier_id, business_domain, project_scale, team_experience_level, recommended_stack_id, confidence_score, recommendation_reasons, potential_risks, alternative_stacks) VALUES + +-- Micro Budget Recommendations +(1, 'personal', 'small', 'beginner', 1, 95, + ARRAY['Perfect for learning', 'Minimal cost', 'Simple to deploy', 'Good documentation'], + ARRAY['Limited scalability', 'No database persistence', 'Single developer dependency'], + ARRAY[2]), + +(1, 'startup', 'small', 'intermediate', 2, 90, + ARRAY['Full-stack capabilities', 'Database included', 'Room to grow', 'Cost-effective'], + ARRAY['Resource limitations on free tiers', 'May hit scaling walls', 'Limited advanced features'], + ARRAY[1, 3]), + +-- Startup Budget Recommendations +(2, 'saas', 'medium', 'intermediate', 3, 92, + ARRAY['Modern tech stack', 'Mobile app included', 'Good performance', 'Startup-friendly pricing'], + ARRAY['Multiple technologies to learn', 'Vendor lock-in potential', 'Scaling costs'], + ARRAY[4, 5]), + +(2, 'ecommerce', 'medium', 'beginner', 4, 88, + ARRAY['JavaScript everywhere', 'Real-time capabilities', 'Cost-effective', 'Good for content'], + ARRAY['NoSQL complexity', 'Performance limitations', 'Single language risk'], + ARRAY[3, 5]), + +-- Small Business Recommendations +(3, 'enterprise', 'large', 'expert', 5, 94, + ARRAY['Enterprise-grade reliability', 'Strong typing', 'Excellent data handling', 'ML capabilities'], + ARRAY['High complexity', 'Longer development time', 'Requires skilled team'], + ARRAY[6, 7]), + +(3, 'saas', 'large', 'expert', 6, 91, + ARRAY['Cloud-native architecture', 'AI capabilities', 'High performance', 'Modern stack'], + ARRAY['AWS complexity', 'Higher operational costs', 'Requires DevOps expertise'], + ARRAY[5, 7]), + +-- Growth Stage Recommendations +(4, 'enterprise', 'enterprise', 'expert', 7, 96, + ARRAY['Maximum scalability', 'Enterprise features', 'Advanced AI/ML', 'Production-ready'], + ARRAY['Very high complexity', 'Expensive', 'Requires large expert team'], + ARRAY[8]), + +-- Scale-Up Recommendations +(5, 'enterprise', 'enterprise', 'expert', 8, 98, + ARRAY['Ultimate reliability', 'Complete enterprise solution', 'Maximum performance', 'Future-proof'], + ARRAY['Extremely expensive', 'High complexity', 'Long development cycles'], + ARRAY[7]), + +-- Additional Domain Recommendations +-- Healthcare Domain +(2, 'healthcare', 'medium', 'intermediate', 3, 90, + ARRAY['HIPAA compliance ready', 'Secure data handling', 'Good for medical apps', 'Privacy-focused'], + ARRAY['Compliance complexity', 'Security requirements', 'Regulatory overhead'], + ARRAY[4, 5]), + +(3, 'healthcare', 'large', 'expert', 5, 92, + ARRAY['Enterprise security', 'Compliance features', 'Scalable architecture', 'Data protection'], + ARRAY['High complexity', 'Compliance costs', 'Expert team required'], + ARRAY[6, 7]), + +-- Education Domain +(1, 'education', 'small', 'beginner', 2, 88, + ARRAY['Easy to use', 'Good for learning platforms', 'Cost-effective', 'Simple deployment'], + ARRAY['Limited features', 'Basic functionality', 'Scaling limitations'], + ARRAY[1, 3]), + +(2, 'education', 'medium', 'intermediate', 4, 85, + ARRAY['Good for LMS', 'Content management', 'User-friendly', 'Scalable'], + ARRAY['Feature limitations', 'Customization constraints', 'Performance ceiling'], + ARRAY[3, 5]), + +-- Finance Domain +(3, 'finance', 'large', 'expert', 5, 94, + ARRAY['Security-focused', 'Compliance ready', 'Reliable', 'Enterprise-grade'], + ARRAY['High complexity', 'Compliance requirements', 'Expert team needed'], + ARRAY[6, 7]), + +(4, 'finance', 'enterprise', 'expert', 7, 96, + ARRAY['Maximum security', 'Full compliance', 'Advanced features', 'Production-ready'], + ARRAY['Very expensive', 'Complex implementation', 'Large team required'], + ARRAY[8]), + +-- Gaming Domain +(2, 'gaming', 'medium', 'intermediate', 3, 87, + ARRAY['Real-time capabilities', 'Good performance', 'Cross-platform', 'Modern stack'], + ARRAY['Performance limitations', 'Complexity', 'Learning curve'], + ARRAY[4, 5]), + +(3, 'gaming', 'large', 'expert', 6, 89, + ARRAY['High performance', 'Scalable', 'Cloud-native', 'Advanced features'], + ARRAY['High costs', 'Complex architecture', 'Expert team required'], + ARRAY[5, 7]), + +-- Media/Content Domain +(1, 'media', 'small', 'beginner', 1, 85, + ARRAY['Content-focused', 'Easy deployment', 'Good for blogs', 'Cost-effective'], + ARRAY['Limited features', 'Basic functionality', 'Scaling issues'], + ARRAY[2, 3]), + +(2, 'media', 'medium', 'intermediate', 4, 88, + ARRAY['Content management', 'Good performance', 'Scalable', 'User-friendly'], + ARRAY['Feature limitations', 'Customization needs', 'Performance constraints'], + ARRAY[3, 5]), + +-- IoT Domain +(3, 'iot', 'large', 'expert', 5, 91, + ARRAY['Data handling', 'Real-time processing', 'Scalable', 'Enterprise-ready'], + ARRAY['High complexity', 'Data management', 'Expert team needed'], + ARRAY[6, 7]), + +(4, 'iot', 'enterprise', 'expert', 7, 93, + ARRAY['Advanced data processing', 'Maximum scalability', 'Enterprise features', 'Production-ready'], + ARRAY['Very expensive', 'Complex implementation', 'Large team required'], + ARRAY[8]), + +-- Social Media Domain +(2, 'social', 'medium', 'intermediate', 3, 89, + ARRAY['Real-time features', 'Good for social apps', 'Scalable', 'Modern stack'], + ARRAY['Performance challenges', 'Complexity', 'Scaling costs'], + ARRAY[4, 5]), + +(3, 'social', 'large', 'expert', 6, 91, + ARRAY['High performance', 'Advanced features', 'Scalable', 'Cloud-native'], + ARRAY['High costs', 'Complex architecture', 'Expert team required'], + ARRAY[5, 7]), + +-- E-learning Domain +(1, 'elearning', 'small', 'beginner', 2, 86, + ARRAY['Learning-focused', 'Easy to use', 'Cost-effective', 'Good for courses'], + ARRAY['Limited features', 'Basic functionality', 'Scaling limitations'], + ARRAY[1, 3]), + +(2, 'elearning', 'medium', 'intermediate', 4, 88, + ARRAY['LMS capabilities', 'Content management', 'User-friendly', 'Scalable'], + ARRAY['Feature limitations', 'Customization needs', 'Performance constraints'], + ARRAY[3, 5]), + +-- Real Estate Domain +(2, 'realestate', 'medium', 'intermediate', 4, 87, + ARRAY['Property management', 'Good for listings', 'User-friendly', 'Scalable'], + ARRAY['Feature limitations', 'Customization needs', 'Performance constraints'], + ARRAY[3, 5]), + +(3, 'realestate', 'large', 'expert', 5, 89, + ARRAY['Advanced features', 'Enterprise-ready', 'Scalable', 'Professional'], + ARRAY['High complexity', 'Expert team needed', 'Implementation costs'], + ARRAY[6, 7]), + +-- Travel Domain +(2, 'travel', 'medium', 'intermediate', 3, 88, + ARRAY['Booking capabilities', 'Good performance', 'User-friendly', 'Scalable'], + ARRAY['Feature limitations', 'Integration complexity', 'Performance constraints'], + ARRAY[4, 5]), + +(3, 'travel', 'large', 'expert', 6, 90, + ARRAY['Advanced booking', 'High performance', 'Scalable', 'Enterprise features'], + ARRAY['High costs', 'Complex architecture', 'Expert team required'], + ARRAY[5, 7]), + +-- Manufacturing Domain +(3, 'manufacturing', 'large', 'expert', 5, 92, + ARRAY['Industrial features', 'Data processing', 'Scalable', 'Enterprise-ready'], + ARRAY['High complexity', 'Specialized requirements', 'Expert team needed'], + ARRAY[6, 7]), + +(4, 'manufacturing', 'enterprise', 'expert', 7, 94, + ARRAY['Advanced industrial features', 'Maximum scalability', 'Enterprise integration', 'Production-ready'], + ARRAY['Very expensive', 'Complex implementation', 'Large team required'], + ARRAY[8]); + +-- ===================================================== +-- INDEXES FOR PERFORMANCE +-- ===================================================== + +-- Price-based indexes +CREATE INDEX idx_tech_pricing_tier ON tech_pricing(price_tier_id); +CREATE INDEX idx_tech_pricing_category ON tech_pricing(tech_category); +CREATE INDEX idx_price_based_stacks_tier ON price_based_stacks(price_tier_id); +CREATE INDEX idx_stack_recommendations_tier ON stack_recommendations(price_tier_id); + +-- Technology-specific indexes +CREATE INDEX idx_frontend_maturity ON frontend_technologies(maturity_score); +CREATE INDEX idx_backend_performance ON backend_technologies(performance_rating); +CREATE INDEX idx_database_type ON database_technologies(database_type); +CREATE INDEX idx_cloud_provider ON cloud_technologies(provider); + +-- Search optimization indexes +CREATE INDEX idx_frontend_name_search ON frontend_technologies USING gin(to_tsvector('english', name)); +CREATE INDEX idx_backend_name_search ON backend_technologies USING gin(to_tsvector('english', name)); +CREATE INDEX idx_stack_name_search ON price_based_stacks USING gin(to_tsvector('english', stack_name)); + +-- Composite indexes for common queries +CREATE INDEX idx_tech_pricing_cost_performance ON tech_pricing(total_cost_of_ownership_score, price_performance_ratio); +CREATE INDEX idx_stack_scale_complexity ON price_based_stacks(scalability_ceiling, maintenance_complexity); + +-- ===================================================== +-- VIEWS FOR EASIER QUERYING +-- ===================================================== + +-- View for complete stack information with pricing +CREATE OR REPLACE VIEW complete_stack_info AS +SELECT + pbs.id, + pbs.stack_name, + pt.tier_name, + pt.target_audience, + pbs.total_monthly_cost_usd, + pbs.total_setup_cost_usd, + pbs.frontend_tech, + pbs.backend_tech, + pbs.database_tech, + pbs.cloud_tech, + pbs.testing_tech, + pbs.mobile_tech, + pbs.devops_tech, + pbs.ai_ml_tech, + pbs.team_size_range, + pbs.development_time_months, + pbs.maintenance_complexity, + pbs.scalability_ceiling, + pbs.recommended_domains, + pbs.success_rate_percentage, + pbs.user_satisfaction_score +FROM price_based_stacks pbs +JOIN price_tiers pt ON pbs.price_tier_id = pt.id; + +-- View for technology comparison by category +CREATE OR REPLACE VIEW tech_comparison AS +SELECT + 'frontend' as category, + name, + maturity_score, + learning_curve, + performance_rating as rating, + strengths, + weaknesses +FROM frontend_technologies +UNION ALL +SELECT + 'backend' as category, + name, + maturity_score, + learning_curve, + performance_rating as rating, + strengths, + weaknesses +FROM backend_technologies +UNION ALL +SELECT + 'database' as category, + name, + maturity_score, + 'medium' as learning_curve, + performance_rating as rating, + strengths, + weaknesses +FROM database_technologies; + +-- View for price analysis +CREATE OR REPLACE VIEW price_analysis AS +SELECT + tp.tech_name, + tp.tech_category, + pt.tier_name, + tp.monthly_operational_cost_usd as monthly_cost, + tp.development_cost_usd + tp.training_cost_usd as initial_cost, + tp.total_cost_of_ownership_score, + tp.price_performance_ratio, + tp.cost_per_user_usd +FROM tech_pricing tp +JOIN price_tiers pt ON tp.price_tier_id = pt.id; + +-- ===================================================== +-- SAMPLE QUERIES FOR TESTING +-- ===================================================== + +-- Find all stacks within a budget range +/* +SELECT * FROM complete_stack_info +WHERE total_monthly_cost_usd BETWEEN 50 AND 200 +ORDER BY total_monthly_cost_usd; +*/ + +-- Get technology recommendations for a specific price tier +/* +SELECT DISTINCT tech_name, tech_category, monthly_operational_cost_usd +FROM tech_pricing tp +JOIN price_tiers pt ON tp.price_tier_id = pt.id +WHERE pt.tier_name = 'Startup Budget' +ORDER BY tech_category, monthly_operational_cost_usd; +*/ + +-- Find the most cost-effective stack for a specific domain +/* +SELECT * FROM complete_stack_info +WHERE 'saas' = ANY(recommended_domains) +ORDER BY total_monthly_cost_usd +LIMIT 5; +*/ + +-- Compare technologies by performance and cost +/* +SELECT * FROM price_analysis +WHERE tech_category = 'frontend' +ORDER BY price_performance_ratio DESC; +*/ + +-- ===================================================== +-- STORED PROCEDURES FOR COMMON OPERATIONS +-- ===================================================== + +-- Function to recommend stacks based on budget and requirements +CREATE OR REPLACE FUNCTION recommend_stacks( + budget_min DECIMAL DEFAULT 0, + budget_max DECIMAL DEFAULT 10000, + domain VARCHAR DEFAULT NULL, + team_size VARCHAR DEFAULT NULL, + experience_level VARCHAR DEFAULT NULL +) +RETURNS TABLE ( + stack_name VARCHAR, + monthly_cost DECIMAL, + setup_cost DECIMAL, + tier_name VARCHAR, + confidence_score INTEGER, + tech_stack TEXT, + recommendation_reason TEXT +) AS $$ +BEGIN + RETURN QUERY + SELECT + pbs.stack_name::VARCHAR, + pbs.total_monthly_cost_usd, + pbs.total_setup_cost_usd, + pt.tier_name::VARCHAR, + sr.confidence_score, + CONCAT(pbs.frontend_tech, ' + ', pbs.backend_tech, ' + ', pbs.database_tech, ' + ', pbs.cloud_tech)::TEXT as tech_stack, + array_to_string(sr.recommendation_reasons, ', ')::TEXT as recommendation_reason + FROM price_based_stacks pbs + JOIN price_tiers pt ON pbs.price_tier_id = pt.id + LEFT JOIN stack_recommendations sr ON pbs.id = sr.recommended_stack_id + WHERE pbs.total_monthly_cost_usd BETWEEN budget_min AND budget_max + AND (domain IS NULL OR domain = ANY(pbs.recommended_domains)) + AND (team_size IS NULL OR pbs.team_size_range = team_size) + AND (experience_level IS NULL OR sr.team_experience_level = experience_level OR sr.team_experience_level IS NULL) + ORDER BY sr.confidence_score DESC NULLS LAST, pbs.total_monthly_cost_usd ASC; +END; +$$ LANGUAGE plpgsql; + +-- Function to calculate total cost of ownership for a custom stack +CREATE OR REPLACE FUNCTION calculate_tco( + frontend_name VARCHAR, + backend_name VARCHAR, + database_name VARCHAR, + cloud_name VARCHAR, + months INTEGER DEFAULT 12 +) +RETURNS TABLE ( + total_setup_cost DECIMAL, + monthly_operational_cost DECIMAL, + total_yearly_cost DECIMAL, + cost_breakdown JSONB +) AS $$ +DECLARE + setup_cost DECIMAL := 0; + monthly_cost DECIMAL := 0; + breakdown JSONB; +BEGIN + -- Calculate costs from each technology + SELECT + COALESCE(SUM(tp.development_cost_usd + tp.training_cost_usd), 0), + COALESCE(SUM(tp.monthly_operational_cost_usd), 0), + jsonb_object_agg(tp.tech_name, jsonb_build_object( + 'setup_cost', tp.development_cost_usd + tp.training_cost_usd, + 'monthly_cost', tp.monthly_operational_cost_usd, + 'category', tp.tech_category + )) + INTO setup_cost, monthly_cost, breakdown + FROM tech_pricing tp + WHERE tp.tech_name IN (frontend_name, backend_name, database_name, cloud_name); + + RETURN QUERY + SELECT + setup_cost, + monthly_cost, + setup_cost + (monthly_cost * months), + breakdown; +END; +$$ LANGUAGE plpgsql; + +-- Function to find technology alternatives within budget +CREATE OR REPLACE FUNCTION find_alternatives( + tech_category VARCHAR, + current_tech VARCHAR, + max_monthly_cost DECIMAL DEFAULT NULL +) +RETURNS TABLE ( + alternative_name VARCHAR, + monthly_cost DECIMAL, + performance_rating INTEGER, + learning_curve VARCHAR, + cost_difference DECIMAL, + performance_difference INTEGER +) AS $$ +DECLARE + current_cost DECIMAL; + current_performance INTEGER; +BEGIN + -- Get current technology metrics + SELECT tp.monthly_operational_cost_usd INTO current_cost + FROM tech_pricing tp + WHERE tp.tech_name = current_tech AND tp.tech_category = tech_category; + + -- Get performance rating based on category + IF tech_category = 'frontend' THEN + SELECT ft.performance_rating INTO current_performance + FROM frontend_technologies ft WHERE ft.name = current_tech; + ELSIF tech_category = 'backend' THEN + SELECT bt.performance_rating INTO current_performance + FROM backend_technologies bt WHERE bt.name = current_tech; + ELSIF tech_category = 'database' THEN + SELECT dt.performance_rating INTO current_performance + FROM database_technologies dt WHERE dt.name = current_tech; + END IF; + + -- Return alternatives based on category + IF tech_category = 'frontend' THEN + RETURN QUERY + SELECT + ft.name::VARCHAR, + tp.monthly_operational_cost_usd, + ft.performance_rating, + ft.learning_curve::VARCHAR, + tp.monthly_operational_cost_usd - current_cost, + ft.performance_rating - current_performance + FROM frontend_technologies ft + JOIN tech_pricing tp ON ft.name = tp.tech_name + WHERE ft.name != current_tech + AND (max_monthly_cost IS NULL OR tp.monthly_operational_cost_usd <= max_monthly_cost) + ORDER BY ft.performance_rating DESC, tp.monthly_operational_cost_usd ASC; + + ELSIF tech_category = 'backend' THEN + RETURN QUERY + SELECT + bt.name::VARCHAR, + tp.monthly_operational_cost_usd, + bt.performance_rating, + bt.learning_curve::VARCHAR, + tp.monthly_operational_cost_usd - current_cost, + bt.performance_rating - current_performance + FROM backend_technologies bt + JOIN tech_pricing tp ON bt.name = tp.tech_name + WHERE bt.name != current_tech + AND (max_monthly_cost IS NULL OR tp.monthly_operational_cost_usd <= max_monthly_cost) + ORDER BY bt.performance_rating DESC, tp.monthly_operational_cost_usd ASC; + + ELSIF tech_category = 'database' THEN + RETURN QUERY + SELECT + dt.name::VARCHAR, + tp.monthly_operational_cost_usd, + dt.performance_rating, + 'medium'::VARCHAR as learning_curve, + tp.monthly_operational_cost_usd - current_cost, + dt.performance_rating - current_performance + FROM database_technologies dt + JOIN tech_pricing tp ON dt.name = tp.tech_name + WHERE dt.name != current_tech + AND (max_monthly_cost IS NULL OR tp.monthly_operational_cost_usd <= max_monthly_cost) + ORDER BY dt.performance_rating DESC, tp.monthly_operational_cost_usd ASC; + END IF; +END; +$$ LANGUAGE plpgsql; + +-- ===================================================== +-- BUSINESS INTELLIGENCE VIEWS +-- ===================================================== + +-- Technology adoption and success rates +CREATE OR REPLACE VIEW tech_adoption_analysis AS +SELECT + tech_category, + tech_name, + COUNT(*) as stack_usage_count, + AVG(user_satisfaction_score) as avg_satisfaction, + AVG(success_rate_percentage) as avg_success_rate, + AVG(total_monthly_cost_usd) as avg_monthly_cost +FROM tech_pricing tp +JOIN price_based_stacks pbs ON ( + tp.tech_name = pbs.frontend_tech OR + tp.tech_name = pbs.backend_tech OR + tp.tech_name = pbs.database_tech OR + tp.tech_name = pbs.cloud_tech OR + tp.tech_name = pbs.testing_tech OR + tp.tech_name = pbs.devops_tech OR + tp.tech_name = pbs.ai_ml_tech +) +GROUP BY tech_category, tech_name +ORDER BY avg_success_rate DESC, avg_satisfaction DESC; + +-- Price tier effectiveness analysis +CREATE OR REPLACE VIEW price_tier_analysis AS +SELECT + pt.tier_name, + pt.target_audience, + COUNT(pbs.id) as available_stacks, + AVG(pbs.user_satisfaction_score) as avg_satisfaction, + AVG(pbs.success_rate_percentage) as avg_success_rate, + MIN(pbs.total_monthly_cost_usd) as min_monthly_cost, + MAX(pbs.total_monthly_cost_usd) as max_monthly_cost, + AVG(pbs.total_monthly_cost_usd) as avg_monthly_cost, + AVG(pbs.development_time_months) as avg_dev_time +FROM price_tiers pt +JOIN price_based_stacks pbs ON pt.id = pbs.price_tier_id +GROUP BY pt.id, pt.tier_name, pt.target_audience +ORDER BY pt.min_price_usd; + +-- Domain-specific stack recommendations +CREATE OR REPLACE VIEW domain_stack_analysis AS +SELECT + domain, + COUNT(*) as available_stacks, + AVG(total_monthly_cost_usd) as avg_monthly_cost, + AVG(user_satisfaction_score) as avg_satisfaction, + AVG(success_rate_percentage) as avg_success_rate, + array_agg(stack_name ORDER BY user_satisfaction_score DESC) as top_stacks +FROM ( + SELECT + unnest(recommended_domains) as domain, + stack_name, + total_monthly_cost_usd, + user_satisfaction_score, + success_rate_percentage + FROM price_based_stacks +) domain_stacks +GROUP BY domain +ORDER BY avg_satisfaction DESC; + +-- ===================================================== +-- TRIGGERS FOR DATA CONSISTENCY +-- ===================================================== + +-- Function to update stack costs when tech pricing changes +CREATE OR REPLACE FUNCTION update_stack_costs() +RETURNS TRIGGER AS $$ +BEGIN + -- Update all stacks that use the modified technology + UPDATE price_based_stacks + SET + total_monthly_cost_usd = ( + SELECT COALESCE( + (SELECT monthly_operational_cost_usd FROM tech_pricing WHERE tech_name = frontend_tech AND tech_category = 'frontend') + + (SELECT monthly_operational_cost_usd FROM tech_pricing WHERE tech_name = backend_tech AND tech_category = 'backend') + + (SELECT monthly_operational_cost_usd FROM tech_pricing WHERE tech_name = database_tech AND tech_category = 'database') + + (SELECT monthly_operational_cost_usd FROM tech_pricing WHERE tech_name = cloud_tech AND tech_category = 'cloud'), 0 + ) + ), + total_setup_cost_usd = ( + SELECT COALESCE( + (SELECT development_cost_usd + training_cost_usd FROM tech_pricing WHERE tech_name = frontend_tech AND tech_category = 'frontend') + + (SELECT development_cost_usd + training_cost_usd FROM tech_pricing WHERE tech_name = backend_tech AND tech_category = 'backend') + + (SELECT development_cost_usd + training_cost_usd FROM tech_pricing WHERE tech_name = database_tech AND tech_category = 'database') + + (SELECT development_cost_usd + training_cost_usd FROM tech_pricing WHERE tech_name = cloud_tech AND tech_category = 'cloud'), 0 + ) + ) + WHERE frontend_tech = NEW.tech_name + OR backend_tech = NEW.tech_name + OR database_tech = NEW.tech_name + OR cloud_tech = NEW.tech_name + OR testing_tech = NEW.tech_name + OR devops_tech = NEW.tech_name + OR ai_ml_tech = NEW.tech_name; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Create trigger +CREATE TRIGGER update_stack_costs_trigger + AFTER UPDATE ON tech_pricing + FOR EACH ROW + EXECUTE FUNCTION update_stack_costs(); + +-- ===================================================== +-- SAMPLE DATA QUERIES AND USAGE EXAMPLES +-- ===================================================== + +/* +-- Example 1: Find stacks within budget for a SaaS startup +SELECT * FROM recommend_stacks(50, 200, 'saas', '3-5', 'intermediate'); + +-- Example 2: Calculate TCO for a custom stack +SELECT * FROM calculate_tco('React', 'Node.js', 'PostgreSQL', 'Vercel', 12); + +-- Example 3: Find alternatives to expensive cloud providers +SELECT * FROM find_alternatives('cloud', 'AWS', 100); + +-- Example 4: Get all micro-budget friendly technologies +SELECT tp.tech_name, tp.tech_category, tp.monthly_operational_cost_usd, pt.tier_name +FROM tech_pricing tp +JOIN price_tiers pt ON tp.price_tier_id = pt.id +WHERE pt.tier_name = 'Micro Budget' +ORDER BY tp.tech_category, tp.monthly_operational_cost_usd; + +-- Example 5: Find the most cost-effective stack by domain +SELECT + unnest(recommended_domains) as domain, + stack_name, + total_monthly_cost_usd, + user_satisfaction_score, + success_rate_percentage +FROM price_based_stacks +WHERE 'ecommerce' = ANY(recommended_domains) +ORDER BY total_monthly_cost_usd +LIMIT 3; + +-- Example 6: Technology performance vs cost analysis +SELECT + tech_name, + tech_category, + monthly_operational_cost_usd, + price_performance_ratio, + total_cost_of_ownership_score +FROM tech_pricing +WHERE tech_category = 'frontend' +ORDER BY price_performance_ratio DESC; + +-- Example 7: Get complete stack details with all technologies +SELECT + csi.*, + CONCAT( + 'Frontend: ', frontend_tech, ' | ', + 'Backend: ', backend_tech, ' | ', + 'Database: ', database_tech, ' | ', + 'Cloud: ', cloud_tech + ) as full_stack_description +FROM complete_stack_info csi +WHERE tier_name = 'Startup Budget' +ORDER BY total_monthly_cost_usd; +*/ + +-- ===================================================== +-- FINAL SETUP VERIFICATION +-- ===================================================== + +-- Verify data integrity +DO $$ +DECLARE + table_count INTEGER; + total_records INTEGER; +BEGIN + -- Count tables + SELECT COUNT(*) INTO table_count + FROM information_schema.tables + WHERE table_schema = 'public' AND table_type = 'BASE TABLE'; + + -- Count total records across main tables + SELECT + (SELECT COUNT(*) FROM frontend_technologies) + + (SELECT COUNT(*) FROM backend_technologies) + + (SELECT COUNT(*) FROM database_technologies) + + (SELECT COUNT(*) FROM cloud_technologies) + + (SELECT COUNT(*) FROM testing_technologies) + + (SELECT COUNT(*) FROM mobile_technologies) + + (SELECT COUNT(*) FROM devops_technologies) + + (SELECT COUNT(*) FROM ai_ml_technologies) + + (SELECT COUNT(*) FROM tech_pricing) + + (SELECT COUNT(*) FROM price_based_stacks) + + (SELECT COUNT(*) FROM stack_recommendations) + INTO total_records; + + RAISE NOTICE 'Database setup completed successfully!'; + RAISE NOTICE 'Created % tables with % total records', table_count, total_records; + RAISE NOTICE 'Ready for Neo4j migration and enhanced tech stack recommendations'; +END $$; + +-- ===================================================== +-- BUSINESS/PRODUCTIVITY TOOLS TABLE +-- ===================================================== + +-- Create tools table for business/productivity tools recommendations +CREATE TABLE tools ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + category VARCHAR(100) NOT NULL, + description TEXT, + primary_use_cases TEXT, + popularity_score INT CHECK (popularity_score >= 1 AND popularity_score <= 100), + created_at TIMESTAMP DEFAULT now() +); + +-- Create indexes for better performance +CREATE INDEX idx_tools_category ON tools(category); +CREATE INDEX idx_tools_popularity ON tools(popularity_score); +CREATE INDEX idx_tools_name_search ON tools USING gin(to_tsvector('english', name)); + +-- ===================================================== +-- SEED DATA - BUSINESS/PRODUCTIVITY TOOLS +-- ===================================================== + +INSERT INTO tools (name, category, description, primary_use_cases, popularity_score) VALUES + +-- E-commerce Tools +('Shopify', 'e-commerce', 'Complete e-commerce platform for online stores with built-in payment processing, inventory management, and marketing tools', 'Online store creation, product management, order processing, payment handling, inventory tracking, customer management, marketing automation', 95), +('WooCommerce', 'e-commerce', 'WordPress plugin that transforms any WordPress site into a fully functional e-commerce store', 'WordPress e-commerce, product catalog, payment processing, order management, inventory control, customer accounts', 90), +('Magento', 'e-commerce', 'Enterprise-grade e-commerce platform with advanced customization and scalability features', 'Large-scale e-commerce, B2B commerce, multi-store management, advanced catalog management, enterprise integrations', 85), +('BigCommerce', 'e-commerce', 'SaaS e-commerce platform with built-in features for growing online businesses', 'Online store setup, payment processing, SEO optimization, multi-channel selling, inventory management', 80), +('Squarespace Commerce', 'e-commerce', 'Website builder with integrated e-commerce capabilities for small to medium businesses', 'Website creation with e-commerce, product showcase, payment processing, inventory management, customer management', 75), +('PrestaShop', 'e-commerce', 'Open-source e-commerce platform with extensive customization options', 'Custom e-commerce solutions, multi-language stores, advanced product management, payment gateway integration', 70), + +-- CRM Tools +('HubSpot CRM', 'crm', 'Free CRM platform with sales, marketing, and customer service tools for growing businesses', 'Lead management, contact tracking, sales pipeline management, email marketing, customer support, analytics', 95), +('Salesforce CRM', 'crm', 'Enterprise-grade CRM platform with extensive customization and integration capabilities', 'Enterprise sales management, customer relationship management, marketing automation, analytics, custom applications', 98), +('Zoho CRM', 'crm', 'Comprehensive CRM solution with sales, marketing, and customer support features', 'Lead and contact management, sales automation, email marketing, customer support, analytics, mobile access', 85), +('Pipedrive', 'crm', 'Sales-focused CRM with visual pipeline management and automation features', 'Sales pipeline management, deal tracking, contact management, email integration, sales reporting', 80), +('Freshworks CRM', 'crm', 'Modern CRM platform with AI-powered insights and automation capabilities', 'Lead management, contact tracking, sales automation, email marketing, customer support, AI insights', 75), +('Monday.com CRM', 'crm', 'Visual CRM platform with customizable workflows and team collaboration features', 'Sales pipeline management, contact tracking, team collaboration, project management, automation', 70), + +-- Analytics Tools +('Google Analytics', 'analytics', 'Web analytics service that tracks and reports website traffic and user behavior', 'Website traffic analysis, user behavior tracking, conversion tracking, audience insights, performance monitoring', 98), +('Mixpanel', 'analytics', 'Advanced analytics platform focused on user behavior and product analytics', 'User behavior analysis, funnel analysis, cohort analysis, A/B testing, product analytics, retention tracking', 85), +('Amplitude', 'analytics', 'Product analytics platform for understanding user behavior and driving growth', 'User journey analysis, behavioral analytics, cohort analysis, retention analysis, feature adoption tracking', 80), +('Hotjar', 'analytics', 'User behavior analytics tool with heatmaps, session recordings, and feedback collection', 'Heatmap analysis, session recordings, user feedback, conversion optimization, user experience analysis', 75), +('Tableau', 'analytics', 'Business intelligence and data visualization platform for advanced analytics', 'Data visualization, business intelligence, advanced analytics, reporting, data exploration, dashboard creation', 90), +('Power BI', 'analytics', 'Microsoft business analytics service for data visualization and business intelligence', 'Data visualization, business intelligence, reporting, dashboard creation, data modeling, advanced analytics', 85), + +-- Payment Processing +('Stripe', 'payments', 'Online payment processing platform for internet businesses with developer-friendly APIs', 'Online payments, subscription billing, marketplace payments, international payments, fraud prevention, API integration', 95), +('PayPal', 'payments', 'Global payment platform supporting online payments, money transfers, and business solutions', 'Online payments, money transfers, business payments, international transactions, mobile payments, invoicing', 90), +('Razorpay', 'payments', 'Payment gateway solution designed for Indian businesses with local payment methods', 'Indian payment processing, UPI payments, card payments, subscription billing, payment links, business banking', 85), +('Square', 'payments', 'Payment processing platform with point-of-sale and online payment solutions', 'Point-of-sale payments, online payments, invoicing, business management, payment analytics, mobile payments', 80), +('Adyen', 'payments', 'Global payment platform for enterprise businesses with advanced fraud prevention', 'Enterprise payments, global payment processing, fraud prevention, payment optimization, unified commerce', 75), +('Braintree', 'payments', 'PayPal-owned payment platform with advanced features for online and mobile payments', 'Online payments, mobile payments, marketplace payments, subscription billing, fraud protection, global payments', 70), + +-- Communication Tools +('Slack', 'communication', 'Business communication platform with channels, direct messaging, and app integrations', 'Team communication, project collaboration, file sharing, app integrations, video calls, workflow automation', 95), +('Microsoft Teams', 'communication', 'Collaboration platform with chat, video meetings, and Microsoft 365 integration', 'Team communication, video conferencing, file collaboration, Microsoft 365 integration, project management', 90), +('Discord', 'communication', 'Voice, video, and text communication platform popular with gaming and tech communities', 'Community building, voice/video calls, text chat, server management, bot integration, streaming', 85), +('Zoom', 'communication', 'Video conferencing platform with meeting, webinar, and collaboration features', 'Video meetings, webinars, screen sharing, recording, virtual events, team collaboration', 90), +('Telegram', 'communication', 'Cloud-based messaging platform with group chats, channels, and bot support', 'Messaging, group chats, channels, file sharing, bot integration, voice/video calls, cloud storage', 80), +('WhatsApp Business', 'communication', 'Business messaging platform for customer communication and marketing', 'Customer communication, business messaging, marketing campaigns, catalog sharing, payment integration', 75), + +-- Project Management +('Trello', 'project-management', 'Visual project management tool using boards, lists, and cards for task organization', 'Task management, project tracking, team collaboration, workflow visualization, deadline management, progress tracking', 85), +('Jira', 'project-management', 'Agile project management tool designed for software development teams', 'Agile project management, issue tracking, sprint planning, bug tracking, release management, team collaboration', 90), +('Asana', 'project-management', 'Work management platform for teams to organize, track, and manage their work', 'Task management, project planning, team collaboration, workflow automation, progress tracking, deadline management', 85), +('Monday.com', 'project-management', 'Work operating system with customizable workflows and visual project management', 'Project management, team collaboration, workflow automation, resource management, time tracking, reporting', 80), +('Notion', 'project-management', 'All-in-one workspace combining notes, docs, wikis, and project management', 'Note-taking, documentation, project management, team collaboration, knowledge management, task tracking', 85), +('Basecamp', 'project-management', 'Project management and team communication platform with simple, organized interface', 'Project management, team communication, file sharing, scheduling, progress tracking, client collaboration', 75), + +-- Marketing Tools +('Mailchimp', 'marketing', 'Email marketing and automation platform with audience management and analytics', 'Email marketing, marketing automation, audience segmentation, campaign management, analytics, landing pages', 90), +('Klaviyo', 'marketing', 'E-commerce marketing automation platform with advanced segmentation and personalization', 'E-commerce marketing, email automation, SMS marketing, customer segmentation, personalization, analytics', 85), +('SEMrush', 'marketing', 'Digital marketing toolkit with SEO, PPC, content, and social media marketing tools', 'SEO analysis, keyword research, competitor analysis, PPC management, content marketing, social media management', 80), +('HubSpot Marketing', 'marketing', 'Inbound marketing platform with lead generation, email marketing, and analytics', 'Lead generation, email marketing, marketing automation, landing pages, analytics, CRM integration', 85), +('Hootsuite', 'marketing', 'Social media management platform for scheduling, monitoring, and analytics', 'Social media scheduling, content management, social listening, analytics, team collaboration, brand monitoring', 80), +('Canva', 'marketing', 'Graphic design platform with templates and tools for creating marketing materials', 'Graphic design, social media graphics, presentations, marketing materials, brand assets, team collaboration', 90), + +-- Design & Content Creation +('Figma', 'design', 'Collaborative interface design tool with real-time editing and prototyping features', 'UI/UX design, prototyping, design systems, team collaboration, design handoff, component libraries', 95), +('Adobe Creative Suite', 'design', 'Comprehensive suite of creative tools for design, photography, and video production', 'Graphic design, photo editing, video production, web design, illustration, animation, print design', 90), +('Sketch', 'design', 'Digital design toolkit for creating user interfaces and user experiences', 'UI design, prototyping, design systems, vector graphics, collaboration, design handoff', 85), +('InVision', 'design', 'Digital product design platform with prototyping and collaboration features', 'Prototyping, design collaboration, user testing, design handoff, design systems, workflow management', 80), +('Adobe XD', 'design', 'User experience design tool with prototyping and collaboration capabilities', 'UX design, prototyping, design systems, collaboration, user testing, design handoff', 85), +('Framer', 'design', 'Interactive design tool for creating high-fidelity prototypes and animations', 'Interactive prototyping, animation design, responsive design, user testing, design handoff', 75), + +-- Development & DevOps +('GitHub', 'development', 'Code hosting platform with version control, collaboration, and project management features', 'Code hosting, version control, collaboration, project management, CI/CD, code review, issue tracking', 95), +('GitLab', 'development', 'DevOps platform with Git repository management, CI/CD, and project management', 'Version control, CI/CD, project management, code review, issue tracking, DevOps automation', 85), +('Bitbucket', 'development', 'Git repository management solution with built-in CI/CD and collaboration tools', 'Version control, code collaboration, CI/CD, project management, code review, issue tracking', 80), +('Jira Software', 'development', 'Agile project management tool specifically designed for software development teams', 'Agile project management, sprint planning, issue tracking, release management, team collaboration', 90), +('Confluence', 'development', 'Team collaboration and documentation platform for knowledge sharing and project documentation', 'Documentation, knowledge management, team collaboration, project documentation, meeting notes, wikis', 85), +('Jenkins', 'development', 'Open-source automation server for building, testing, and deploying software', 'CI/CD automation, build automation, testing automation, deployment automation, pipeline management', 80), + +-- Customer Support +('Zendesk', 'customer-support', 'Customer service platform with ticketing, knowledge base, and communication tools', 'Customer support, ticket management, knowledge base, live chat, customer communication, analytics', 90), +('Intercom', 'customer-support', 'Customer messaging platform with support, engagement, and marketing features', 'Customer support, live chat, messaging, customer engagement, marketing automation, analytics', 85), +('Freshdesk', 'customer-support', 'Cloud-based customer support software with ticketing and communication features', 'Customer support, ticket management, knowledge base, live chat, customer communication, automation', 80), +('Help Scout', 'customer-support', 'Customer service platform focused on team collaboration and customer satisfaction', 'Customer support, ticket management, team collaboration, customer communication, knowledge base, analytics', 75), +('LiveChat', 'customer-support', 'Live chat software for customer support and sales with automation features', 'Live chat, customer support, sales chat, chat automation, visitor tracking, analytics', 70), +('Crisp', 'customer-support', 'Customer messaging platform with live chat, email, and social media integration', 'Live chat, customer support, email integration, social media integration, visitor tracking, analytics', 65), + +-- Business Intelligence & Reporting +('Google Data Studio', 'business-intelligence', 'Free data visualization and reporting tool that integrates with Google services', 'Data visualization, reporting, dashboard creation, Google Analytics integration, data exploration', 80), +('Looker', 'business-intelligence', 'Business intelligence platform with data modeling and visualization capabilities', 'Business intelligence, data modeling, visualization, reporting, analytics, data exploration', 85), +('Qlik Sense', 'business-intelligence', 'Self-service data visualization and business intelligence platform', 'Data visualization, business intelligence, self-service analytics, reporting, data exploration', 80), +('Sisense', 'business-intelligence', 'Business intelligence platform with embedded analytics and data visualization', 'Business intelligence, embedded analytics, data visualization, reporting, data modeling', 75), +('Domo', 'business-intelligence', 'Cloud-based business intelligence platform with real-time data visualization', 'Business intelligence, real-time analytics, data visualization, reporting, dashboard creation', 70), +('Metabase', 'business-intelligence', 'Open-source business intelligence tool with easy-to-use interface for data exploration', 'Business intelligence, data exploration, reporting, dashboard creation, SQL queries, data visualization', 75), + +-- Accounting & Finance +('QuickBooks', 'accounting', 'Accounting software for small and medium businesses with invoicing and expense tracking', 'Accounting, invoicing, expense tracking, financial reporting, tax preparation, payroll management', 90), +('Xero', 'accounting', 'Cloud-based accounting software for small businesses with bank reconciliation and reporting', 'Accounting, bank reconciliation, invoicing, expense tracking, financial reporting, inventory management', 85), +('FreshBooks', 'accounting', 'Cloud-based accounting software designed for small businesses and freelancers', 'Accounting, invoicing, expense tracking, time tracking, project management, financial reporting', 80), +('Wave', 'accounting', 'Free accounting software for small businesses with invoicing and receipt scanning', 'Accounting, invoicing, expense tracking, receipt scanning, financial reporting, tax preparation', 75), +('Sage', 'accounting', 'Business management software with accounting, payroll, and HR features', 'Accounting, payroll management, HR management, financial reporting, inventory management, business intelligence', 80), +('Zoho Books', 'accounting', 'Online accounting software with invoicing, expense tracking, and financial reporting', 'Accounting, invoicing, expense tracking, financial reporting, inventory management, project management', 75); + +-- ===================================================== +-- VERIFICATION QUERIES FOR TOOLS TABLE +-- ===================================================== + +-- Verify data insertion +DO $$ +DECLARE + tool_count INTEGER; + category_count INTEGER; +BEGIN + SELECT COUNT(*) INTO tool_count FROM tools; + SELECT COUNT(DISTINCT category) INTO category_count FROM tools; + + RAISE NOTICE 'Tools table migration completed successfully!'; + RAISE NOTICE 'Created tools table with % categories and % total tools', category_count, tool_count; + RAISE NOTICE 'Ready for domain-based tool recommendations'; +END $$; + +-- ===================================================== +-- NEO4J MIGRATION PREPARATION NOTES +-- ===================================================== + +/* +For future Neo4j migration, consider these relationships: +1. Technology -> BELONGS_TO -> Category +2. Technology -> HAS_PRICING -> PriceTier +3. Technology -> COMPATIBLE_WITH -> Technology +4. Stack -> INCLUDES -> Technology +5. Stack -> SUITABLE_FOR -> Domain +6. Stack -> RECOMMENDED_FOR -> PriceTier +7. Technology -> ALTERNATIVE_TO -> Technology +8. Stack -> COMPETES_WITH -> Stack +9. Tools -> RECOMMENDED_FOR -> Domain +10. Tools -> CATEGORY_MATCHES -> Technology + +Key nodes: +- Technology (with all properties) +- PriceTier (budget categories) +- Domain (business domains) +- Stack (technology combinations) +- Team (size and experience) +- Tools (business/productivity tools) + +This relational structure provides a solid foundation for graph database migration +while maintaining referential integrity and query performance. +*/ \ No newline at end of file diff --git a/services/tech-stack-selector/db/002_tools_migration.sql b/services/tech-stack-selector/db/002_tools_migration.sql new file mode 100644 index 0000000..af5b443 --- /dev/null +++ b/services/tech-stack-selector/db/002_tools_migration.sql @@ -0,0 +1,162 @@ +-- ===================================================== +-- Tools Table Migration +-- Business/Productivity Tools for Domain-Based Recommendations +-- ===================================================== + +-- Create tools table +CREATE TABLE tools ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + category VARCHAR(100) NOT NULL, + description TEXT, + primary_use_cases TEXT, + popularity_score INT CHECK (popularity_score >= 1 AND popularity_score <= 100), + created_at TIMESTAMP DEFAULT now() +); + +-- Create indexes for better performance +CREATE INDEX idx_tools_category ON tools(category); +CREATE INDEX idx_tools_popularity ON tools(popularity_score); +CREATE INDEX idx_tools_name_search ON tools USING gin(to_tsvector('english', name)); + +-- ===================================================== +-- SEED DATA - BUSINESS/PRODUCTIVITY TOOLS +-- ===================================================== + +INSERT INTO tools (name, category, description, primary_use_cases, popularity_score) VALUES + +-- E-commerce Tools +('Shopify', 'e-commerce', 'Complete e-commerce platform for online stores with built-in payment processing, inventory management, and marketing tools', 'Online store creation, product management, order processing, payment handling, inventory tracking, customer management, marketing automation', 95), +('WooCommerce', 'e-commerce', 'WordPress plugin that transforms any WordPress site into a fully functional e-commerce store', 'WordPress e-commerce, product catalog, payment processing, order management, inventory control, customer accounts', 90), +('Magento', 'e-commerce', 'Enterprise-grade e-commerce platform with advanced customization and scalability features', 'Large-scale e-commerce, B2B commerce, multi-store management, advanced catalog management, enterprise integrations', 85), +('BigCommerce', 'e-commerce', 'SaaS e-commerce platform with built-in features for growing online businesses', 'Online store setup, payment processing, SEO optimization, multi-channel selling, inventory management', 80), +('Squarespace Commerce', 'e-commerce', 'Website builder with integrated e-commerce capabilities for small to medium businesses', 'Website creation with e-commerce, product showcase, payment processing, inventory management, customer management', 75), +('PrestaShop', 'e-commerce', 'Open-source e-commerce platform with extensive customization options', 'Custom e-commerce solutions, multi-language stores, advanced product management, payment gateway integration', 70), + +-- CRM Tools +('HubSpot CRM', 'crm', 'Free CRM platform with sales, marketing, and customer service tools for growing businesses', 'Lead management, contact tracking, sales pipeline management, email marketing, customer support, analytics', 95), +('Salesforce CRM', 'crm', 'Enterprise-grade CRM platform with extensive customization and integration capabilities', 'Enterprise sales management, customer relationship management, marketing automation, analytics, custom applications', 98), +('Zoho CRM', 'crm', 'Comprehensive CRM solution with sales, marketing, and customer support features', 'Lead and contact management, sales automation, email marketing, customer support, analytics, mobile access', 85), +('Pipedrive', 'crm', 'Sales-focused CRM with visual pipeline management and automation features', 'Sales pipeline management, deal tracking, contact management, email integration, sales reporting', 80), +('Freshworks CRM', 'crm', 'Modern CRM platform with AI-powered insights and automation capabilities', 'Lead management, contact tracking, sales automation, email marketing, customer support, AI insights', 75), +('Monday.com CRM', 'crm', 'Visual CRM platform with customizable workflows and team collaboration features', 'Sales pipeline management, contact tracking, team collaboration, project management, automation', 70), + +-- Analytics Tools +('Google Analytics', 'analytics', 'Web analytics service that tracks and reports website traffic and user behavior', 'Website traffic analysis, user behavior tracking, conversion tracking, audience insights, performance monitoring', 98), +('Mixpanel', 'analytics', 'Advanced analytics platform focused on user behavior and product analytics', 'User behavior analysis, funnel analysis, cohort analysis, A/B testing, product analytics, retention tracking', 85), +('Amplitude', 'analytics', 'Product analytics platform for understanding user behavior and driving growth', 'User journey analysis, behavioral analytics, cohort analysis, retention analysis, feature adoption tracking', 80), +('Hotjar', 'analytics', 'User behavior analytics tool with heatmaps, session recordings, and feedback collection', 'Heatmap analysis, session recordings, user feedback, conversion optimization, user experience analysis', 75), +('Tableau', 'analytics', 'Business intelligence and data visualization platform for advanced analytics', 'Data visualization, business intelligence, advanced analytics, reporting, data exploration, dashboard creation', 90), +('Power BI', 'analytics', 'Microsoft business analytics service for data visualization and business intelligence', 'Data visualization, business intelligence, reporting, dashboard creation, data modeling, advanced analytics', 85), + +-- Payment Processing +('Stripe', 'payments', 'Online payment processing platform for internet businesses with developer-friendly APIs', 'Online payments, subscription billing, marketplace payments, international payments, fraud prevention, API integration', 95), +('PayPal', 'payments', 'Global payment platform supporting online payments, money transfers, and business solutions', 'Online payments, money transfers, business payments, international transactions, mobile payments, invoicing', 90), +('Razorpay', 'payments', 'Payment gateway solution designed for Indian businesses with local payment methods', 'Indian payment processing, UPI payments, card payments, subscription billing, payment links, business banking', 85), +('Square', 'payments', 'Payment processing platform with point-of-sale and online payment solutions', 'Point-of-sale payments, online payments, invoicing, business management, payment analytics, mobile payments', 80), +('Adyen', 'payments', 'Global payment platform for enterprise businesses with advanced fraud prevention', 'Enterprise payments, global payment processing, fraud prevention, payment optimization, unified commerce', 75), +('Braintree', 'payments', 'PayPal-owned payment platform with advanced features for online and mobile payments', 'Online payments, mobile payments, marketplace payments, subscription billing, fraud protection, global payments', 70), + +-- Communication Tools +('Slack', 'communication', 'Business communication platform with channels, direct messaging, and app integrations', 'Team communication, project collaboration, file sharing, app integrations, video calls, workflow automation', 95), +('Microsoft Teams', 'communication', 'Collaboration platform with chat, video meetings, and Microsoft 365 integration', 'Team communication, video conferencing, file collaboration, Microsoft 365 integration, project management', 90), +('Discord', 'communication', 'Voice, video, and text communication platform popular with gaming and tech communities', 'Community building, voice/video calls, text chat, server management, bot integration, streaming', 85), +('Zoom', 'communication', 'Video conferencing platform with meeting, webinar, and collaboration features', 'Video meetings, webinars, screen sharing, recording, virtual events, team collaboration', 90), +('Telegram', 'communication', 'Cloud-based messaging platform with group chats, channels, and bot support', 'Messaging, group chats, channels, file sharing, bot integration, voice/video calls, cloud storage', 80), +('WhatsApp Business', 'communication', 'Business messaging platform for customer communication and marketing', 'Customer communication, business messaging, marketing campaigns, catalog sharing, payment integration', 75), + +-- Project Management +('Trello', 'project-management', 'Visual project management tool using boards, lists, and cards for task organization', 'Task management, project tracking, team collaboration, workflow visualization, deadline management, progress tracking', 85), +('Jira', 'project-management', 'Agile project management tool designed for software development teams', 'Agile project management, issue tracking, sprint planning, bug tracking, release management, team collaboration', 90), +('Asana', 'project-management', 'Work management platform for teams to organize, track, and manage their work', 'Task management, project planning, team collaboration, workflow automation, progress tracking, deadline management', 85), +('Monday.com', 'project-management', 'Work operating system with customizable workflows and visual project management', 'Project management, team collaboration, workflow automation, resource management, time tracking, reporting', 80), +('Notion', 'project-management', 'All-in-one workspace combining notes, docs, wikis, and project management', 'Note-taking, documentation, project management, team collaboration, knowledge management, task tracking', 85), +('Basecamp', 'project-management', 'Project management and team communication platform with simple, organized interface', 'Project management, team communication, file sharing, scheduling, progress tracking, client collaboration', 75), + +-- Marketing Tools +('Mailchimp', 'marketing', 'Email marketing and automation platform with audience management and analytics', 'Email marketing, marketing automation, audience segmentation, campaign management, analytics, landing pages', 90), +('Klaviyo', 'marketing', 'E-commerce marketing automation platform with advanced segmentation and personalization', 'E-commerce marketing, email automation, SMS marketing, customer segmentation, personalization, analytics', 85), +('SEMrush', 'marketing', 'Digital marketing toolkit with SEO, PPC, content, and social media marketing tools', 'SEO analysis, keyword research, competitor analysis, PPC management, content marketing, social media management', 80), +('HubSpot Marketing', 'marketing', 'Inbound marketing platform with lead generation, email marketing, and analytics', 'Lead generation, email marketing, marketing automation, landing pages, analytics, CRM integration', 85), +('Hootsuite', 'marketing', 'Social media management platform for scheduling, monitoring, and analytics', 'Social media scheduling, content management, social listening, analytics, team collaboration, brand monitoring', 80), +('Canva', 'marketing', 'Graphic design platform with templates and tools for creating marketing materials', 'Graphic design, social media graphics, presentations, marketing materials, brand assets, team collaboration', 90), + +-- Design & Content Creation +('Figma', 'design', 'Collaborative interface design tool with real-time editing and prototyping features', 'UI/UX design, prototyping, design systems, team collaboration, design handoff, component libraries', 95), +('Adobe Creative Suite', 'design', 'Comprehensive suite of creative tools for design, photography, and video production', 'Graphic design, photo editing, video production, web design, illustration, animation, print design', 90), +('Sketch', 'design', 'Digital design toolkit for creating user interfaces and user experiences', 'UI design, prototyping, design systems, vector graphics, collaboration, design handoff', 85), +('InVision', 'design', 'Digital product design platform with prototyping and collaboration features', 'Prototyping, design collaboration, user testing, design handoff, design systems, workflow management', 80), +('Adobe XD', 'design', 'User experience design tool with prototyping and collaboration capabilities', 'UX design, prototyping, design systems, collaboration, user testing, design handoff', 85), +('Framer', 'design', 'Interactive design tool for creating high-fidelity prototypes and animations', 'Interactive prototyping, animation design, responsive design, user testing, design handoff', 75), + +-- Development & DevOps +('GitHub', 'development', 'Code hosting platform with version control, collaboration, and project management features', 'Code hosting, version control, collaboration, project management, CI/CD, code review, issue tracking', 95), +('GitLab', 'development', 'DevOps platform with Git repository management, CI/CD, and project management', 'Version control, CI/CD, project management, code review, issue tracking, DevOps automation', 85), +('Bitbucket', 'development', 'Git repository management solution with built-in CI/CD and collaboration tools', 'Version control, code collaboration, CI/CD, project management, code review, issue tracking', 80), +('Jira Software', 'development', 'Agile project management tool specifically designed for software development teams', 'Agile project management, sprint planning, issue tracking, release management, team collaboration', 90), +('Confluence', 'development', 'Team collaboration and documentation platform for knowledge sharing and project documentation', 'Documentation, knowledge management, team collaboration, project documentation, meeting notes, wikis', 85), +('Jenkins', 'development', 'Open-source automation server for building, testing, and deploying software', 'CI/CD automation, build automation, testing automation, deployment automation, pipeline management', 80), + +-- Customer Support +('Zendesk', 'customer-support', 'Customer service platform with ticketing, knowledge base, and communication tools', 'Customer support, ticket management, knowledge base, live chat, customer communication, analytics', 90), +('Intercom', 'customer-support', 'Customer messaging platform with support, engagement, and marketing features', 'Customer support, live chat, messaging, customer engagement, marketing automation, analytics', 85), +('Freshdesk', 'customer-support', 'Cloud-based customer support software with ticketing and communication features', 'Customer support, ticket management, knowledge base, live chat, customer communication, automation', 80), +('Help Scout', 'customer-support', 'Customer service platform focused on team collaboration and customer satisfaction', 'Customer support, ticket management, team collaboration, customer communication, knowledge base, analytics', 75), +('LiveChat', 'customer-support', 'Live chat software for customer support and sales with automation features', 'Live chat, customer support, sales chat, chat automation, visitor tracking, analytics', 70), +('Crisp', 'customer-support', 'Customer messaging platform with live chat, email, and social media integration', 'Live chat, customer support, email integration, social media integration, visitor tracking, analytics', 65), + +-- Business Intelligence & Reporting +('Google Data Studio', 'business-intelligence', 'Free data visualization and reporting tool that integrates with Google services', 'Data visualization, reporting, dashboard creation, Google Analytics integration, data exploration', 80), +('Looker', 'business-intelligence', 'Business intelligence platform with data modeling and visualization capabilities', 'Business intelligence, data modeling, visualization, reporting, analytics, data exploration', 85), +('Qlik Sense', 'business-intelligence', 'Self-service data visualization and business intelligence platform', 'Data visualization, business intelligence, self-service analytics, reporting, data exploration', 80), +('Sisense', 'business-intelligence', 'Business intelligence platform with embedded analytics and data visualization', 'Business intelligence, embedded analytics, data visualization, reporting, data modeling', 75), +('Domo', 'business-intelligence', 'Cloud-based business intelligence platform with real-time data visualization', 'Business intelligence, real-time analytics, data visualization, reporting, dashboard creation', 70), +('Metabase', 'business-intelligence', 'Open-source business intelligence tool with easy-to-use interface for data exploration', 'Business intelligence, data exploration, reporting, dashboard creation, SQL queries, data visualization', 75), + +-- Accounting & Finance +('QuickBooks', 'accounting', 'Accounting software for small and medium businesses with invoicing and expense tracking', 'Accounting, invoicing, expense tracking, financial reporting, tax preparation, payroll management', 90), +('Xero', 'accounting', 'Cloud-based accounting software for small businesses with bank reconciliation and reporting', 'Accounting, bank reconciliation, invoicing, expense tracking, financial reporting, inventory management', 85), +('FreshBooks', 'accounting', 'Cloud-based accounting software designed for small businesses and freelancers', 'Accounting, invoicing, expense tracking, time tracking, project management, financial reporting', 80), +('Wave', 'accounting', 'Free accounting software for small businesses with invoicing and receipt scanning', 'Accounting, invoicing, expense tracking, receipt scanning, financial reporting, tax preparation', 75), +('Sage', 'accounting', 'Business management software with accounting, payroll, and HR features', 'Accounting, payroll management, HR management, financial reporting, inventory management, business intelligence', 80), +('Zoho Books', 'accounting', 'Online accounting software with invoicing, expense tracking, and financial reporting', 'Accounting, invoicing, expense tracking, financial reporting, inventory management, project management', 75); + +-- ===================================================== +-- VERIFICATION QUERIES +-- ===================================================== + +-- Verify data insertion +SELECT + category, + COUNT(*) as tool_count, + AVG(popularity_score) as avg_popularity +FROM tools +GROUP BY category +ORDER BY tool_count DESC; + +-- Example query: Get tools by category +SELECT name, description, popularity_score +FROM tools +WHERE category = 'e-commerce' +ORDER BY popularity_score DESC; + +-- Example query: Search for tools by use case +SELECT name, category, primary_use_cases +FROM tools +WHERE primary_use_cases ILIKE '%payment%' +ORDER BY popularity_score DESC; + +-- ===================================================== +-- MIGRATION COMPLETED +-- ===================================================== + +-- Display completion message +DO $$ +BEGIN + RAISE NOTICE 'Tools table migration completed successfully!'; + RAISE NOTICE 'Created tools table with % categories and % total tools', + (SELECT COUNT(DISTINCT category) FROM tools), + (SELECT COUNT(*) FROM tools); + RAISE NOTICE 'Ready for domain-based tool recommendations'; +END $$; + diff --git a/services/tech-stack-selector/db/003_tools_pricing_migration.sql b/services/tech-stack-selector/db/003_tools_pricing_migration.sql new file mode 100644 index 0000000..0c80c97 --- /dev/null +++ b/services/tech-stack-selector/db/003_tools_pricing_migration.sql @@ -0,0 +1,788 @@ +-- ===================================================== +-- Tools Pricing Migration +-- Add pricing fields and data to tools table +-- ===================================================== + +-- Add pricing fields to tools table +ALTER TABLE tools ADD COLUMN IF NOT EXISTS price_tier_id INTEGER REFERENCES price_tiers(id); +ALTER TABLE tools ADD COLUMN IF NOT EXISTS monthly_cost_usd DECIMAL(10,2) DEFAULT 0; +ALTER TABLE tools ADD COLUMN IF NOT EXISTS setup_cost_usd DECIMAL(10,2) DEFAULT 0; +ALTER TABLE tools ADD COLUMN IF NOT EXISTS license_cost_usd DECIMAL(10,2) DEFAULT 0; +ALTER TABLE tools ADD COLUMN IF NOT EXISTS training_cost_usd DECIMAL(10,2) DEFAULT 0; +ALTER TABLE tools ADD COLUMN IF NOT EXISTS total_cost_of_ownership_score INTEGER CHECK (total_cost_of_ownership_score >= 1 AND total_cost_of_ownership_score <= 100); +ALTER TABLE tools ADD COLUMN IF NOT EXISTS price_performance_ratio INTEGER CHECK (price_performance_ratio >= 1 AND price_performance_ratio <= 100); + +-- Create index for better performance +CREATE INDEX IF NOT EXISTS idx_tools_price_tier ON tools(price_tier_id); +CREATE INDEX IF NOT EXISTS idx_tools_monthly_cost ON tools(monthly_cost_usd); + +-- ===================================================== +-- UPDATE TOOLS WITH PRICING DATA +-- ===================================================== + +-- E-commerce Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 29.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 90 +WHERE name = 'Shopify'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 100.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'WooCommerce'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Growth Stage'), + monthly_cost_usd = 200.00, + setup_cost_usd = 2000.00, + license_cost_usd = 0.00, + training_cost_usd = 500.00, + total_cost_of_ownership_score = 75, + price_performance_ratio = 80 +WHERE name = 'Magento'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 39.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'BigCommerce'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 18.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Squarespace Commerce'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 300.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 92, + price_performance_ratio = 90 +WHERE name = 'PrestaShop'; + +-- CRM Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 98, + price_performance_ratio = 95 +WHERE name = 'HubSpot CRM'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Scale-Up'), + monthly_cost_usd = 150.00, + setup_cost_usd = 1000.00, + license_cost_usd = 0.00, + training_cost_usd = 800.00, + total_cost_of_ownership_score = 80, + price_performance_ratio = 75 +WHERE name = 'Salesforce CRM'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 20.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Zoho CRM'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Pipedrive'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 29.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 82 +WHERE name = 'Freshworks CRM'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 25.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 87, + price_performance_ratio = 85 +WHERE name = 'Monday.com CRM'; + +-- Analytics Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 98, + price_performance_ratio = 95 +WHERE name = 'Google Analytics'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 25.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Mixpanel'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 20.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Amplitude'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Hotjar'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Growth Stage'), + monthly_cost_usd = 70.00, + setup_cost_usd = 500.00, + license_cost_usd = 0.00, + training_cost_usd = 400.00, + total_cost_of_ownership_score = 80, + price_performance_ratio = 75 +WHERE name = 'Tableau'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 10.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 92, + price_performance_ratio = 90 +WHERE name = 'Power BI'; + +-- Payment Processing Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'Stripe'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 90 +WHERE name = 'PayPal'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Razorpay'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Square'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Growth Stage'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 300.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Adyen'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 87, + price_performance_ratio = 82 +WHERE name = 'Braintree'; + +-- Communication Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 8.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 92, + price_performance_ratio = 90 +WHERE name = 'Slack'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 6.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Microsoft Teams'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'Discord'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Zoom'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 25.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'Telegram'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 10.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'WhatsApp Business'; + +-- Project Management Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 6.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Trello'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 8.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Jira'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 11.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 87, + price_performance_ratio = 85 +WHERE name = 'Asana'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 10.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Monday.com'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 8.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Notion'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 82 +WHERE name = 'Basecamp'; + +-- Marketing Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'Mailchimp'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 20.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Klaviyo'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 120.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 300.00, + total_cost_of_ownership_score = 75, + price_performance_ratio = 70 +WHERE name = 'SEMrush'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 50.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 80, + price_performance_ratio = 75 +WHERE name = 'HubSpot Marketing'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 49.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Hootsuite'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'Canva'; + +-- Design & Content Creation Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 12.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Figma'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 53.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 400.00, + total_cost_of_ownership_score = 80, + price_performance_ratio = 75 +WHERE name = 'Adobe Creative Suite'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 9.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Sketch'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 8.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 87, + price_performance_ratio = 82 +WHERE name = 'InVision'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 10.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Adobe XD'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 20.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Framer'; + +-- Development & DevOps Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 98, + price_performance_ratio = 95 +WHERE name = 'GitHub'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 90 +WHERE name = 'GitLab'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Bitbucket'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 8.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Jira Software'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 6.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Confluence'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 92, + price_performance_ratio = 90 +WHERE name = 'Jenkins'; + +-- Customer Support Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 19.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Zendesk'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 39.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Intercom'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'Freshdesk'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 20.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Help Scout'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 16.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 87, + price_performance_ratio = 82 +WHERE name = 'LiveChat'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 25.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Crisp'; + +-- Business Intelligence & Reporting Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 95 +WHERE name = 'Google Data Studio'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Growth Stage'), + monthly_cost_usd = 90.00, + setup_cost_usd = 500.00, + license_cost_usd = 0.00, + training_cost_usd = 400.00, + total_cost_of_ownership_score = 80, + price_performance_ratio = 75 +WHERE name = 'Looker'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Qlik Sense'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Growth Stage'), + monthly_cost_usd = 83.00, + setup_cost_usd = 1000.00, + license_cost_usd = 0.00, + training_cost_usd = 500.00, + total_cost_of_ownership_score = 75, + price_performance_ratio = 70 +WHERE name = 'Sisense'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 25.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Domo'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 95, + price_performance_ratio = 90 +WHERE name = 'Metabase'; + +-- Accounting & Finance Tools Pricing +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 200.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'QuickBooks'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 13.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 92, + price_performance_ratio = 90 +WHERE name = 'Xero'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 100.00, + total_cost_of_ownership_score = 90, + price_performance_ratio = 88 +WHERE name = 'FreshBooks'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Micro Budget'), + monthly_cost_usd = 0.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 50.00, + total_cost_of_ownership_score = 98, + price_performance_ratio = 95 +WHERE name = 'Wave'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Small Business'), + monthly_cost_usd = 25.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 300.00, + total_cost_of_ownership_score = 85, + price_performance_ratio = 80 +WHERE name = 'Sage'; + +UPDATE tools SET + price_tier_id = (SELECT id FROM price_tiers WHERE tier_name = 'Startup Budget'), + monthly_cost_usd = 15.00, + setup_cost_usd = 0.00, + license_cost_usd = 0.00, + training_cost_usd = 150.00, + total_cost_of_ownership_score = 88, + price_performance_ratio = 85 +WHERE name = 'Zoho Books'; + +-- ===================================================== +-- VERIFICATION QUERIES +-- ===================================================== + +-- Verify tools pricing data +SELECT + t.name, + t.category, + pt.tier_name, + t.monthly_cost_usd, + t.setup_cost_usd, + t.total_cost_of_ownership_score, + t.price_performance_ratio +FROM tools t +LEFT JOIN price_tiers pt ON t.price_tier_id = pt.id +ORDER BY t.monthly_cost_usd DESC, t.name; + +-- Summary by price tier +SELECT + pt.tier_name, + COUNT(t.id) as tool_count, + AVG(t.monthly_cost_usd) as avg_monthly_cost, + AVG(t.total_cost_of_ownership_score) as avg_tco_score +FROM price_tiers pt +LEFT JOIN tools t ON pt.id = t.price_tier_id +GROUP BY pt.id, pt.tier_name +ORDER BY pt.min_price_usd; + +-- ===================================================== +-- MIGRATION COMPLETED +-- ===================================================== + +-- Migration completed successfully +-- Tools are now connected to price tiers and can be included in budget calculations diff --git a/services/tech-stack-selector/docker-start.sh b/services/tech-stack-selector/docker-start.sh new file mode 100644 index 0000000..919f540 --- /dev/null +++ b/services/tech-stack-selector/docker-start.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +# ================================================================================================ +# ENHANCED TECH STACK SELECTOR - DOCKER STARTUP SCRIPT +# Optimized for Docker environment with proper service discovery +# ================================================================================================ + +set -e + +# Parse command line arguments +FORCE_MIGRATION=false +if [ "$1" = "--force-migration" ] || [ "$1" = "-f" ]; then + FORCE_MIGRATION=true + echo "🔄 Force migration mode enabled" +elif [ "$1" = "--help" ] || [ "$1" = "-h" ]; then + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --force-migration, -f Force re-run all migrations" + echo " --help, -h Show this help message" + echo "" + echo "Examples:" + echo " $0 # Normal startup with auto-migration detection" + echo " $0 --force-migration # Force re-run all migrations" + exit 0 +fi + +echo "="*60 +echo "🚀 ENHANCED TECH STACK SELECTOR v15.0 - DOCKER VERSION" +echo "="*60 +echo "✅ PostgreSQL data migrated to Neo4j" +echo "✅ Price-based relationships" +echo "✅ Real data from PostgreSQL" +echo "✅ Comprehensive pricing analysis" +echo "✅ Docker-optimized startup" +echo "="*60 + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Function to print colored output +print_status() { + echo -e "${GREEN}✅ $1${NC}" +} + +print_warning() { + echo -e "${YELLOW}⚠️ $1${NC}" +} + +print_error() { + echo -e "${RED}❌ $1${NC}" +} + +print_info() { + echo -e "${BLUE}ℹ️ $1${NC}" +} + +# Get environment variables with defaults +POSTGRES_HOST=${POSTGRES_HOST:-postgres} +POSTGRES_PORT=${POSTGRES_PORT:-5432} +POSTGRES_USER=${POSTGRES_USER:-pipeline_admin} +POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-secure_pipeline_2024} +POSTGRES_DB=${POSTGRES_DB:-dev_pipeline} +NEO4J_URI=${NEO4J_URI:-bolt://neo4j:7687} +NEO4J_USER=${NEO4J_USER:-neo4j} +NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} +CLAUDE_API_KEY=${CLAUDE_API_KEY:-} + +print_status "Environment variables loaded" +print_info "PostgreSQL: ${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}" +print_info "Neo4j: ${NEO4J_URI}" + +# Function to wait for service to be ready +wait_for_service() { + local service_name=$1 + local host=$2 + local port=$3 + local max_attempts=30 + local attempt=1 + + print_info "Waiting for ${service_name} to be ready..." + + while [ $attempt -le $max_attempts ]; do + if nc -z $host $port 2>/dev/null; then + print_status "${service_name} is ready!" + return 0 + fi + + print_info "Attempt ${attempt}/${max_attempts}: ${service_name} not ready yet, waiting 2 seconds..." + sleep 2 + attempt=$((attempt + 1)) + done + + print_error "${service_name} failed to become ready after ${max_attempts} attempts" + return 1 +} + +# Wait for PostgreSQL +if ! wait_for_service "PostgreSQL" $POSTGRES_HOST $POSTGRES_PORT; then + exit 1 +fi + +# Wait for Neo4j +if ! wait_for_service "Neo4j" neo4j 7687; then + exit 1 +fi + +# Function to check if database needs migration +check_database_migration() { + print_info "Checking if database needs migration..." + + # Check if price_tiers table exists and has data + if ! python3 -c " +import psycopg2 +import os +try: + conn = psycopg2.connect( + host=os.getenv('POSTGRES_HOST', 'postgres'), + port=int(os.getenv('POSTGRES_PORT', '5432')), + user=os.getenv('POSTGRES_USER', 'pipeline_admin'), + password=os.getenv('POSTGRES_PASSWORD', 'secure_pipeline_2024'), + database=os.getenv('POSTGRES_DB', 'dev_pipeline') + ) + cursor = conn.cursor() + + # Check if price_tiers table exists + cursor.execute(\"\"\" + SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'price_tiers' + ); + \"\"\") + table_exists = cursor.fetchone()[0] + + if not table_exists: + print('price_tiers table does not exist - migration needed') + exit(1) + + # Check if price_tiers has data + cursor.execute('SELECT COUNT(*) FROM price_tiers;') + count = cursor.fetchone()[0] + + if count == 0: + print('price_tiers table is empty - migration needed') + exit(1) + + # Check if stack_recommendations has sufficient data + cursor.execute('SELECT COUNT(*) FROM stack_recommendations;') + rec_count = cursor.fetchone()[0] + + if rec_count < 20: # Reduced threshold for Docker environment + print(f'stack_recommendations has only {rec_count} records - migration needed') + exit(1) + + print('Database appears to be fully migrated') + cursor.close() + conn.close() + +except Exception as e: + print(f'Error checking database: {e}') + exit(1) +" 2>/dev/null; then + return 1 # Migration needed + else + return 0 # Migration not needed + fi +} + +# Function to run PostgreSQL migrations +run_postgres_migrations() { + print_info "Running PostgreSQL migrations..." + + # Migration files in order + migration_files=( + "db/001_schema.sql" + "db/002_tools_migration.sql" + "db/003_tools_pricing_migration.sql" + ) + + # Set PGPASSWORD to avoid password prompts + export PGPASSWORD="$POSTGRES_PASSWORD" + + for migration_file in "${migration_files[@]}"; do + if [ ! -f "$migration_file" ]; then + print_error "Migration file not found: $migration_file" + exit 1 + fi + + print_info "Running migration: $migration_file" + + # Run migration with error handling + if psql -h $POSTGRES_HOST -p $POSTGRES_PORT -U $POSTGRES_USER -d $POSTGRES_DB -f "$migration_file" -q 2>/dev/null; then + print_status "Migration completed: $migration_file" + else + print_error "Migration failed: $migration_file" + print_info "Check the error logs above for details" + exit 1 + fi + done + + # Unset password + unset PGPASSWORD + + print_status "All PostgreSQL migrations completed successfully" +} + +# Check if migration is needed and run if necessary +if [ "$FORCE_MIGRATION" = true ]; then + print_warning "Force migration enabled - running migrations..." + run_postgres_migrations + + # Verify migration was successful + print_info "Verifying migration..." + if check_database_migration; then + print_status "Migration verification successful" + else + print_error "Migration verification failed" + exit 1 + fi +elif check_database_migration; then + print_status "Database is already migrated" +else + print_warning "Database needs migration - running migrations..." + run_postgres_migrations + + # Verify migration was successful + print_info "Verifying migration..." + if check_database_migration; then + print_status "Migration verification successful" + else + print_error "Migration verification failed" + exit 1 + fi +fi + +# Check if Neo4j migration has been run +print_info "Checking if Neo4j migration has been completed..." +if ! python3 -c " +from neo4j import GraphDatabase +import os +try: + driver = GraphDatabase.driver( + os.getenv('NEO4J_URI', 'bolt://neo4j:7687'), + auth=(os.getenv('NEO4J_USER', 'neo4j'), os.getenv('NEO4J_PASSWORD', 'password')) + ) + with driver.session() as session: + result = session.run('MATCH (p:PriceTier) RETURN count(p) as count') + price_tiers = result.single()['count'] + if price_tiers == 0: + print('No data found in Neo4j - migration needed') + exit(1) + else: + print(f'Found {price_tiers} price tiers - migration appears complete') + driver.close() +except Exception as e: + print(f'Error checking migration status: {e}') + exit(1) +" 2>/dev/null; then + print_warning "No data found in Neo4j - running migration..." + + # Run migration + if python3 migrate_postgres_to_neo4j.py; then + print_status "Migration completed successfully" + else + print_error "Migration failed" + exit 1 + fi +else + print_status "Migration appears to be complete" +fi + +# Set environment variables for the application +export NEO4J_URI="$NEO4J_URI" +export NEO4J_USER="$NEO4J_USER" +export NEO4J_PASSWORD="$NEO4J_PASSWORD" +export POSTGRES_HOST="$POSTGRES_HOST" +export POSTGRES_PORT="$POSTGRES_PORT" +export POSTGRES_USER="$POSTGRES_USER" +export POSTGRES_PASSWORD="$POSTGRES_PASSWORD" +export POSTGRES_DB="$POSTGRES_DB" +export CLAUDE_API_KEY="$CLAUDE_API_KEY" + +print_status "Environment variables set" + +# Create logs directory if it doesn't exist +mkdir -p logs + +# Start the migrated application +print_info "Starting Enhanced Tech Stack Selector (Docker Version)..." +print_info "Server will be available at: http://localhost:8002" +print_info "API documentation: http://localhost:8002/docs" +print_info "Health check: http://localhost:8002/health" +print_info "Diagnostics: http://localhost:8002/api/diagnostics" +print_info "" +print_info "Press Ctrl+C to stop the server" +print_info "" + +# Start the application +cd src +python3 main_migrated.py diff --git a/services/tech-stack-selector/migrate_postgres_to_neo4j.py b/services/tech-stack-selector/migrate_postgres_to_neo4j.py new file mode 100644 index 0000000..9a2f068 --- /dev/null +++ b/services/tech-stack-selector/migrate_postgres_to_neo4j.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python3 +""" +PostgreSQL to Neo4j Migration Script +Migrates existing PostgreSQL data to Neo4j with proper price-based relationships +""" + +import os +import sys +import subprocess +from loguru import logger + +def run_migration(): + """Run the complete migration process""" + + logger.info("="*60) + logger.info("🚀 POSTGRESQL TO NEO4J MIGRATION") + logger.info("="*60) + logger.info("✅ Using existing PostgreSQL data") + logger.info("✅ Creating price-based relationships") + logger.info("✅ Migrating to Neo4j knowledge graph") + logger.info("="*60) + + # Get environment variables with defaults + postgres_host = os.getenv("POSTGRES_HOST", "postgres") + postgres_port = int(os.getenv("POSTGRES_PORT", "5432")) + postgres_user = os.getenv("POSTGRES_USER", "pipeline_admin") + postgres_password = os.getenv("POSTGRES_PASSWORD", "secure_pipeline_2024") + postgres_db = os.getenv("POSTGRES_DB", "dev_pipeline") + neo4j_uri = os.getenv("NEO4J_URI", "bolt://neo4j:7687") + neo4j_user = os.getenv("NEO4J_USER", "neo4j") + neo4j_password = os.getenv("NEO4J_PASSWORD", "password") + + # Check if PostgreSQL is running + logger.info("🔍 Checking PostgreSQL connection...") + try: + import psycopg2 + conn = psycopg2.connect( + host=postgres_host, + port=postgres_port, + user=postgres_user, + password=postgres_password, + database=postgres_db + ) + conn.close() + logger.info("✅ PostgreSQL is running and accessible") + except Exception as e: + logger.error(f"❌ PostgreSQL connection failed: {e}") + logger.error("Please ensure PostgreSQL is running and the database is set up") + return False + + # Check if Neo4j is running + logger.info("🔍 Checking Neo4j connection...") + try: + from neo4j import GraphDatabase + driver = GraphDatabase.driver(neo4j_uri, auth=(neo4j_user, neo4j_password)) + driver.verify_connectivity() + driver.close() + logger.info("✅ Neo4j is running and accessible") + except Exception as e: + logger.error(f"❌ Neo4j connection failed: {e}") + logger.error("Please ensure Neo4j is running") + return False + + # Set up Neo4j schema + logger.info("🔧 Setting up Neo4j schema...") + try: + from neo4j import GraphDatabase + driver = GraphDatabase.driver(neo4j_uri, auth=(neo4j_user, neo4j_password)) + + with driver.session() as session: + # Read and execute the schema file + with open("Neo4j_From_Postgres.cql", 'r') as f: + cql_content = f.read() + + # Split by semicolon and execute each statement + statements = [stmt.strip() for stmt in cql_content.split(';') if stmt.strip()] + + for i, statement in enumerate(statements): + if statement and not statement.startswith('//'): + try: + session.run(statement) + logger.info(f"✅ Executed schema statement {i+1}/{len(statements)}") + except Exception as e: + logger.warning(f"⚠️ Schema statement {i+1} failed: {e}") + continue + + driver.close() + logger.info("✅ Neo4j schema setup completed") + except Exception as e: + logger.error(f"❌ Neo4j schema setup failed: {e}") + return False + + # Run the migration + logger.info("🔄 Running PostgreSQL to Neo4j migration...") + try: + # Add src to path + sys.path.append('src') + + from postgres_to_neo4j_migration import PostgresToNeo4jMigration + + # Configuration + postgres_config = { + "host": postgres_host, + "port": postgres_port, + "user": postgres_user, + "password": postgres_password, + "database": postgres_db + } + + neo4j_config = { + "uri": neo4j_uri, + "user": neo4j_user, + "password": neo4j_password + } + + # Run migration + migration = PostgresToNeo4jMigration(postgres_config, neo4j_config) + success = migration.run_full_migration() + + if success: + logger.info("✅ Migration completed successfully!") + return True + else: + logger.error("❌ Migration failed!") + return False + + except Exception as e: + logger.error(f"❌ Migration failed: {e}") + return False + +def test_migrated_data(): + """Test the migrated data""" + logger.info("🧪 Testing migrated data...") + + try: + from neo4j import GraphDatabase + + driver = GraphDatabase.driver(neo4j_uri, auth=(neo4j_user, neo4j_password)) + + with driver.session() as session: + # Test price tiers + result = session.run("MATCH (p:PriceTier) RETURN count(p) as count") + price_tiers_count = result.single()["count"] + logger.info(f"✅ Price tiers: {price_tiers_count}") + + # Test technologies + result = session.run("MATCH (t:Technology) RETURN count(t) as count") + technologies_count = result.single()["count"] + logger.info(f"✅ Technologies: {technologies_count}") + + # Test tools + result = session.run("MATCH (tool:Tool) RETURN count(tool) as count") + tools_count = result.single()["count"] + logger.info(f"✅ Tools: {tools_count}") + + # Test tech stacks + result = session.run("MATCH (s:TechStack) RETURN count(s) as count") + stacks_count = result.single()["count"] + logger.info(f"✅ Tech stacks: {stacks_count}") + + # Test relationships + result = session.run("MATCH ()-[r]->() RETURN count(r) as count") + relationships_count = result.single()["count"] + logger.info(f"✅ Relationships: {relationships_count}") + + # Test complete stacks + result = session.run(""" + MATCH (s:TechStack) + WHERE exists((s)-[:BELONGS_TO_TIER]->()) + AND exists((s)-[:USES_FRONTEND]->()) + AND exists((s)-[:USES_BACKEND]->()) + AND exists((s)-[:USES_DATABASE]->()) + AND exists((s)-[:USES_CLOUD]->()) + RETURN count(s) as count + """) + complete_stacks_count = result.single()["count"] + logger.info(f"✅ Complete stacks: {complete_stacks_count}") + + driver.close() + logger.info("✅ Data validation completed successfully!") + return True + + except Exception as e: + logger.error(f"❌ Data validation failed: {e}") + return False + +def start_migrated_service(): + """Start the migrated service""" + logger.info("🚀 Starting migrated service...") + + try: + # Set environment variables + os.environ["NEO4J_URI"] = neo4j_uri + os.environ["NEO4J_USER"] = neo4j_user + os.environ["NEO4J_PASSWORD"] = neo4j_password + os.environ["POSTGRES_HOST"] = postgres_host + os.environ["POSTGRES_PORT"] = str(postgres_port) + os.environ["POSTGRES_USER"] = postgres_user + os.environ["POSTGRES_PASSWORD"] = postgres_password + os.environ["POSTGRES_DB"] = postgres_db + os.environ["CLAUDE_API_KEY"] = "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" + + # Start the service + subprocess.run([ + sys.executable, "src/main_migrated.py" + ]) + + except Exception as e: + logger.error(f"❌ Failed to start migrated service: {e}") + +if __name__ == "__main__": + # Run migration + if run_migration(): + logger.info("✅ Migration completed successfully!") + + # Test migrated data + if test_migrated_data(): + logger.info("✅ Data validation passed!") + + # Ask user if they want to start the service + response = input("\n🚀 Start the migrated service? (y/n): ") + if response.lower() in ['y', 'yes']: + start_migrated_service() + else: + logger.info("✅ Migration completed. You can start the service later with:") + logger.info(" python src/main_migrated.py") + else: + logger.error("❌ Data validation failed!") + sys.exit(1) + else: + logger.error("❌ Migration failed!") + sys.exit(1) diff --git a/services/tech-stack-selector/postman_collection.json b/services/tech-stack-selector/postman_collection.json new file mode 100644 index 0000000..a6e5b83 --- /dev/null +++ b/services/tech-stack-selector/postman_collection.json @@ -0,0 +1,1337 @@ +{ + "info": { + "_postman_id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890", + "name": "Enhanced Tech Stack Selector - Migrated Version", + "description": "Complete Postman collection for the Enhanced Tech Stack Selector API v15.0.0\n\nThis collection includes all endpoints for:\n- Health checks and diagnostics\n- Tech stack recommendations\n- Price tier analysis\n- Technology and tool queries\n- Data integrity validation\n- Compatibility analysis\n\nBase URL: http://localhost:8002", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "12345678" + }, + "item": [ + { + "name": "Health & Diagnostics", + "item": [ + { + "name": "Health Check", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Response has required fields\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('status');", + " pm.expect(jsonData).to.have.property('service');", + " pm.expect(jsonData).to.have.property('version');", + " pm.expect(jsonData).to.have.property('features');", + "});", + "", + "pm.test(\"Service is healthy\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.status).to.eql('healthy');", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/health", + "host": [ + "{{base_url}}" + ], + "path": [ + "health" + ] + }, + "description": "Basic health check endpoint to verify the service is running" + }, + "response": [] + }, + { + "name": "Diagnostics", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Diagnostics response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('service');", + " pm.expect(jsonData).to.have.property('version');", + " pm.expect(jsonData).to.have.property('checks');", + " pm.expect(jsonData.checks).to.have.property('neo4j');", + "});", + "", + "pm.test(\"Neo4j connection status\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.checks.neo4j).to.have.property('status');", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/diagnostics", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "diagnostics" + ] + }, + "description": "Comprehensive diagnostics including Neo4j connection status and data integrity checks" + }, + "response": [] + } + ], + "description": "Health and diagnostics endpoints" + }, + { + "name": "Recommendations", + "item": [ + { + "name": "Get Best Recommendations", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Recommendations response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('recommendations');", + " pm.expect(jsonData).to.have.property('count');", + " pm.expect(jsonData.success).to.be.true;", + "});", + "", + "pm.test(\"Recommendations array is not empty\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.recommendations).to.be.an('array');", + " pm.expect(jsonData.count).to.be.above(0);", + "});", + "", + "pm.test(\"Each recommendation has required fields\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.recommendations.length > 0) {", + " const firstRec = jsonData.recommendations[0];", + " pm.expect(firstRec).to.have.property('stack_name');", + " pm.expect(firstRec).to.have.property('monthly_cost');", + " pm.expect(firstRec).to.have.property('frontend');", + " pm.expect(firstRec).to.have.property('backend');", + " pm.expect(firstRec).to.have.property('database');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"web development\",\n \"budget\": 500.0,\n \"preferredTechnologies\": [\"React\", \"Node.js\", \"PostgreSQL\"]\n}" + }, + "url": { + "raw": "{{base_url}}/recommend/best", + "host": [ + "{{base_url}}" + ], + "path": [ + "recommend", + "best" + ] + }, + "description": "Get the best tech stack recommendations based on budget, domain, and preferred technologies" + }, + "response": [] + }, + { + "name": "Get Recommendations - E-commerce", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Domain-specific recommendations\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.domain).to.eql('e-commerce');", + " pm.expect(jsonData.budget).to.eql(1000);", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"e-commerce\",\n \"budget\": 1000.0,\n \"preferredTechnologies\": [\"Vue.js\", \"Django\", \"Redis\"]\n}" + }, + "url": { + "raw": "{{base_url}}/recommend/best", + "host": [ + "{{base_url}}" + ], + "path": [ + "recommend", + "best" + ] + }, + "description": "Get recommendations specifically for e-commerce domain with higher budget" + }, + "response": [] + }, + { + "name": "Get Recommendations - Startup Budget", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Low budget recommendations\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.budget).to.eql(100);", + " if (jsonData.recommendations.length > 0) {", + " jsonData.recommendations.forEach(rec => {", + " pm.expect(rec.monthly_cost).to.be.at.most(100);", + " });", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"startup\",\n \"budget\": 100.0\n}" + }, + "url": { + "raw": "{{base_url}}/recommend/best", + "host": [ + "{{base_url}}" + ], + "path": [ + "recommend", + "best" + ] + }, + "description": "Get recommendations for startup with limited budget" + }, + "response": [] + } + ], + "description": "Tech stack recommendation endpoints" + }, + { + "name": "Price Tiers", + "item": [ + { + "name": "Get All Price Tiers", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Price tiers response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('price_tiers');", + " pm.expect(jsonData).to.have.property('count');", + " pm.expect(jsonData.success).to.be.true;", + "});", + "", + "pm.test(\"Price tiers have required fields\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.price_tiers.length > 0) {", + " const firstTier = jsonData.price_tiers[0];", + " pm.expect(firstTier).to.have.property('tier_name');", + " pm.expect(firstTier).to.have.property('min_price');", + " pm.expect(firstTier).to.have.property('max_price');", + " pm.expect(firstTier).to.have.property('target_audience');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/price-tiers", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "price-tiers" + ] + }, + "description": "Get analysis of all price tiers with technology and tool counts" + }, + "response": [] + } + ], + "description": "Price tier analysis endpoints" + }, + { + "name": "Technologies", + "item": [ + { + "name": "Get Technologies by Tier - Free", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Technologies response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('tier_name');", + " pm.expect(jsonData).to.have.property('technologies');", + " pm.expect(jsonData.tier_name).to.eql('Free');", + "});", + "", + "pm.test(\"Technologies have required fields\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.technologies.length > 0) {", + " const firstTech = jsonData.technologies[0];", + " pm.expect(firstTech).to.have.property('name');", + " pm.expect(firstTech).to.have.property('category');", + " pm.expect(firstTech).to.have.property('monthly_cost');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/technologies/Free", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "technologies", + "Free" + ] + }, + "description": "Get all technologies in the Free price tier" + }, + "response": [] + }, + { + "name": "Get Technologies by Tier - Micro Budget", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/technologies/Micro%20Budget", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "technologies", + "Micro Budget" + ] + }, + "description": "Get all technologies in the Micro Budget price tier" + }, + "response": [] + }, + { + "name": "Get Technologies by Tier - Startup Budget", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/technologies/Startup%20Budget", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "technologies", + "Startup Budget" + ] + }, + "description": "Get all technologies in the Startup Budget price tier" + }, + "response": [] + }, + { + "name": "Get Technologies by Tier - Growth Stage", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/technologies/Growth%20Stage", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "technologies", + "Growth Stage" + ] + }, + "description": "Get all technologies in the Growth Stage price tier" + }, + "response": [] + } + ], + "description": "Technology queries by price tier" + }, + { + "name": "Tools", + "item": [ + { + "name": "Get Tools by Tier - Free", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Tools response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('tier_name');", + " pm.expect(jsonData).to.have.property('tools');", + " pm.expect(jsonData.tier_name).to.eql('Free');", + "});", + "", + "pm.test(\"Tools have required fields\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.tools.length > 0) {", + " const firstTool = jsonData.tools[0];", + " pm.expect(firstTool).to.have.property('name');", + " pm.expect(firstTool).to.have.property('category');", + " pm.expect(firstTool).to.have.property('monthly_cost');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/tools/Free", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "tools", + "Free" + ] + }, + "description": "Get all tools in the Free price tier" + }, + "response": [] + }, + { + "name": "Get Tools by Tier - Micro Budget", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/tools/Micro%20Budget", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "tools", + "Micro Budget" + ] + }, + "description": "Get all tools in the Micro Budget price tier" + }, + "response": [] + }, + { + "name": "Get Tools by Tier - Startup Budget", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/tools/Startup%20Budget", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "tools", + "Startup Budget" + ] + }, + "description": "Get all tools in the Startup Budget price tier" + }, + "response": [] + }, + { + "name": "Get Tools by Tier - Growth Stage", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/tools/Growth%20Stage", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "tools", + "Growth Stage" + ] + }, + "description": "Get all tools in the Growth Stage price tier" + }, + "response": [] + } + ], + "description": "Tool queries by price tier" + }, + { + "name": "Analysis & Optimization", + "item": [ + { + "name": "Get Optimal Combinations - Frontend", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Optimal combinations response\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('combinations');", + " pm.expect(jsonData).to.have.property('budget');", + " pm.expect(jsonData).to.have.property('category');", + " pm.expect(jsonData.category).to.eql('frontend');", + "});", + "", + "pm.test(\"All combinations within budget\", function () {", + " const jsonData = pm.response.json();", + " const budget = jsonData.budget;", + " if (jsonData.combinations.length > 0) {", + " jsonData.combinations.forEach(combo => {", + " pm.expect(combo.monthly_cost).to.be.at.most(budget);", + " });", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/combinations/optimal?budget=300&category=frontend", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "combinations", + "optimal" + ], + "query": [ + { + "key": "budget", + "value": "300" + }, + { + "key": "category", + "value": "frontend" + } + ] + }, + "description": "Get optimal frontend technology combinations within budget" + }, + "response": [] + }, + { + "name": "Get Optimal Combinations - Backend", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/combinations/optimal?budget=500&category=backend", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "combinations", + "optimal" + ], + "query": [ + { + "key": "budget", + "value": "500" + }, + { + "key": "category", + "value": "backend" + } + ] + }, + "description": "Get optimal backend technology combinations within budget" + }, + "response": [] + }, + { + "name": "Get Optimal Combinations - Database", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/combinations/optimal?budget=200&category=database", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "combinations", + "optimal" + ], + "query": [ + { + "key": "budget", + "value": "200" + }, + { + "key": "category", + "value": "database" + } + ] + }, + "description": "Get optimal database technology combinations within budget" + }, + "response": [] + } + ], + "description": "Analysis and optimization endpoints" + }, + { + "name": "Compatibility", + "item": [ + { + "name": "Get Compatibility - React", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Compatibility response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('tech_name');", + " pm.expect(jsonData).to.have.property('compatible_technologies');", + " pm.expect(jsonData.tech_name).to.eql('React');", + "});", + "", + "pm.test(\"Compatible technologies have scores\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.compatible_technologies.length > 0) {", + " const firstCompat = jsonData.compatible_technologies[0];", + " pm.expect(firstCompat).to.have.property('compatible_tech');", + " pm.expect(firstCompat).to.have.property('score');", + " pm.expect(firstCompat).to.have.property('category');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/compatibility/React", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "compatibility", + "React" + ] + }, + "description": "Get compatibility analysis for React technology" + }, + "response": [] + }, + { + "name": "Get Compatibility - Node.js", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/compatibility/Node.js", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "compatibility", + "Node.js" + ] + }, + "description": "Get compatibility analysis for Node.js technology" + }, + "response": [] + }, + { + "name": "Get Compatibility - PostgreSQL", + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/compatibility/PostgreSQL", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "compatibility", + "PostgreSQL" + ] + }, + "description": "Get compatibility analysis for PostgreSQL database" + }, + "response": [] + } + ], + "description": "Technology compatibility analysis" + }, + { + "name": "Data Validation", + "item": [ + { + "name": "Validate Data Integrity", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Data integrity response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('integrity_check');", + " pm.expect(jsonData).to.have.property('summary');", + " pm.expect(jsonData.success).to.be.true;", + "});", + "", + "pm.test(\"Summary has stack counts\", function () {", + " const jsonData = pm.response.json();", + " const summary = jsonData.summary;", + " pm.expect(summary).to.have.property('total_stacks');", + " pm.expect(summary).to.have.property('complete_stacks');", + " pm.expect(summary).to.have.property('incomplete_stacks');", + " pm.expect(summary.total_stacks).to.be.a('number');", + "});", + "", + "pm.test(\"Stack counts are consistent\", function () {", + " const jsonData = pm.response.json();", + " const summary = jsonData.summary;", + " pm.expect(summary.complete_stacks + summary.incomplete_stacks)", + " .to.eql(summary.total_stacks);", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/validate/integrity", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "validate", + "integrity" + ] + }, + "description": "Validate the integrity of migrated data in Neo4j" + }, + "response": [] + }, + { + "name": "Validate Stack Completeness", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Stack completeness response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('validation_results');", + " pm.expect(jsonData).to.have.property('incomplete_stacks');", + " pm.expect(jsonData.success).to.be.true;", + "});", + "", + "pm.test(\"Validation results have required fields\", function () {", + " const jsonData = pm.response.json();", + " const results = jsonData.validation_results;", + " pm.expect(results).to.have.property('total_stacks');", + " pm.expect(results).to.have.property('complete_stacks');", + " pm.expect(results).to.have.property('incomplete_count');", + "});", + "", + "pm.test(\"Incomplete stacks have missing components\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.incomplete_stacks.length > 0) {", + " const firstIncomplete = jsonData.incomplete_stacks[0];", + " pm.expect(firstIncomplete).to.have.property('stack_name');", + " pm.expect(firstIncomplete).to.have.property('missing_components');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/validate/stacks", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "validate", + "stacks" + ] + }, + "description": "Validate that all tech stacks have complete frontend, backend, and database components" + }, + "response": [] + }, + { + "name": "Validate Price Consistency", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Price consistency response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('price_validation');", + " pm.expect(jsonData).to.have.property('inconsistencies');", + " pm.expect(jsonData.success).to.be.true;", + "});", + "", + "pm.test(\"Price validation summary\", function () {", + " const jsonData = pm.response.json();", + " const validation = jsonData.price_validation;", + " pm.expect(validation).to.have.property('total_stacks_checked');", + " pm.expect(validation).to.have.property('consistent_stacks');", + " pm.expect(validation).to.have.property('inconsistent_count');", + "});", + "", + "pm.test(\"Inconsistencies have details\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.inconsistencies.length > 0) {", + " const firstInconsistency = jsonData.inconsistencies[0];", + " pm.expect(firstInconsistency).to.have.property('stack_name');", + " pm.expect(firstInconsistency).to.have.property('calculated_cost');", + " pm.expect(firstInconsistency).to.have.property('stored_cost');", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/validate/prices", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "validate", + "prices" + ] + }, + "description": "Validate price consistency between calculated and stored monthly costs" + }, + "response": [] + } + ], + "description": "Data validation and integrity checks" + }, + { + "name": "Search & Filtering", + "item": [ + { + "name": "Search Technologies", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Search response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('query');", + " pm.expect(jsonData).to.have.property('results');", + " pm.expect(jsonData).to.have.property('count');", + "});", + "", + "pm.test(\"Search results contain query term\", function () {", + " const jsonData = pm.response.json();", + " const query = jsonData.query.toLowerCase();", + " if (jsonData.results.length > 0) {", + " jsonData.results.forEach(result => {", + " const name = result.name.toLowerCase();", + " const category = result.category.toLowerCase();", + " pm.expect(name.includes(query) || category.includes(query)).to.be.true;", + " });", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/search/technologies?q=javascript", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "search", + "technologies" + ], + "query": [ + { + "key": "q", + "value": "javascript" + } + ] + }, + "description": "Search technologies by name or category" + }, + "response": [] + }, + { + "name": "Search Tools", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Tools search response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('query');", + " pm.expect(jsonData).to.have.property('results');", + " pm.expect(jsonData).to.have.property('count');", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/search/tools?q=docker", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "search", + "tools" + ], + "query": [ + { + "key": "q", + "value": "docker" + } + ] + }, + "description": "Search tools by name or category" + }, + "response": [] + }, + { + "name": "Filter by Budget Range", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Budget filter response structure\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success');", + " pm.expect(jsonData).to.have.property('budget_range');", + " pm.expect(jsonData).to.have.property('stacks');", + " pm.expect(jsonData).to.have.property('count');", + "});", + "", + "pm.test(\"All stacks within budget range\", function () {", + " const jsonData = pm.response.json();", + " const minBudget = jsonData.budget_range.min;", + " const maxBudget = jsonData.budget_range.max;", + " ", + " if (jsonData.stacks.length > 0) {", + " jsonData.stacks.forEach(stack => {", + " pm.expect(stack.monthly_cost).to.be.at.least(minBudget);", + " pm.expect(stack.monthly_cost).to.be.at.most(maxBudget);", + " });", + " }", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{base_url}}/api/filter/budget?min=100&max=500", + "host": [ + "{{base_url}}" + ], + "path": [ + "api", + "filter", + "budget" + ], + "query": [ + { + "key": "min", + "value": "100" + }, + { + "key": "max", + "value": "500" + } + ] + }, + "description": "Filter tech stacks by budget range" + }, + "response": [] + } + ], + "description": "Search and filtering endpoints" + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "type": "text/javascript", + "exec": [ + "// Set base URL if not already set", + "if (!pm.environment.get('base_url')) {", + " pm.environment.set('base_url', 'http://localhost:8002');", + "}", + "", + "// Add timestamp for unique test runs", + "pm.environment.set('timestamp', new Date().toISOString());" + ] + } + }, + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Global test to ensure response time is reasonable", + "pm.test(\"Response time is less than 5000ms\", function () {", + " pm.expect(pm.response.responseTime).to.be.below(5000);", + "});", + "", + "// Global test to ensure content type is JSON for API endpoints", + "const url = pm.request.url.toString();", + "if (url.includes('/api/') || url.includes('/recommend/')) {", + " pm.test(\"Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get('Content-Type')).to.include('application/json');", + " });", + "}" + ] + } + } + ], + "variable": [ + { + "key": "base_url", + "value": "http://localhost:8002", + "type": "string", + "description": "Base URL for the Enhanced Tech Stack Selector API" + }, + { + "key": "api_version", + "value": "v15.0.0", + "type": "string", + "description": "Current API version" + }, + { + "key": "test_budget_low", + "value": "100", + "type": "string", + "description": "Low budget for testing (startup tier)" + }, + { + "key": "test_budget_medium", + "value": "500", + "type": "string", + "description": "Medium budget for testing (professional tier)" + }, + { + "key": "test_budget_high", + "value": "1000", + "type": "string", + "description": "High budget for testing (enterprise tier)" + } + ] +} \ No newline at end of file diff --git a/services/tech-stack-selector/requirements.txt b/services/tech-stack-selector/requirements.txt index 4b89dde..388154e 100644 --- a/services/tech-stack-selector/requirements.txt +++ b/services/tech-stack-selector/requirements.txt @@ -38,3 +38,5 @@ yarl>=1.9.0 six>=1.16.0 pytz>=2023.3 greenlet>=3.0.0 +psycopg2-binary==2.9.9 +neo4j>=5.0.0 diff --git a/services/tech-stack-selector/src/main.py b/services/tech-stack-selector/src/main.py deleted file mode 100644 index 0a4ea1a..0000000 --- a/services/tech-stack-selector/src/main.py +++ /dev/null @@ -1,944 +0,0 @@ - - -# import os -# import sys -# import json -# from datetime import datetime -# from typing import Dict, Any, Optional, List -# from pydantic import BaseModel -# from fastapi import FastAPI, HTTPException, Request -# from fastapi.middleware.cors import CORSMiddleware -# from loguru import logger - -# # AI integration -# try: -# import anthropic -# CLAUDE_AVAILABLE = True -# except ImportError: -# CLAUDE_AVAILABLE = False - -# # Configure logging -# logger.remove() -# logger.add(sys.stdout, level="INFO", format="{time} | {level} | {message}") - -# # API Key -# CLAUDE_API_KEY = "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" - -# if not os.getenv("CLAUDE_API_KEY") and CLAUDE_API_KEY: -# os.environ["CLAUDE_API_KEY"] = CLAUDE_API_KEY - -# # ================================================================================================ -# # ENHANCED TECH STACK SELECTOR - WITH FUNCTIONAL REQUIREMENTS DISPLAY -# # ================================================================================================ - -# class EnhancedTechStackSelector: -# """Enhanced selector that handles business context + functional requirements""" - -# def __init__(self): -# self.claude_client = anthropic.Anthropic(api_key=CLAUDE_API_KEY) if CLAUDE_AVAILABLE else None -# logger.info("Enhanced Tech Stack Selector initialized") - -# # ================================================================================================ -# # FASTAPI APPLICATION -# # ================================================================================================ - -# app = FastAPI( -# title="Enhanced Tech Stack Selector", -# description="Enhanced tech stack recommendations with functional requirements display", -# version="11.0.0" -# ) - -# app.add_middleware( -# CORSMiddleware, -# allow_origins=["*"], -# allow_credentials=True, -# allow_methods=["*"], -# allow_headers=["*"], -# ) - -# # Initialize enhanced selector -# enhanced_selector = EnhancedTechStackSelector() - -# @app.get("/health") -# async def health_check(): -# """Health check""" -# return { -# "status": "healthy", -# "service": "enhanced-tech-stack-selector", -# "version": "11.0.0", -# "approach": "functional_requirements_aware_recommendations" -# } - -# @app.post("/api/v1/select") -# async def select_enhanced_tech_stack(request: Request): -# """ENHANCED VERSION - Shows functional requirements + tech recommendations for architecture-designer""" -# try: -# request_data = await request.json() - -# # Log exactly what we receive -# logger.info("=== RECEIVED ENHANCED DATA START ===") -# logger.info(json.dumps(request_data, indent=2, default=str)) -# logger.info("=== RECEIVED ENHANCED DATA END ===") - -# # Extract enhanced data components -# extracted_data = extract_enhanced_data(request_data) - -# if not extracted_data["features"] and not extracted_data["feature_name"]: -# logger.error("❌ NO FEATURES OR FEATURE DATA FOUND") -# return { -# "error": "No features or feature data found in request", -# "received_data_keys": list(request_data.keys()) if isinstance(request_data, dict) else "not_dict", -# "extraction_attempted": "enhanced_data_extraction" -# } - -# # Build comprehensive context for Claude -# context = build_comprehensive_context(extracted_data) - -# # Generate enhanced tech stack recommendations -# recommendations = await generate_enhanced_recommendations(context) - -# # NEW: Build complete response with functional requirements for architecture-designer -# complete_response = { -# "success": True, -# "enhanced_analysis": True, - -# # PROJECT CONTEXT - For Web Dashboard Display -# "project_context": { -# "project_name": extracted_data["project_name"], -# "project_type": extracted_data["project_type"], -# "features_analyzed": len(extracted_data["features"]), -# "business_questions_answered": len(extracted_data["business_answers"]), -# "complexity": extracted_data["complexity"] -# }, - -# # FUNCTIONAL REQUIREMENTS - For Web Dashboard Display & Architecture Designer -# "functional_requirements": { -# "feature_name": extracted_data["feature_name"], -# "description": extracted_data["description"], -# "technical_requirements": extracted_data["requirements"], -# "business_logic_rules": extracted_data["logic_rules"], -# "complexity_level": extracted_data["complexity"], -# "all_features": extracted_data["features"], -# "business_context": { -# "questions": extracted_data["business_questions"], -# "answers": extracted_data["business_answers"] -# } -# }, - -# # TECHNOLOGY RECOMMENDATIONS - Claude Generated -# "claude_recommendations": recommendations, - -# # COMPLETE DATA FOR ARCHITECTURE DESIGNER -# "architecture_designer_input": { -# "project_data": { -# "project_name": extracted_data["project_name"], -# "project_type": extracted_data["project_type"], -# "complexity": extracted_data["complexity"] -# }, -# "functional_specifications": { -# "primary_feature": { -# "name": extracted_data["feature_name"], -# "description": extracted_data["description"], -# "requirements": extracted_data["requirements"], -# "logic_rules": extracted_data["logic_rules"] -# }, -# "all_features": extracted_data["features"], -# "business_context": extracted_data["business_answers"] -# }, -# "technology_stack": recommendations, -# "business_requirements": context["business_context"] -# }, - -# "analysis_timestamp": datetime.utcnow().isoformat(), -# "ready_for_architecture_design": True -# } - -# logger.info(f"✅ Enhanced tech stack analysis completed with functional requirements") -# logger.info(f" Feature: {extracted_data['feature_name']}") -# logger.info(f" Requirements: {len(extracted_data['requirements'])}") -# logger.info(f" Logic Rules: {len(extracted_data['logic_rules'])}") -# logger.info(f" Business Answers: {len(extracted_data['business_answers'])}") - -# return complete_response - -# except Exception as e: -# logger.error(f"💥 ERROR in enhanced tech stack selection: {e}") -# return { -# "error": str(e), -# "debug": "Check service logs for detailed error information" -# } - -# def extract_enhanced_data(request_data: Dict) -> Dict: -# """Extract enhanced data from web dashboard request""" -# extracted = { -# "project_name": "Unknown Project", -# "project_type": "unknown", -# "feature_name": "", -# "description": "", -# "requirements": [], -# "complexity": "medium", -# "logic_rules": [], -# "business_questions": [], -# "business_answers": [], -# "features": [], -# "all_features": [] -# } - -# logger.info("🔍 Extracting enhanced data with functional requirements...") - -# # Path 1: Direct enhanced data format from web dashboard -# if isinstance(request_data, dict): -# # Extract main feature data -# extracted["feature_name"] = request_data.get("featureName", "") -# extracted["description"] = request_data.get("description", "") -# extracted["requirements"] = request_data.get("requirements", []) -# extracted["complexity"] = request_data.get("complexity", "medium") -# extracted["logic_rules"] = request_data.get("logicRules", []) -# extracted["business_questions"] = request_data.get("businessQuestions", []) -# extracted["business_answers"] = request_data.get("businessAnswers", []) -# extracted["project_name"] = request_data.get("projectName", "Unknown Project") -# extracted["project_type"] = request_data.get("projectType", "unknown") -# extracted["all_features"] = request_data.get("allFeatures", []) - -# # If we have business answers in object format, convert to list -# if isinstance(extracted["business_answers"], dict): -# ba_list = [] -# for key, value in extracted["business_answers"].items(): -# if isinstance(value, str) and value.strip(): -# question_idx = int(key) if key.isdigit() else 0 -# if question_idx < len(extracted["business_questions"]): -# ba_list.append({ -# "question": extracted["business_questions"][question_idx], -# "answer": value.strip() -# }) -# extracted["business_answers"] = ba_list - -# # Extract features list -# if extracted["feature_name"]: -# extracted["features"] = [extracted["feature_name"]] - -# # Add all features if available -# if extracted["all_features"]: -# feature_names = [] -# for feature in extracted["all_features"]: -# if isinstance(feature, dict): -# feature_names.append(feature.get("name", feature.get("featureName", ""))) -# else: -# feature_names.append(str(feature)) -# extracted["features"].extend([f for f in feature_names if f]) - -# logger.info(f"✅ Extracted enhanced data with functional requirements:") -# logger.info(f" Project: {extracted['project_name']} ({extracted['project_type']})") -# logger.info(f" Main feature: {extracted['feature_name']}") -# logger.info(f" Requirements: {len(extracted['requirements'])}") -# logger.info(f" Logic Rules: {len(extracted['logic_rules'])}") -# logger.info(f" Complexity: {extracted['complexity']}") -# logger.info(f" Business answers: {len(extracted['business_answers'])}") -# logger.info(f" Total features: {len(extracted['features'])}") - -# return extracted - -# def build_comprehensive_context(extracted_data: Dict) -> Dict: -# """Build comprehensive context for Claude analysis""" - -# # Combine all requirements and business insights -# functional_requirements = [] -# if extracted_data["feature_name"]: -# functional_requirements.append(f"Core Feature: {extracted_data['feature_name']}") - -# if extracted_data["requirements"]: -# functional_requirements.extend([f"• {req}" for req in extracted_data["requirements"]]) - -# if extracted_data["features"]: -# for feature in extracted_data["features"]: -# if feature and feature != extracted_data["feature_name"]: -# functional_requirements.append(f"• {feature}") - -# # Business context from answers -# business_context = {} -# if extracted_data["business_answers"]: -# for answer_data in extracted_data["business_answers"]: -# if isinstance(answer_data, dict): -# question = answer_data.get("question", "") -# answer = answer_data.get("answer", "") -# if question and answer: -# # Categorize business answers -# if any(keyword in question.lower() for keyword in ["user", "scale", "concurrent"]): -# business_context["scale_requirements"] = business_context.get("scale_requirements", []) -# business_context["scale_requirements"].append(f"{question}: {answer}") -# elif any(keyword in question.lower() for keyword in ["compliance", "security", "encryption"]): -# business_context["security_requirements"] = business_context.get("security_requirements", []) -# business_context["security_requirements"].append(f"{question}: {answer}") -# elif any(keyword in question.lower() for keyword in ["budget", "timeline", "timeline"]): -# business_context["project_constraints"] = business_context.get("project_constraints", []) -# business_context["project_constraints"].append(f"{question}: {answer}") -# else: -# business_context["other_requirements"] = business_context.get("other_requirements", []) -# business_context["other_requirements"].append(f"{question}: {answer}") - -# return { -# "project_name": extracted_data["project_name"], -# "project_type": extracted_data["project_type"], -# "complexity": extracted_data["complexity"], -# "functional_requirements": functional_requirements, -# "business_context": business_context, -# "logic_rules": extracted_data["logic_rules"] -# } - -# async def generate_enhanced_recommendations(context: Dict) -> Dict: -# """Generate enhanced tech stack recommendations using Claude with business context""" - -# if not enhanced_selector.claude_client: -# logger.error("❌ Claude client not available") -# return { -# "error": "Claude AI not available", -# "fallback": "Basic recommendations would go here" -# } - -# # Build comprehensive prompt with business context -# functional_reqs_text = "\n".join(context["functional_requirements"]) - -# business_context_text = "" -# for category, requirements in context["business_context"].items(): -# business_context_text += f"\n{category.replace('_', ' ').title()}:\n" -# business_context_text += "\n".join([f" - {req}" for req in requirements]) + "\n" - -# logic_rules_text = "\n".join([f" - {rule}" for rule in context["logic_rules"]]) - -# prompt = f"""You are a senior software architect. Analyze this comprehensive project context and recommend the optimal technology stack. - -# PROJECT CONTEXT: -# - Name: {context["project_name"]} -# - Type: {context["project_type"]} -# - Complexity: {context["complexity"]} - -# FUNCTIONAL REQUIREMENTS: -# {functional_reqs_text} - -# BUSINESS CONTEXT & CONSTRAINTS: -# {business_context_text} - -# BUSINESS LOGIC RULES: -# {logic_rules_text} - -# Based on this comprehensive analysis, provide detailed technology recommendations as a JSON object: - -# {{ -# "technology_recommendations": {{ -# "frontend": {{ -# "framework": "recommended framework", -# "libraries": ["lib1", "lib2", "lib3"], -# "reasoning": "detailed reasoning based on requirements and business context" -# }}, -# "backend": {{ -# "framework": "recommended backend framework", -# "language": "programming language", -# "libraries": ["lib1", "lib2", "lib3"], -# "reasoning": "detailed reasoning based on complexity and business needs" -# }}, -# "database": {{ -# "primary": "primary database choice", -# "secondary": ["cache", "search", "analytics"], -# "reasoning": "database choice based on data requirements and scale" -# }}, -# "infrastructure": {{ -# "cloud_provider": "recommended cloud provider", -# "orchestration": "container/orchestration choice", -# "services": ["service1", "service2", "service3"], -# "reasoning": "infrastructure reasoning based on scale and budget" -# }}, -# "security": {{ -# "authentication": "auth strategy", -# "authorization": "authorization approach", -# "data_protection": "data protection measures", -# "compliance": "compliance approach", -# "reasoning": "security reasoning based on business context" -# }}, -# "third_party_services": {{ -# "communication": "communication services", -# "monitoring": "monitoring solution", -# "payment": "payment processing", -# "other_services": ["service1", "service2"], -# "reasoning": "third-party service reasoning" -# }} -# }}, -# "implementation_strategy": {{ -# "architecture_pattern": "recommended architecture pattern", -# "development_phases": ["phase1", "phase2", "phase3"], -# "deployment_strategy": "deployment approach", -# "scalability_approach": "scalability strategy", -# "timeline_estimate": "development timeline estimate" -# }}, -# "business_alignment": {{ -# "addresses_scale_requirements": "how recommendations address scale needs", -# "addresses_security_requirements": "how recommendations address security needs", -# "addresses_budget_constraints": "how recommendations fit budget", -# "addresses_timeline_constraints": "how recommendations fit timeline", -# "compliance_considerations": "compliance alignment" -# }} -# }} - -# CRITICAL: Return ONLY valid JSON, no additional text. Base all recommendations on the provided functional requirements and business context.""" - -# try: -# logger.info("📞 Calling Claude for enhanced recommendations with functional requirements...") -# message = enhanced_selector.claude_client.messages.create( -# model="claude-3-5-sonnet-20241022", -# max_tokens=8000, -# temperature=0.1, -# messages=[{"role": "user", "content": prompt}] -# ) - -# claude_response = message.content[0].text.strip() -# logger.info("✅ Received Claude response for enhanced recommendations") - -# # Parse JSON response -# try: -# recommendations = json.loads(claude_response) -# logger.info("✅ Successfully parsed enhanced recommendations JSON") -# return recommendations -# except json.JSONDecodeError as e: -# logger.error(f"❌ JSON parse error: {e}") -# return { -# "parse_error": str(e), -# "raw_response": claude_response[:1000] + "..." if len(claude_response) > 1000 else claude_response -# } - -# except Exception as e: -# logger.error(f"❌ Claude API error: {e}") -# return { -# "error": str(e), -# "fallback": "Enhanced recommendations generation failed" -# } - -# if __name__ == "__main__": -# import uvicorn - -# logger.info("="*60) -# logger.info("🚀 ENHANCED TECH STACK SELECTOR v11.0 - FUNCTIONAL REQUIREMENTS AWARE") -# logger.info("="*60) -# logger.info("✅ Enhanced data extraction from web dashboard") -# logger.info("✅ Functional requirements display") -# logger.info("✅ Business context analysis") -# logger.info("✅ Complete data for architecture-designer") -# logger.info("✅ Comprehensive Claude recommendations") -# logger.info("="*60) - -# uvicorn.run("main:app", host="0.0.0.0", port=8002, log_level="info") - - - -# ENHANCED TECH STACK SELECTOR - SHOWS FUNCTIONAL REQUIREMENTS + TECH RECOMMENDATIONS -# Now includes requirement-processor data in output for architecture-designer -# ENHANCED: Added tagged rules support while preserving ALL working functionality - -import os -import sys -import json -from datetime import datetime -from typing import Dict, Any, Optional, List -from pydantic import BaseModel -from fastapi import FastAPI, HTTPException, Request -from fastapi.middleware.cors import CORSMiddleware -from loguru import logger - -# AI integration -try: - import anthropic - CLAUDE_AVAILABLE = True -except ImportError: - CLAUDE_AVAILABLE = False - -# Configure logging -logger.remove() -logger.add(sys.stdout, level="INFO", format="{time} | {level} | {message}") - -# API Key -CLAUDE_API_KEY = "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" - -if not os.getenv("CLAUDE_API_KEY") and CLAUDE_API_KEY: - os.environ["CLAUDE_API_KEY"] = CLAUDE_API_KEY - -# ================================================================================================ -# ENHANCED TECH STACK SELECTOR - WITH FUNCTIONAL REQUIREMENTS DISPLAY -# ================================================================================================ - -class EnhancedTechStackSelector: - """Enhanced selector that handles business context + functional requirements""" - - def __init__(self): - self.claude_client = anthropic.Anthropic(api_key=CLAUDE_API_KEY) if CLAUDE_AVAILABLE else None - logger.info("Enhanced Tech Stack Selector initialized") - -# ================================================================================================ -# FASTAPI APPLICATION -# ================================================================================================ - -app = FastAPI( - title="Enhanced Tech Stack Selector", - description="Enhanced tech stack recommendations with functional requirements display", - version="11.1.0" -) - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -# Initialize enhanced selector -enhanced_selector = EnhancedTechStackSelector() - -@app.get("/health") -async def health_check(): - """Health check""" - return { - "status": "healthy", - "service": "enhanced-tech-stack-selector", - "version": "11.1.0", - "approach": "functional_requirements_aware_recommendations", - "new_features": ["tagged_rules_support"] - } - -@app.post("/api/v1/select") -async def select_enhanced_tech_stack(request: Request): - """ENHANCED VERSION - Shows functional requirements + tech recommendations for architecture-designer""" - try: - request_data = await request.json() - - # Log exactly what we receive - logger.info("=== RECEIVED ENHANCED DATA START ===") - logger.info(json.dumps(request_data, indent=2, default=str)) - logger.info("=== RECEIVED ENHANCED DATA END ===") - - # Extract enhanced data components - ENHANCED with tagged rules - extracted_data = extract_enhanced_data(request_data) - - if not extracted_data["features"] and not extracted_data["feature_name"]: - logger.error("❌ NO FEATURES OR FEATURE DATA FOUND") - return { - "error": "No features or feature data found in request", - "received_data_keys": list(request_data.keys()) if isinstance(request_data, dict) else "not_dict", - "extraction_attempted": "enhanced_data_extraction" - } - - # Build comprehensive context for Claude - ENHANCED with tagged rules - context = build_comprehensive_context(extracted_data) - - # Generate enhanced tech stack recommendations - SAME working logic - recommendations = await generate_enhanced_recommendations(context) - - # NEW: Build complete response with functional requirements for architecture-designer - ENHANCED - complete_response = { - "success": True, - "enhanced_analysis": True, - - # PROJECT CONTEXT - For Web Dashboard Display - "project_context": { - "project_name": extracted_data["project_name"], - "project_type": extracted_data["project_type"], - "features_analyzed": len(extracted_data["features"]), - "business_questions_answered": len(extracted_data["business_answers"]), - "complexity": extracted_data["complexity"], - # NEW: Tagged rules info - "detailed_requirements_count": len(extracted_data.get("detailed_requirements", [])), - "total_tagged_rules": extracted_data.get("total_tagged_rules", 0) - }, - - # FUNCTIONAL REQUIREMENTS - For Web Dashboard Display & Architecture Designer - ENHANCED - "functional_requirements": { - "feature_name": extracted_data["feature_name"], - "description": extracted_data["description"], - "technical_requirements": extracted_data["requirements"], - "business_logic_rules": extracted_data["logic_rules"], - "complexity_level": extracted_data["complexity"], - "all_features": extracted_data["features"], - # NEW: Tagged rules data - "detailed_requirements": extracted_data.get("detailed_requirements", []), - "tagged_rules": extracted_data.get("tagged_rules", []), - "business_context": { - "questions": extracted_data["business_questions"], - "answers": extracted_data["business_answers"] - } - }, - - # TECHNOLOGY RECOMMENDATIONS - Claude Generated - SAME working logic - "claude_recommendations": recommendations, - - # COMPLETE DATA FOR ARCHITECTURE DESIGNER - ENHANCED - "architecture_designer_input": { - "project_data": { - "project_name": extracted_data["project_name"], - "project_type": extracted_data["project_type"], - "complexity": extracted_data["complexity"] - }, - "functional_specifications": { - "primary_feature": { - "name": extracted_data["feature_name"], - "description": extracted_data["description"], - "requirements": extracted_data["requirements"], - "logic_rules": extracted_data["logic_rules"] - }, - "all_features": extracted_data["features"], - # NEW: Tagged rules for architecture designer - "detailed_requirements": extracted_data.get("detailed_requirements", []), - "tagged_rules": extracted_data.get("tagged_rules", []), - "business_context": extracted_data["business_answers"] - }, - "technology_stack": recommendations, - "business_requirements": context["business_context"] - }, - - "analysis_timestamp": datetime.utcnow().isoformat(), - "ready_for_architecture_design": True - } - - logger.info(f"✅ Enhanced tech stack analysis completed with functional requirements") - logger.info(f" Feature: {extracted_data['feature_name']}") - logger.info(f" Requirements: {len(extracted_data['requirements'])}") - logger.info(f" Logic Rules: {len(extracted_data['logic_rules'])}") - logger.info(f" Business Answers: {len(extracted_data['business_answers'])}") - # NEW: Tagged rules logging - logger.info(f" Detailed Requirements: {len(extracted_data.get('detailed_requirements', []))}") - logger.info(f" Tagged Rules: {extracted_data.get('total_tagged_rules', 0)}") - - return complete_response - - except Exception as e: - logger.error(f"💥 ERROR in enhanced tech stack selection: {e}") - return { - "error": str(e), - "debug": "Check service logs for detailed error information" - } - -def extract_enhanced_data(request_data: Dict) -> Dict: - """Extract enhanced data from web dashboard request - ENHANCED with tagged rules support""" - extracted = { - "project_name": "Unknown Project", - "project_type": "unknown", - "feature_name": "", - "description": "", - "requirements": [], - "complexity": "medium", - "logic_rules": [], - "business_questions": [], - "business_answers": [], - "features": [], - "all_features": [], - # NEW: Tagged rules support - "detailed_requirements": [], - "tagged_rules": [], - "total_tagged_rules": 0 - } - - logger.info("🔍 Extracting enhanced data with functional requirements and tagged rules...") - - # Path 1: Direct enhanced data format from web dashboard - SAME working logic - if isinstance(request_data, dict): - # Extract main feature data - SAME as before - extracted["feature_name"] = request_data.get("featureName", "") - extracted["description"] = request_data.get("description", "") - extracted["requirements"] = request_data.get("requirements", []) - extracted["complexity"] = request_data.get("complexity", "medium") - extracted["logic_rules"] = request_data.get("logicRules", []) - extracted["business_questions"] = request_data.get("businessQuestions", []) - extracted["business_answers"] = request_data.get("businessAnswers", []) - extracted["project_name"] = request_data.get("projectName", "Unknown Project") - extracted["project_type"] = request_data.get("projectType", "unknown") - extracted["all_features"] = request_data.get("allFeatures", []) - - # If we have business answers in object format, convert to list - SAME as before - if isinstance(extracted["business_answers"], dict): - ba_list = [] - for key, value in extracted["business_answers"].items(): - if isinstance(value, str) and value.strip(): - question_idx = int(key) if key.isdigit() else 0 - if question_idx < len(extracted["business_questions"]): - ba_list.append({ - "question": extracted["business_questions"][question_idx], - "answer": value.strip() - }) - extracted["business_answers"] = ba_list - - # Extract features list - SAME as before - if extracted["feature_name"]: - extracted["features"] = [extracted["feature_name"]] - - # Add all features if available - ENHANCED with tagged rules extraction - if extracted["all_features"]: - feature_names = [] - for feature in extracted["all_features"]: - if isinstance(feature, dict): - feature_name = feature.get("name", feature.get("featureName", "")) - feature_names.append(feature_name) - - # NEW: Extract tagged rules from requirementAnalysis - requirement_analysis = feature.get("requirementAnalysis", []) - if requirement_analysis: - logger.info(f"📋 Found tagged rules for feature: {feature_name}") - - for req_analysis in requirement_analysis: - requirement_name = req_analysis.get("requirement", "Unknown Requirement") - requirement_rules = req_analysis.get("logicRules", []) - - # Create detailed requirement entry - detailed_req = { - "feature_name": feature_name, - "requirement_name": requirement_name, - "description": feature.get("description", ""), - "complexity": req_analysis.get("complexity", "medium"), - "rules": requirement_rules - } - extracted["detailed_requirements"].append(detailed_req) - - # Add tagged rules - for rule_idx, rule in enumerate(requirement_rules): - if rule and rule.strip(): - tagged_rule = { - "rule_id": f"R{rule_idx + 1}", - "rule_text": rule.strip(), - "feature_name": feature_name, - "requirement_name": requirement_name - } - extracted["tagged_rules"].append(tagged_rule) - extracted["total_tagged_rules"] += 1 - - # Fallback: Add regular logic rules to main logic_rules if no tagged rules - elif feature.get("logicRules"): - regular_rules = feature.get("logicRules", []) - extracted["logic_rules"].extend(regular_rules) - - else: - feature_names.append(str(feature)) - - extracted["features"].extend([f for f in feature_names if f]) - - logger.info(f"✅ Extracted enhanced data with functional requirements and tagged rules:") - logger.info(f" Project: {extracted['project_name']} ({extracted['project_type']})") - logger.info(f" Main feature: {extracted['feature_name']}") - logger.info(f" Requirements: {len(extracted['requirements'])}") - logger.info(f" Logic Rules: {len(extracted['logic_rules'])}") - logger.info(f" Complexity: {extracted['complexity']}") - logger.info(f" Business answers: {len(extracted['business_answers'])}") - logger.info(f" Total features: {len(extracted['features'])}") - # NEW: Tagged rules logging - logger.info(f" Detailed Requirements: {len(extracted['detailed_requirements'])}") - logger.info(f" Tagged Rules: {extracted['total_tagged_rules']}") - - return extracted - -def build_comprehensive_context(extracted_data: Dict) -> Dict: - """Build comprehensive context for Claude analysis - ENHANCED with tagged rules""" - - # Combine all requirements and business insights - SAME working logic - functional_requirements = [] - if extracted_data["feature_name"]: - functional_requirements.append(f"Core Feature: {extracted_data['feature_name']}") - - if extracted_data["requirements"]: - functional_requirements.extend([f"• {req}" for req in extracted_data["requirements"]]) - - if extracted_data["features"]: - for feature in extracted_data["features"]: - if feature and feature != extracted_data["feature_name"]: - functional_requirements.append(f"• {feature}") - - # NEW: Add detailed requirements with tagged rules to functional requirements - detailed_requirements_text = [] - for detailed_req in extracted_data.get("detailed_requirements", []): - req_text = f"📋 {detailed_req['feature_name']} → {detailed_req['requirement_name']}:" - for rule in detailed_req["rules"]: - req_text += f"\n - {rule}" - detailed_requirements_text.append(req_text) - - if detailed_requirements_text: - functional_requirements.extend(detailed_requirements_text) - - # Business context from answers - SAME working logic - business_context = {} - if extracted_data["business_answers"]: - for answer_data in extracted_data["business_answers"]: - if isinstance(answer_data, dict): - question = answer_data.get("question", "") - answer = answer_data.get("answer", "") - if question and answer: - # Categorize business answers - SAME logic - if any(keyword in question.lower() for keyword in ["user", "scale", "concurrent"]): - business_context["scale_requirements"] = business_context.get("scale_requirements", []) - business_context["scale_requirements"].append(f"{question}: {answer}") - elif any(keyword in question.lower() for keyword in ["compliance", "security", "encryption"]): - business_context["security_requirements"] = business_context.get("security_requirements", []) - business_context["security_requirements"].append(f"{question}: {answer}") - elif any(keyword in question.lower() for keyword in ["budget", "timeline", "timeline"]): - business_context["project_constraints"] = business_context.get("project_constraints", []) - business_context["project_constraints"].append(f"{question}: {answer}") - else: - business_context["other_requirements"] = business_context.get("other_requirements", []) - business_context["other_requirements"].append(f"{question}: {answer}") - - return { - "project_name": extracted_data["project_name"], - "project_type": extracted_data["project_type"], - "complexity": extracted_data["complexity"], - "functional_requirements": functional_requirements, - "business_context": business_context, - "logic_rules": extracted_data["logic_rules"], - # NEW: Include tagged rules data - "detailed_requirements": extracted_data.get("detailed_requirements", []), - "tagged_rules": extracted_data.get("tagged_rules", []) - } - -async def generate_enhanced_recommendations(context: Dict) -> Dict: - """Generate enhanced tech stack recommendations using Claude with business context - SAME working logic + tagged rules""" - - if not enhanced_selector.claude_client: - logger.error("❌ Claude client not available") - return { - "error": "Claude AI not available", - "fallback": "Basic recommendations would go here" - } - - # Build comprehensive prompt with business context - SAME working logic - functional_reqs_text = "\n".join(context["functional_requirements"]) - - business_context_text = "" - for category, requirements in context["business_context"].items(): - business_context_text += f"\n{category.replace('_', ' ').title()}:\n" - business_context_text += "\n".join([f" - {req}" for req in requirements]) + "\n" - - logic_rules_text = "\n".join([f" - {rule}" for rule in context["logic_rules"]]) - - # NEW: Add tagged rules info to prompt (only if tagged rules exist) - tagged_rules_text = "" - if context.get("tagged_rules"): - tagged_rules_text = f"\n\nDETAILED TAGGED RULES:\n" - for tagged_rule in context["tagged_rules"][:10]: # Limit to first 10 for prompt size - tagged_rules_text += f" {tagged_rule['rule_id']}: {tagged_rule['rule_text']} (Feature: {tagged_rule['feature_name']})\n" - if len(context["tagged_rules"]) > 10: - tagged_rules_text += f" ... and {len(context['tagged_rules']) - 10} more tagged rules\n" - - # SAME working prompt structure with minimal enhancement - prompt = f"""You are a senior software architect. Analyze this comprehensive project context and recommend the optimal technology stack. - -PROJECT CONTEXT: -- Name: {context["project_name"]} -- Type: {context["project_type"]} -- Complexity: {context["complexity"]} - -FUNCTIONAL REQUIREMENTS: -{functional_reqs_text} - -BUSINESS CONTEXT & CONSTRAINTS: -{business_context_text} - -BUSINESS LOGIC RULES: -{logic_rules_text} -{tagged_rules_text} - -Based on this comprehensive analysis, provide detailed technology recommendations as a JSON object: - -{{ - "technology_recommendations": {{ - "frontend": {{ - "framework": "recommended framework", - "libraries": ["lib1", "lib2", "lib3"], - "reasoning": "detailed reasoning based on requirements and business context" - }}, - "backend": {{ - "framework": "recommended backend framework", - "language": "programming language", - "libraries": ["lib1", "lib2", "lib3"], - "reasoning": "detailed reasoning based on complexity and business needs" - }}, - "database": {{ - "primary": "primary database choice", - "secondary": ["cache", "search", "analytics"], - "reasoning": "database choice based on data requirements and scale" - }}, - "infrastructure": {{ - "cloud_provider": "recommended cloud provider", - "orchestration": "container/orchestration choice", - "services": ["service1", "service2", "service3"], - "reasoning": "infrastructure reasoning based on scale and budget" - }}, - "security": {{ - "authentication": "auth strategy", - "authorization": "authorization approach", - "data_protection": "data protection measures", - "compliance": "compliance approach", - "reasoning": "security reasoning based on business context" - }}, - "third_party_services": {{ - "communication": "communication services", - "monitoring": "monitoring solution", - "payment": "payment processing", - "other_services": ["service1", "service2"], - "reasoning": "third-party service reasoning" - }} - }}, - "implementation_strategy": {{ - "architecture_pattern": "recommended architecture pattern", - "development_phases": ["phase1", "phase2", "phase3"], - "deployment_strategy": "deployment approach", - "scalability_approach": "scalability strategy", - "timeline_estimate": "development timeline estimate" - }}, - "business_alignment": {{ - "addresses_scale_requirements": "how recommendations address scale needs", - "addresses_security_requirements": "how recommendations address security needs", - "addresses_budget_constraints": "how recommendations fit budget", - "addresses_timeline_constraints": "how recommendations fit timeline", - "compliance_considerations": "compliance alignment" - }} -}} - -CRITICAL: Return ONLY valid JSON, no additional text. Base all recommendations on the provided functional requirements and business context.""" - - try: - logger.info("📞 Calling Claude for enhanced recommendations with functional requirements and tagged rules...") - message = enhanced_selector.claude_client.messages.create( - model="claude-3-5-sonnet-20241022", - max_tokens=8000, - temperature=0.1, - messages=[{"role": "user", "content": prompt}] - ) - - claude_response = message.content[0].text.strip() - logger.info("✅ Received Claude response for enhanced recommendations") - - # Parse JSON response - SAME working logic - try: - recommendations = json.loads(claude_response) - logger.info("✅ Successfully parsed enhanced recommendations JSON") - return recommendations - except json.JSONDecodeError as e: - logger.error(f"❌ JSON parse error: {e}") - return { - "parse_error": str(e), - "raw_response": claude_response[:1000] + "..." if len(claude_response) > 1000 else claude_response - } - - except Exception as e: - logger.error(f"❌ Claude API error: {e}") - return { - "error": str(e), - "fallback": "Enhanced recommendations generation failed" - } - -if __name__ == "__main__": - import uvicorn - - logger.info("="*60) - logger.info("🚀 ENHANCED TECH STACK SELECTOR v11.1 - FUNCTIONAL REQUIREMENTS + TAGGED RULES") - logger.info("="*60) - logger.info("✅ Enhanced data extraction from web dashboard") - logger.info("✅ Functional requirements display") - logger.info("✅ Business context analysis") - logger.info("✅ NEW: Tagged rules support") - logger.info("✅ Complete data for architecture-designer") - logger.info("✅ Comprehensive Claude recommendations") - logger.info("="*60) - - uvicorn.run("main:app", host="0.0.0.0", port=8002, log_level="info") \ No newline at end of file diff --git a/services/tech-stack-selector/src/main.py.backup b/services/tech-stack-selector/src/main.py.backup index 6c458f9..a24d8ce 100644 --- a/services/tech-stack-selector/src/main.py.backup +++ b/services/tech-stack-selector/src/main.py.backup @@ -1,6 +1,5 @@ -# WORKING TECH STACK SELECTOR - STRUCTURED JSON VERSION -# Simple, effective feature extraction and Claude analysis with structured JSON output -# NO complex logic, just works with n8n data +# ENHANCED TECH STACK SELECTOR - INTEGRATED VERSION WITH POSTGRESQL +# Combines FastAPI, Neo4j, PostgreSQL migration, and all endpoints into one file import os import sys @@ -11,43 +10,1159 @@ from pydantic import BaseModel from fastapi import FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from loguru import logger +import atexit +import anthropic -# AI integration +# Neo4j imports +from neo4j import GraphDatabase + +# PostgreSQL imports try: - import anthropic - CLAUDE_AVAILABLE = True + import psycopg2 + from psycopg2.extras import RealDictCursor + POSTGRES_AVAILABLE = True except ImportError: - CLAUDE_AVAILABLE = False - -# Configure logging -logger.remove() -logger.add(sys.stdout, level="INFO", format="{time} | {level} | {message}") - -# API Key -CLAUDE_API_KEY = "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" - -if not os.getenv("CLAUDE_API_KEY") and CLAUDE_API_KEY: - os.environ["CLAUDE_API_KEY"] = CLAUDE_API_KEY + POSTGRES_AVAILABLE = False # ================================================================================================ -# WORKING TECH STACK SELECTOR - SIMPLE AND EFFECTIVE +# NEO4J SERVICE CLASS # ================================================================================================ -class WorkingTechStackSelector: - """Simple selector that works with n8n data and Claude""" +class Neo4jService: + def __init__(self, uri, user, password): + self.driver = GraphDatabase.driver( + uri, + auth=(user, password), + connection_timeout=5 + ) + try: + self.driver.verify_connectivity() + except Exception: + pass - def __init__(self): - self.claude_client = anthropic.Anthropic(api_key=CLAUDE_API_KEY) if CLAUDE_AVAILABLE else None - logger.info("Working Tech Stack Selector initialized") + def close(self): + self.driver.close() + + def run_query(self, query: str, parameters: Optional[Dict[str, Any]] = None): + with self.driver.session() as session: + result = session.run(query, parameters or {}) + return [record.data() for record in result] + + def get_best_stack(self, domain: Optional[str], budget: Optional[int], preferred: Optional[List[str]]): + """Return top recommended tech stacks based on domain, budget, and preferred technologies.""" + query = """ + MATCH (s:TechStack) + WHERE ($domain IS NULL OR toLower(s.name) CONTAINS toLower($domain) OR + toLower(s.name) CONTAINS toLower(replace($domain, 'ecommerce', 'e-commerce')) OR + EXISTS { MATCH (s)-[:SUITABLE_FOR]->(d:Domain) WHERE toLower(d.name) CONTAINS toLower($domain) }) + AND ($budget IS NULL OR s.monthly_cost <= $budget) + WITH s, $preferred AS pref + OPTIONAL MATCH (s)-[:USES_FRONTEND]->(frontend:Technology) + OPTIONAL MATCH (s)-[:USES_BACKEND]->(backend:Technology) + OPTIONAL MATCH (s)-[:USES_DATABASE]->(database:Technology) + OPTIONAL MATCH (s)-[:USES_CLOUD]->(cloud:Technology) + OPTIONAL MATCH (s)-[:USES_TESTING]->(testing:Technology) + OPTIONAL MATCH (s)-[:USES_MOBILE]->(mobile:Technology) + OPTIONAL MATCH (s)-[:USES_DEVOPS]->(devops:Technology) + OPTIONAL MATCH (s)-[:USES_AI_ML]->(ai_ml:Technology) + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, pref, + (s.satisfaction_score * 0.4 + s.success_rate * 0.3 + + CASE WHEN $budget IS NOT NULL THEN (100 - (s.monthly_cost / $budget * 100)) * 0.3 ELSE 30 END) AS base_score + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, base_score, + CASE WHEN pref IS NOT NULL THEN + size([x IN pref WHERE + toLower(x) IN [toLower(frontend.name), toLower(backend.name), toLower(database.name), + toLower(cloud.name), toLower(testing.name), toLower(mobile.name), + toLower(devops.name), toLower(ai_ml.name)]]) * 5 + ELSE 0 END AS preference_bonus + RETURN s.name AS stack_name, + s.monthly_cost AS monthly_cost, + s.setup_cost AS setup_cost, + s.team_size_range AS team_size, + s.development_time_months AS development_time, + s.satisfaction_score AS satisfaction, + s.success_rate AS success_rate, + CASE WHEN frontend IS NOT NULL THEN frontend.name ELSE 'React' END AS frontend, + CASE WHEN backend IS NOT NULL THEN backend.name ELSE 'Node.js' END AS backend, + CASE WHEN database IS NOT NULL THEN database.name ELSE 'PostgreSQL' END AS database, + CASE WHEN cloud IS NOT NULL THEN cloud.name ELSE 'DigitalOcean' END AS cloud, + CASE WHEN testing IS NOT NULL THEN testing.name ELSE 'Jest' END AS testing, + CASE WHEN mobile IS NOT NULL THEN mobile.name ELSE 'React Native' END AS mobile, + CASE WHEN devops IS NOT NULL THEN devops.name ELSE 'GitHub Actions' END AS devops, + CASE WHEN ai_ml IS NOT NULL THEN ai_ml.name ELSE 'Hugging Face' END AS ai_ml, + base_score + preference_bonus AS recommendation_score + ORDER BY recommendation_score DESC, s.monthly_cost ASC + LIMIT 5 + """ + return self.run_query(query, {"domain": domain, "budget": budget, "preferred": preferred}) + + def get_price_performance(self): + query = """ + MATCH (t:Technology) + RETURN t.name AS technology, + coalesce(t.performance_rating,0) AS performance, + coalesce(t.maturity_score,0) AS maturity, + coalesce(t.performance_rating,0) * 10 AS estimated_monthly_cost, + round((coalesce(t.performance_rating,0) * 1.0) / (CASE WHEN coalesce(t.performance_rating,0) = 0 THEN 1 ELSE 10 END),2) AS price_performance_index + ORDER BY performance DESC, maturity DESC + LIMIT 10 + """ + return self.run_query(query, {}) + + # === Added: Queries from user spec === + def get_technology_ecosystem(self): + query = """ + MATCH (t1:Technology)-[r:COMPATIBLE_WITH|OPTIMIZED_FOR]-(t2:Technology) + RETURN t1.name as tech1, + t1.category as category1, + type(r) as relationship, + t2.name as tech2, + t2.category as category2, + r.score as compatibility_score, + r.reason as reason + ORDER BY compatibility_score DESC + """ + return self.run_query(query, {}) + + def get_stack_trends(self): + query = """ + MATCH (s:TechStack)-[:SUITABLE_FOR]->(d:Domain) + WITH d.name as domain, + collect(s) as stacks, + avg(s.satisfaction_score) as avg_satisfaction, + avg(s.monthly_cost) as avg_cost + UNWIND stacks as stack + MATCH (stack)-[:USES_FRONTEND|USES_BACKEND|USES_DATABASE|USES_CLOUD]->(t:Technology) + RETURN domain, + avg_satisfaction, + avg_cost, + collect(DISTINCT t.name) as popular_technologies, + count(DISTINCT stack) as stack_variations + ORDER BY avg_satisfaction DESC + """ + return self.run_query(query, {}) + + def validate_relationships(self): + query = """ + MATCH (s:TechStack)-[r]->(n) + RETURN type(r) as relationship_type, + labels(n) as target_labels, + count(*) as relationship_count + ORDER BY relationship_count DESC + """ + return self.run_query(query, {}) + + def validate_data_completeness(self): + query = """ + MATCH (s:TechStack) + RETURN s.name AS name, + exists((s)-[:BELONGS_TO_TIER]->()) as has_price_tier, + exists((s)-[:USES_FRONTEND]->()) as has_frontend, + exists((s)-[:USES_BACKEND]->()) as has_backend, + exists((s)-[:USES_DATABASE]->()) as has_database, + exists((s)-[:USES_CLOUD]->()) as has_cloud + """ + return self.run_query(query, {}) + + def validate_price_consistency(self): + # Get inconsistencies (stacks with costs outside their tier range) + inconsistencies_query = """ + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + WHERE NOT (s.monthly_cost >= p.min_price AND s.monthly_cost <= p.max_price) + RETURN s.name AS stack, + s.monthly_cost AS monthly_cost, + p.name AS price_tier, + p.min_price AS min_price, + p.max_price AS max_price + """ + inconsistencies = self.run_query(inconsistencies_query, {}) + + # Get summary statistics + summary_query = """ + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + RETURN count(s) AS total_stacks, + count(CASE WHEN s.monthly_cost >= p.min_price AND s.monthly_cost <= p.max_price THEN 1 END) AS consistent_stacks, + count(CASE WHEN NOT (s.monthly_cost >= p.min_price AND s.monthly_cost <= p.max_price) THEN 1 END) AS inconsistent_stacks + """ + summary = self.run_query(summary_query, {}) + + # Get all stacks with their price tier info for reference + all_stacks_query = """ + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + RETURN s.name AS stack, + s.monthly_cost AS monthly_cost, + p.name AS price_tier, + p.min_price AS min_price, + p.max_price AS max_price, + CASE WHEN s.monthly_cost >= p.min_price AND s.monthly_cost <= p.max_price THEN 'consistent' ELSE 'inconsistent' END AS status + ORDER BY s.monthly_cost + """ + all_stacks = self.run_query(all_stacks_query, {}) + + return { + "summary": summary[0] if summary else {"total_stacks": 0, "consistent_stacks": 0, "inconsistent_stacks": 0}, + "inconsistencies": inconsistencies, + "all_stacks": all_stacks, + "validation_passed": len(inconsistencies) == 0 + } + + def export_stacks_with_pricing(self): + query = """ + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + OPTIONAL MATCH (s)-[:USES_FRONTEND|USES_BACKEND|USES_DATABASE|USES_CLOUD]->(t:Technology) + RETURN s.name as stack_name, + s.monthly_cost as monthly_cost, + s.setup_cost as setup_cost, + s.team_size_range as team_size, + s.development_time_months as development_time, + s.satisfaction_score as satisfaction_score, + s.success_rate as success_rate, + p.name as price_tier, + s.suitable_domains as domains, + collect(DISTINCT {name: t.name, category: t.category, cost: t.monthly_cost}) as technologies + """ + return self.run_query(query, {}) + + def export_price_tiers(self): + query = """ + MATCH (p:PriceTier) + OPTIONAL MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p) + RETURN p.name as tier_name, + p.min_price as min_price, + p.max_price as max_price, + p.target_audience as audience, + p.description as description, + count(s) as stack_count, + collect(s.name) as available_stacks + """ + return self.run_query(query, {}) + + def apply_cql_script(self, file_path: str) -> Dict[str, Any]: + executed: int = 0 + failed: int = 0 + errors: List[Dict[str, str]] = [] + if not os.path.isfile(file_path): + raise FileNotFoundError(f"CQL file not found: {file_path}") + + try: + with open(file_path, "r", encoding="utf-8") as f: + raw = f.read() + + # Strip line comments and build statements by semicolons + lines = [] + for line in raw.splitlines(): + stripped = line.strip() + # Skip empty lines and comments + if not stripped or stripped.startswith("//") or stripped.startswith("--"): + continue + lines.append(line) + + merged = "\n".join(lines) + statements = [s.strip() for s in merged.split(";") if s.strip()] + + logger.info(f"📝 Processing {len(statements)} CQL statements from {file_path}") + + with self.driver.session() as session: + for i, stmt in enumerate(statements): + try: + if stmt.strip(): # Only execute non-empty statements + session.run(stmt) + executed += 1 + if executed % 10 == 0: # Log progress every 10 statements + logger.info(f"✅ Executed {executed} statements...") + except Exception as e: + failed += 1 + error_msg = str(e) + # Log the error but continue processing + logger.warning(f"⚠️ Statement {i+1} failed: {error_msg[:100]}...") + errors.append({ + "statement_number": i + 1, + "statement": stmt[:120] + ("..." if len(stmt) > 120 else ""), + "error": error_msg + }) + + logger.info(f"📊 CQL execution completed: {executed} successful, {failed} failed") + return {"executed": executed, "failed": failed, "errors": errors} + + except Exception as e: + logger.error(f"❌ Error reading or processing CQL file: {e}") + return {"executed": 0, "failed": 1, "errors": [{"error": str(e)}]} + + def recommend_by_budget(self, budget: float, domain: Optional[str], limit: int = 5): + """ + Recommend tech stacks based on EXACT budget constraint. + + Args: + budget: User's exact budget amount (monthly cost) + domain: Optional domain filter (e.g., 'E-commerce', 'SaaS') + limit: Maximum number of results to return + + Returns: + Tech stacks that cost <= budget, ordered by best value + """ + query = """ + MATCH (s:TechStack) + WHERE s.monthly_cost <= $budget + OPTIONAL MATCH (s)-[:SUITABLE_FOR]->(d:Domain) + WHERE ($domain IS NULL OR (d IS NOT NULL AND toLower(d.name) CONTAINS toLower($domain))) + OPTIONAL MATCH (s)-[:USES_FRONTEND]->(frontend:Technology) + OPTIONAL MATCH (s)-[:USES_BACKEND]->(backend:Technology) + OPTIONAL MATCH (s)-[:USES_DATABASE]->(database:Technology) + OPTIONAL MATCH (s)-[:USES_CLOUD]->(cloud:Technology) + OPTIONAL MATCH (s)-[:USES_TESTING]->(testing:Technology) + OPTIONAL MATCH (s)-[:USES_MOBILE]->(mobile:Technology) + OPTIONAL MATCH (s)-[:USES_DEVOPS]->(devops:Technology) + OPTIONAL MATCH (s)-[:USES_AI_ML]->(ai_ml:Technology) + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, + (s.satisfaction_score * 0.4 + s.success_rate * 0.3 + + (100 - (s.monthly_cost / $budget * 100)) * 0.3) AS recommendation_score + RETURN s.name AS stack_name, + s.monthly_cost AS monthly_cost, + s.setup_cost AS setup_cost, + s.team_size_range AS team_size, + s.development_time_months AS development_time, + s.satisfaction_score AS satisfaction, + s.success_rate AS success_rate, + COALESCE(frontend.name, 'React') AS frontend, + COALESCE(backend.name, 'Node.js') AS backend, + COALESCE(database.name, 'PostgreSQL') AS database, + COALESCE(cloud.name, 'DigitalOcean') AS cloud, + COALESCE(testing.name, 'Jest') AS testing, + COALESCE(mobile.name, 'React Native') AS mobile, + COALESCE(devops.name, 'GitHub Actions') AS devops, + COALESCE(ai_ml.name, 'Hugging Face') AS ai_ml, + recommendation_score, + s.monthly_cost / $budget AS budget_utilization + ORDER BY recommendation_score DESC, s.monthly_cost ASC + LIMIT $limit + """ + return self.run_query(query, { + "budget": float(budget), + "domain": domain, + "limit": limit + }) + + def recommend_by_cost_limits(self, monthly_cost: Optional[float], setup_cost: Optional[float], domain: Optional[str]): + """Recommend stacks that do not exceed the given monthly and setup cost limits.""" + query = """ + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + OPTIONAL MATCH (s)-[:SUITABLE_FOR]->(d:Domain) + WITH s, p, d + WHERE ($domain IS NULL OR toLower(d.name) CONTAINS toLower($domain)) + AND ($monthly_cost IS NULL OR s.monthly_cost <= $monthly_cost) + AND ($setup_cost IS NULL OR s.setup_cost <= $setup_cost) + OPTIONAL MATCH (s)-[:USES_FRONTEND]->(frontend:Technology) + OPTIONAL MATCH (s)-[:USES_BACKEND]->(backend:Technology) + OPTIONAL MATCH (s)-[:USES_DATABASE]->(database:Technology) + OPTIONAL MATCH (s)-[:USES_CLOUD]->(cloud:Technology) + OPTIONAL MATCH (s)-[:USES_TESTING]->(testing:Technology) + OPTIONAL MATCH (s)-[:USES_MOBILE]->(mobile:Technology) + OPTIONAL MATCH (s)-[:USES_DEVOPS]->(devops:Technology) + OPTIONAL MATCH (s)-[:USES_AI_ML]->(ai_ml:Technology) + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, + (s.satisfaction_score * 0.4 + s.success_rate * 0.3 + 30) AS recommendation_score + RETURN s.name AS stack_name, + s.monthly_cost AS monthly_cost, + s.setup_cost AS setup_cost, + s.team_size_range AS team_size, + s.development_time_months AS development_time, + s.satisfaction_score AS satisfaction, + s.success_rate AS success_rate, + COALESCE(frontend.name, 'React') AS frontend, + COALESCE(backend.name, 'Node.js') AS backend, + COALESCE(database.name, 'PostgreSQL') AS database, + COALESCE(cloud.name, 'DigitalOcean') AS cloud, + COALESCE(testing.name, 'Jest') AS testing, + COALESCE(mobile.name, 'React Native') AS mobile, + COALESCE(devops.name, 'GitHub Actions') AS devops, + COALESCE(ai_ml.name, 'Hugging Face') AS ai_ml, + recommendation_score + ORDER BY s.monthly_cost ASC, recommendation_score DESC + LIMIT 5 + """ + return self.run_query(query, { + "monthly_cost": None if monthly_cost is None else float(monthly_cost), + "setup_cost": None if setup_cost is None else float(setup_cost), + "domain": domain + }) + + def get_technology(self, tech_id): + with self.driver.session() as session: + result = session.run( + "MATCH (t:Technology {id: $tech_id}) RETURN t", + tech_id=tech_id + ) + return result.single()[0] if result else None + + def get_compatible_tech(self, tech_id): + with self.driver.session() as session: + result = session.run(""" + MATCH (t:Technology {id: $tech_id})-[r:COMPATIBLE_WITH]->(other:Technology) + RETURN other, r.compatibility_score as score + ORDER BY score DESC + """, + tech_id=tech_id + ) + return [{"tech": record["other"], "score": record["score"]} for record in result] + + def get_tech_by_requirements(self, requirements): + with self.driver.session() as session: + # Convert requirements to a list of strings if it's a dict + if isinstance(requirements, dict): + req_list = [] + for key, value in requirements.items(): + if isinstance(value, str): + req_list.append(value) + elif isinstance(value, list): + req_list.extend([str(v) for v in value]) + requirements = req_list + elif not isinstance(requirements, list): + requirements = [str(requirements)] + + result = session.run(""" + MATCH (t:Technology) + WHERE ANY(req IN $requirements + WHERE ANY(use_case IN t.primary_use_cases WHERE toLower(use_case) CONTAINS toLower(req)) + OR any(strength IN t.strengths WHERE toLower(strength) CONTAINS toLower(req)) + OR toLower(t.name) CONTAINS toLower(req) + OR toLower(t.category) CONTAINS toLower(req) + OR (req = 'web-application' AND ANY(use_case IN t.primary_use_cases WHERE toLower(use_case) CONTAINS 'web')) + OR (req = 'payment' AND ANY(use_case IN t.primary_use_cases WHERE toLower(use_case) CONTAINS 'application')) + OR (req = 'security' AND ANY(use_case IN t.primary_use_cases WHERE toLower(use_case) CONTAINS 'application')) + OR (req = 'reporting' AND ANY(use_case IN t.primary_use_cases WHERE toLower(use_case) CONTAINS 'application')) + OR (req = 'platform' AND ANY(use_case IN t.primary_use_cases WHERE toLower(use_case) CONTAINS 'application'))) + RETURN t + ORDER BY t.maturity_score DESC + LIMIT 10 + """, + requirements=requirements + ) + return [record["t"] for record in result] + + def create_compatibility_relationships(self): + """Create compatibility relationships between technologies""" + with self.driver.session() as session: + # Create relationships between technologies based on compatibility + result = session.run(""" + MATCH (t1:Technology), (t2:Technology) + WHERE t1.id <> t2.id + AND ( + (t1.category = t2.category AND t1.type <> t2.type) OR + (t1.category = 'Frontend Framework' AND t2.category = 'Backend Framework') OR + (t1.category = 'Backend Framework' AND t2.category = 'Database') OR + (t1.category = 'Database' AND t2.category = 'Backend Framework') + ) + MERGE (t1)-[r:COMPATIBLE_WITH { + compatibility_score: CASE + WHEN t1.category = t2.category THEN 0.8 + WHEN (t1.category = 'Frontend Framework' AND t2.category = 'Backend Framework') THEN 0.9 + WHEN (t1.category = 'Backend Framework' AND t2.category = 'Database') THEN 0.9 + ELSE 0.7 + END, + integration_effort: CASE + WHEN t1.category = t2.category THEN 'Low' + WHEN (t1.category = 'Frontend Framework' AND t2.category = 'Backend Framework') THEN 'Medium' + WHEN (t1.category = 'Backend Framework' AND t2.category = 'Database') THEN 'Low' + ELSE 'High' + END, + notes: 'Auto-generated compatibility relationship' + }]->(t2) + RETURN count(r) as relationships_created + """) + return result.single()["relationships_created"] + + def get_all_technologies_with_relationships(self): + """Get all technologies with their relationships""" + with self.driver.session() as session: + result = session.run(""" + MATCH (t:Technology) + OPTIONAL MATCH (t)-[r:COMPATIBLE_WITH]->(other:Technology) + RETURN t, collect({ + target: other, + relationship: r + }) as relationships + """) + technologies = [] + for record in result: + tech = record["t"] + relationships = record["relationships"] + technologies.append({ + "technology": dict(tech), + "relationships": [rel for rel in relationships if rel["target"] is not None] + }) + return technologies + +# ================================================================================================ +# POSTGRESQL MIGRATION SERVICE +# ================================================================================================ + +class PostgreSQLMigrationService: + def __init__(self, + host="localhost", + port=5432, + user="pipeline_admin", + password="secure_pipeline_2024", + database="dev_pipeline"): + self.config = { + "host": host, + "port": port, + "user": user, + "password": password, + "database": database + } + self.connection = None + self.cursor = None + self.last_error: Optional[str] = None + + def is_open(self) -> bool: + try: + return ( + self.connection is not None and + getattr(self.connection, "closed", 1) == 0 and + self.cursor is not None and + not getattr(self.cursor, "closed", True) + ) + except Exception: + return False + + def connect(self): + if not POSTGRES_AVAILABLE: + raise Exception("PostgreSQL connector (psycopg2) not available") + + try: + # If already open, reuse + if self.is_open(): + self.last_error = None + return True + # Attempt fresh connection + self.connection = psycopg2.connect(**self.config) + self.cursor = self.connection.cursor(cursor_factory=RealDictCursor) + logger.info("Connected to PostgreSQL successfully") + self.last_error = None + return True + except Exception as e: + logger.error(f"Error connecting to PostgreSQL: {e}") + self.last_error = str(e) + return False + + def close(self): + try: + if self.cursor and not getattr(self.cursor, "closed", True): + self.cursor.close() + finally: + self.cursor = None + try: + if self.connection and getattr(self.connection, "closed", 1) == 0: + self.connection.close() + finally: + self.connection = None + + def create_tables_if_not_exist(self): + """Create tables if they don't exist""" + if not self.is_open(): + if not self.connect(): + return False + + try: + create_technologies_table = """ + CREATE TABLE IF NOT EXISTS technologies ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + category VARCHAR(100), + type VARCHAR(100), + maturity_score INTEGER DEFAULT 0, + learning_curve VARCHAR(50), + performance_rating INTEGER DEFAULT 0, + community_size VARCHAR(50), + cost_model VARCHAR(100), + primary_use_cases TEXT, + strengths TEXT[], + weaknesses TEXT[], + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + """ + + create_compatibility_table = """ + CREATE TABLE IF NOT EXISTS tech_compatibility ( + id SERIAL PRIMARY KEY, + tech_a_id INTEGER REFERENCES technologies(id), + tech_b_id INTEGER REFERENCES technologies(id), + compatibility_score DECIMAL(3,2) DEFAULT 0.0, + integration_effort VARCHAR(50), + notes TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + """ + + self.cursor.execute(create_technologies_table) + self.cursor.execute(create_compatibility_table) + self.connection.commit() + + logger.info("PostgreSQL tables created/verified successfully") + return True + + except Exception as e: + logger.error(f"Error creating PostgreSQL tables: {e}") + self.last_error = str(e) + return False + + def get_sample_data(self): + """Insert some sample data if tables are empty""" + try: + # Check if data exists + self.cursor.execute("SELECT COUNT(*) FROM technologies") + count = self.cursor.fetchone()['count'] + + if count == 0: + sample_technologies = [ + { + 'name': 'React', + 'category': 'Frontend Framework', + 'type': 'Library', + 'maturity_score': 9, + 'learning_curve': 'Medium', + 'performance_rating': 8, + 'community_size': 'Very Large', + 'cost_model': 'Open Source', + 'primary_use_cases': 'Single Page Applications, Component-based UIs', + 'strengths': ['Virtual DOM', 'Large ecosystem', 'Component reusability'], + 'weaknesses': ['Learning curve', 'Rapid changes', 'SEO challenges'] + }, + { + 'name': 'Node.js', + 'category': 'Backend Runtime', + 'type': 'Runtime Environment', + 'maturity_score': 9, + 'learning_curve': 'Medium', + 'performance_rating': 8, + 'community_size': 'Very Large', + 'cost_model': 'Open Source', + 'primary_use_cases': 'API development, Real-time applications, Microservices', + 'strengths': ['JavaScript everywhere', 'NPM ecosystem', 'Non-blocking I/O'], + 'weaknesses': ['Single-threaded', 'CPU-intensive tasks', 'Callback complexity'] + }, + { + 'name': 'PostgreSQL', + 'category': 'Database', + 'type': 'Relational Database', + 'maturity_score': 10, + 'learning_curve': 'Medium', + 'performance_rating': 9, + 'community_size': 'Large', + 'cost_model': 'Open Source', + 'primary_use_cases': 'ACID transactions, Complex queries, Data integrity', + 'strengths': ['ACID compliance', 'JSON support', 'Extensible', 'Full-text search'], + 'weaknesses': ['Memory usage', 'Complexity for simple apps'] + }, + { + 'name': 'FastAPI', + 'category': 'Backend Framework', + 'type': 'Web Framework', + 'maturity_score': 8, + 'learning_curve': 'Low', + 'performance_rating': 9, + 'community_size': 'Growing', + 'cost_model': 'Open Source', + 'primary_use_cases': 'REST APIs, GraphQL, Microservices', + 'strengths': ['Fast performance', 'Automatic docs', 'Type hints', 'Async support'], + 'weaknesses': ['Relatively new', 'Smaller ecosystem'] + } + ] + + for tech in sample_technologies: + insert_query = """ + INSERT INTO technologies (name, category, type, maturity_score, learning_curve, + performance_rating, community_size, cost_model, + primary_use_cases, strengths, weaknesses) + VALUES (%(name)s, %(category)s, %(type)s, %(maturity_score)s, %(learning_curve)s, + %(performance_rating)s, %(community_size)s, %(cost_model)s, + %(primary_use_cases)s, %(strengths)s, %(weaknesses)s) + """ + self.cursor.execute(insert_query, tech) + + self.connection.commit() + logger.info(f"Inserted {len(sample_technologies)} sample technologies") + + return True + + except Exception as e: + logger.error(f"Error inserting sample data: {e}") + self.last_error = str(e) + return False + + def migrate_to_neo4j(self, neo4j_service): + if not self.is_open(): + if not self.connect(): + return [] + + try: + # Migrate technologies + self.cursor.execute("SELECT * FROM technologies") + technologies = self.cursor.fetchall() + + with neo4j_service.driver.session() as session: + for tech in technologies: + # Convert RealDictRow to regular dict + tech_dict = dict(tech) + session.write_transaction(self._create_technology_node, tech_dict) + + logger.info(f"Migrated {len(technologies)} technologies to Neo4j") + return True + + except Exception as e: + logger.error(f"Error during migration: {e}") + self.last_error = str(e) + return False + + def _create_technology_node(self, tx, tech): + tx.run(""" + CREATE (:Technology { + id: $id, + name: $name, + category: $category, + type: $type, + maturity_score: $maturity_score, + learning_curve: $learning_curve, + performance_rating: $performance_rating, + community_size: $community_size, + cost_model: $cost_model, + primary_use_cases: $primary_use_cases, + strengths: $strengths, + weaknesses: $weaknesses + }) + """, **tech) + + def get_all_technologies(self): + """Get all technologies from PostgreSQL""" + if not self.connection: + if not self.connect(): + return [] + + try: + self.cursor.execute(""" + SELECT id, name, category, type, maturity_score, learning_curve, + performance_rating, community_size, cost_model, + primary_use_cases, strengths, weaknesses + FROM technologies + ORDER BY maturity_score DESC, name + """) + technologies = self.cursor.fetchall() + return [dict(tech) for tech in technologies] + except Exception as e: + logger.error(f"Error fetching technologies: {e}") + return [] + + def get_tools_by_price_tier(self, price_tier_id: int): + """Get tools filtered by price tier""" + if not self.connection: + if not self.connect(): + return [] + + try: + self.cursor.execute(""" + SELECT t.id, t.name, t.category, t.description, t.primary_use_cases, + t.popularity_score, t.monthly_cost_usd, t.setup_cost_usd, + t.license_cost_usd, t.training_cost_usd, t.total_cost_of_ownership_score, + t.price_performance_ratio, pt.tier_name + FROM tools t + LEFT JOIN price_tiers pt ON t.price_tier_id = pt.id + WHERE t.price_tier_id = %s + ORDER BY t.monthly_cost_usd ASC, t.popularity_score DESC + """, (price_tier_id,)) + tools = self.cursor.fetchall() + return [dict(tool) for tool in tools] + except Exception as e: + logger.error(f"Error fetching tools by price tier: {e}") + return [] + + def get_tools_within_budget(self, max_monthly_cost: float, max_setup_cost: float): + """Get tools within specified budget constraints""" + if not self.connection: + if not self.connect(): + return [] + + try: + self.cursor.execute(""" + SELECT t.id, t.name, t.category, t.description, t.primary_use_cases, + t.popularity_score, t.monthly_cost_usd, t.setup_cost_usd, + t.license_cost_usd, t.training_cost_usd, t.total_cost_of_ownership_score, + t.price_performance_ratio, pt.tier_name + FROM tools t + LEFT JOIN price_tiers pt ON t.price_tier_id = pt.id + WHERE t.monthly_cost_usd <= %s AND t.setup_cost_usd <= %s + ORDER BY t.monthly_cost_usd ASC, t.total_cost_of_ownership_score DESC + """, (max_monthly_cost, max_setup_cost)) + tools = self.cursor.fetchall() + return [dict(tool) for tool in tools] + except Exception as e: + logger.error(f"Error fetching tools within budget: {e}") + return [] + + def get_tools_by_category(self, category: str): + """Get tools by category with pricing information""" + if not self.connection: + if not self.connect(): + return [] + + try: + self.cursor.execute(""" + SELECT t.id, t.name, t.category, t.description, t.primary_use_cases, + t.popularity_score, t.monthly_cost_usd, t.setup_cost_usd, + t.license_cost_usd, t.training_cost_usd, t.total_cost_of_ownership_score, + t.price_performance_ratio, pt.tier_name + FROM tools t + LEFT JOIN price_tiers pt ON t.price_tier_id = pt.id + WHERE t.category = %s + ORDER BY t.monthly_cost_usd ASC, t.popularity_score DESC + """, (category,)) + tools = self.cursor.fetchall() + return [dict(tool) for tool in tools] + except Exception as e: + logger.error(f"Error fetching tools by category: {e}") + return [] + + def get_all_tools(self): + """Get all tools with pricing information""" + if not self.connection: + if not self.connect(): + return [] + + try: + self.cursor.execute(""" + SELECT t.id, t.name, t.category, t.description, t.primary_use_cases, + t.popularity_score, t.monthly_cost_usd, t.setup_cost_usd, + t.license_cost_usd, t.training_cost_usd, t.total_cost_of_ownership_score, + t.price_performance_ratio, pt.tier_name + FROM tools t + LEFT JOIN price_tiers pt ON t.price_tier_id = pt.id + ORDER BY t.monthly_cost_usd ASC, t.popularity_score DESC + """) + tools = self.cursor.fetchall() + return [dict(tool) for tool in tools] + except Exception as e: + logger.error(f"Error fetching all tools: {e}") + return [] + + def apply_migration(self, file_path: str): + """Apply SQL migration file""" + executed = 0 + failed = 0 + errors = [] + + if not os.path.isfile(file_path): + raise FileNotFoundError(f"Migration file not found: {file_path}") + + if not self.is_open(): + if not self.connect(): + raise Exception("Could not connect to PostgreSQL") + + try: + with open(file_path, "r", encoding="utf-8") as f: + raw = f.read() + + # Strip line comments + lines = [] + for line in raw.splitlines(): + stripped = line.strip() + if stripped.startswith("--") or stripped.startswith("//"): + continue + lines.append(line) + + merged = "\n".join(lines) + + # Split by semicolon but handle dollar-quoted strings + statements = [] + current_stmt = "" + in_dollar_quote = False + dollar_tag = "" + + i = 0 + while i < len(merged): + char = merged[i] + + if not in_dollar_quote: + if char == '$': + # Check for start of dollar-quoted string + j = i + 1 + while j < len(merged) and merged[j] != '$': + j += 1 + if j < len(merged): + dollar_tag = merged[i:j+1] + in_dollar_quote = True + current_stmt += char + elif char == ';': + # End of statement + if current_stmt.strip(): + statements.append(current_stmt.strip()) + current_stmt = "" + else: + current_stmt += char + else: + # Inside dollar-quoted string + current_stmt += char + if merged[i:i+len(dollar_tag)] == dollar_tag: + in_dollar_quote = False + dollar_tag = "" + + i += 1 + + # Add final statement if exists + if current_stmt.strip(): + statements.append(current_stmt.strip()) + + # Filter out empty statements + statements = [s for s in statements if s.strip()] + + for stmt in statements: + try: + self.cursor.execute(stmt) + self.connection.commit() + executed += 1 + except Exception as e: + failed += 1 + errors.append({"statement": stmt[:100] + "...", "error": str(e)}) + logger.error(f"Migration failed: {e}") + + return {"executed": executed, "failed": failed, "errors": errors} + + except Exception as e: + logger.error(f"Error applying migration: {e}") + return {"executed": executed, "failed": failed, "errors": errors} + + def recommend_stacks_by_budget(self, budget: float, domain: Optional[str] = None, limit: int = 5): + """ + Recommend tech stacks based on exact budget constraint using PostgreSQL. + + Args: + budget: User's exact monthly budget + domain: Optional domain filter + limit: Maximum number of results + + Returns: + List of tech stacks that fit within budget + """ + if not self.connect(): + return {"error": "Could not connect to PostgreSQL"} + + try: + # Query for stacks within budget + query = """ + SELECT + pbs.id, + pbs.stack_name, + pbs.total_monthly_cost_usd, + pbs.total_setup_cost_usd, + pbs.frontend_tech, + pbs.backend_tech, + pbs.database_tech, + pbs.cloud_tech, + pbs.testing_tech, + pbs.mobile_tech, + pbs.devops_tech, + pbs.ai_ml_tech, + pbs.team_size_range, + pbs.development_time_months, + pbs.maintenance_complexity, + pbs.scalability_ceiling, + pbs.recommended_domains, + pbs.success_rate_percentage, + pbs.user_satisfaction_score, + pbs.description, + pbs.pros, + pbs.cons, + pt.tier_name, + pt.target_audience, + (pbs.user_satisfaction_score * 0.4 + pbs.success_rate_percentage * 0.3 + + (100 - (pbs.total_monthly_cost_usd / %s * 100)) * 0.3) AS recommendation_score, + (pbs.total_monthly_cost_usd / %s) AS budget_utilization + FROM price_based_stacks pbs + JOIN price_tiers pt ON pbs.price_tier_id = pt.id + WHERE pbs.total_monthly_cost_usd <= %s + AND (%s IS NULL OR %s = ANY(pbs.recommended_domains)) + ORDER BY recommendation_score DESC, pbs.total_monthly_cost_usd ASC + LIMIT %s + """ + + self.cursor.execute(query, (budget, budget, budget, domain, domain, limit)) + stacks = self.cursor.fetchall() + + return { + "success": True, + "budget": budget, + "domain": domain, + "stacks_found": len(stacks), + "stacks": [dict(stack) for stack in stacks] + } + + except Exception as e: + logger.error(f"Error in budget recommendation: {e}") + return {"error": str(e)} + finally: + self.close() + + def calculate_custom_stack_cost(self, frontend: str, backend: str, database: str, cloud: str, + testing: str = None, mobile: str = None, devops: str = None, ai_ml: str = None): + """ + Calculate the cost of a custom tech stack by looking up individual technology costs. + + Args: + frontend, backend, database, cloud: Required technologies + testing, mobile, devops, ai_ml: Optional technologies + + Returns: + Dictionary with cost breakdown + """ + if not self.connect(): + return {"error": "Could not connect to PostgreSQL"} + + try: + # Get costs for all technologies + tech_categories = { + 'frontend': frontend, + 'backend': backend, + 'database': database, + 'cloud': cloud + } + + if testing: + tech_categories['testing'] = testing + if mobile: + tech_categories['mobile'] = mobile + if devops: + tech_categories['devops'] = devops + if ai_ml: + tech_categories['ai-ml'] = ai_ml + + total_monthly_cost = 0 + total_setup_cost = 0 + cost_breakdown = {} + + for category, tech_name in tech_categories.items(): + query = """ + SELECT + tech_name, + tech_category, + monthly_operational_cost_usd, + development_cost_usd + training_cost_usd as setup_cost, + total_cost_of_ownership_score, + price_performance_ratio + FROM tech_pricing + WHERE tech_name = %s AND tech_category = %s + """ + + self.cursor.execute(query, (tech_name, category)) + result = self.cursor.fetchone() + + if result: + monthly_cost = float(result['monthly_operational_cost_usd'] or 0) + setup_cost = float(result['setup_cost'] or 0) + + total_monthly_cost += monthly_cost + total_setup_cost += setup_cost + + cost_breakdown[tech_name] = { + 'category': category, + 'monthly_cost': monthly_cost, + 'setup_cost': setup_cost, + 'tco_score': result['total_cost_of_ownership_score'], + 'price_performance': result['price_performance_ratio'] + } + else: + # If technology not found, use default costs + default_monthly = 10 if category in ['frontend', 'backend'] else 20 + default_setup = 100 if category in ['frontend', 'backend'] else 200 + + total_monthly_cost += default_monthly + total_setup_cost += default_setup + + cost_breakdown[tech_name] = { + 'category': category, + 'monthly_cost': default_monthly, + 'setup_cost': default_setup, + 'tco_score': 70, + 'price_performance': 70, + 'note': 'Estimated cost - technology not found in database' + } + + return { + "success": True, + "total_monthly_cost": total_monthly_cost, + "total_setup_cost": total_setup_cost, + "cost_breakdown": cost_breakdown, + "technologies": list(tech_categories.values()) + } + + except Exception as e: + logger.error(f"Error calculating custom stack cost: {e}") + return {"error": str(e)} + finally: + self.close() + + def find_alternatives_within_budget(self, current_tech: str, tech_category: str, budget: float): + """ + Find alternative technologies within the same category that fit the budget. + + Args: + current_tech: Current technology name + tech_category: Technology category (frontend, backend, etc.) + budget: Maximum monthly cost + + Returns: + List of alternative technologies within budget + """ + if not self.connect(): + return {"error": "Could not connect to PostgreSQL"} + + try: + query = """ + SELECT + tech_name, + monthly_operational_cost_usd, + development_cost_usd + training_cost_usd as setup_cost, + total_cost_of_ownership_score, + price_performance_ratio + FROM tech_pricing + WHERE tech_category = %s + AND monthly_operational_cost_usd <= %s + AND tech_name != %s + ORDER BY price_performance_ratio DESC, monthly_operational_cost_usd ASC + """ + + self.cursor.execute(query, (tech_category, budget, current_tech)) + alternatives = self.cursor.fetchall() + + return { + "success": True, + "current_tech": current_tech, + "category": tech_category, + "budget": budget, + "alternatives": [dict(alt) for alt in alternatives] + } + + except Exception as e: + logger.error(f"Error finding alternatives: {e}") + return {"error": str(e)} + finally: + self.close() + +# ================================================================================================ +# ENHANCED TECH STACK SELECTOR +# ================================================================================================ + +class EnhancedTechStackSelector: + def __init__(self, api_key): + self.claude_client = anthropic.Anthropic(api_key=api_key) + logger.info("Enhanced Tech Stack Selector initialized") # ================================================================================================ # FASTAPI APPLICATION # ================================================================================================ app = FastAPI( - title="Working Tech Stack Selector", - description="Simple, effective tech stack recommendations with structured JSON output", - version="9.0.0" + title="Enhanced Tech Stack Selector - PostgreSQL Integrated", + description="Complete tech stack selector with Neo4j, PostgreSQL migration, and AI recommendations", + version="13.0.0" ) app.add_middleware( @@ -58,270 +1173,1241 @@ app.add_middleware( allow_headers=["*"], ) -# Initialize working selector -working_selector = WorkingTechStackSelector() +# ================================================================================================ +# CONFIGURATION +# ================================================================================================ + +logger.remove() +logger.add(sys.stdout, level="INFO", format="{time} | {level} | {message}") + +CLAUDE_API_KEY = "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" + +if not os.getenv("CLAUDE_API_KEY") and CLAUDE_API_KEY: + os.environ["CLAUDE_API_KEY"] = CLAUDE_API_KEY + +# Debug logging for API key +api_key = os.getenv("CLAUDE_API_KEY") or CLAUDE_API_KEY +logger.info(f"🔑 Claude API Key loaded: {api_key[:20]}..." if api_key else "❌ No Claude API Key found") + +# Initialize services +NEO4J_URI = os.getenv("NEO4J_URI", "bolt://localhost:7687") +NEO4J_USER = os.getenv("NEO4J_USER", "neo4j") +NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD", "password") + +neo4j_service = Neo4jService( + uri=NEO4J_URI, + user=NEO4J_USER, + password=NEO4J_PASSWORD +) + +# PostgreSQL configuration - using environment variables +postgres_migration_service = PostgreSQLMigrationService( + host=os.getenv("POSTGRES_HOST", "localhost"), + port=int(os.getenv("POSTGRES_PORT", "5432")), + user=os.getenv("POSTGRES_USER", "pipeline_admin"), + password=os.getenv("POSTGRES_PASSWORD", "secure_pipeline_2024"), + database=os.getenv("POSTGRES_DB", "dev_pipeline") +) + +enhanced_selector = EnhancedTechStackSelector(os.getenv("CLAUDE_API_KEY") or CLAUDE_API_KEY) + +# ================================================================================================ +# SHUTDOWN HANDLER +# ================================================================================================ + +@app.on_event("shutdown") +async def shutdown_event(): + neo4j_service.close() + postgres_migration_service.close() + +atexit.register(lambda: neo4j_service.close()) +atexit.register(lambda: postgres_migration_service.close()) + +# ================================================================================================ +# STARTUP EVENT +# ================================================================================================ + +@app.on_event("startup") +async def startup_event(): + """Initialize PostgreSQL tables and sample data on startup""" + try: + if postgres_migration_service.connect(): + postgres_migration_service.create_tables_if_not_exist() + postgres_migration_service.get_sample_data() + postgres_migration_service.close() + logger.info("✅ PostgreSQL initialization completed") + else: + logger.warning("⚠️ PostgreSQL connection failed during startup") + + # Automatic migration: PostgreSQL -> Neo4j, then apply Neo4j.cql + try: + if POSTGRES_AVAILABLE: + logger.info("🔁 Starting automatic migration Postgres -> Neo4j...") + # Ensure a fresh connection for migration + if postgres_migration_service.connect(): + migrated = postgres_migration_service.migrate_to_neo4j(neo4j_service) + postgres_migration_service.close() + logger.info(f"✅ Migration completed: {migrated}") + else: + logger.warning("⚠️ Skipping migration: PostgreSQL not connected") + + # Apply bundled Neo4j.cql if present + default_cql = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "Neo4j.cql")) + if os.path.isfile(default_cql): + logger.info(f"📥 Applying Neo4j CQL from {default_cql}...") + try: + result = neo4j_service.apply_cql_script(default_cql) + if result.get("failed", 0) > 0: + logger.warning(f"⚠️ CQL apply completed with {result['failed']} failures out of {result.get('executed', 0) + result.get('failed', 0)} statements") + # Log detailed errors if present (limit to first 5 errors to avoid spam) + for i, error in enumerate(result.get("errors", [])[:5]): + logger.error(f"❌ CQL Error {i+1}: {error.get('error', 'Unknown error')}") + if len(result.get("errors", [])) > 5: + logger.warning(f"⚠️ ... and {len(result.get('errors', [])) - 5} more errors (see logs above)") + else: + logger.info(f"✅ Neo4j CQL applied successfully: {result.get('executed', 0)} statements executed") + except Exception as cql_err: + logger.error(f"❌ Failed to apply CQL script: {cql_err}") + else: + logger.info("ℹ️ No bundled Neo4j.cql found; skipping graph schema apply") + + # Apply tools pricing migration + tools_migration = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "db", "003_tools_pricing_migration.sql")) + if os.path.isfile(tools_migration): + logger.info(f"📥 Applying tools pricing migration from {tools_migration}...") + try: + result = postgres_migration_service.apply_migration(tools_migration) + if result.get("failed", 0) > 0: + logger.warning(f"⚠️ Tools migration completed with {result['failed']} failures out of {result.get('executed', 0) + result.get('failed', 0)} statements") + for i, error in enumerate(result.get("errors", [])[:5]): + logger.error(f"❌ Tools Migration Error {i+1}: {error.get('error', 'Unknown error')}") + if len(result.get("errors", [])) > 5: + logger.warning(f"⚠️ ... and {len(result.get('errors', [])) - 5} more errors (see logs above)") + else: + logger.info(f"✅ Tools pricing migration applied successfully: {result.get('executed', 0)} statements executed") + except Exception as tools_err: + logger.error(f"❌ Failed to apply tools migration: {tools_err}") + else: + logger.info("ℹ️ No tools pricing migration found; skipping tools pricing setup") + except Exception as mig_err: + logger.error(f"❌ Automatic migration/apply-cql error: {mig_err}") + except Exception as e: + logger.error(f"❌ PostgreSQL startup error: {e}") + +# ================================================================================================ +# ENDPOINTS +# ================================================================================================ @app.get("/health") async def health_check(): - """Health check""" return { "status": "healthy", - "service": "working-tech-stack-selector", - "version": "9.0.0", - "approach": "structured_json_claude_analysis" + "service": "enhanced-tech-stack-selector-postgresql", + "version": "13.0.0", + "features": ["neo4j", "postgresql_migration", "claude_ai", "fastapi"] } +@app.get("/api/diagnostics") +async def diagnostics(): + diagnostics_result = { + "service": "enhanced-tech-stack-selector-postgresql", + "version": "13.0.0", + "timestamp": datetime.utcnow().isoformat(), + "checks": {} + } + + # Check Neo4j + neo4j_check = {"status": "unknown"} + try: + with neo4j_service.driver.session() as session: + result = session.run("MATCH (n) RETURN count(n) AS count") + node_count = result.single().get("count", 0) + neo4j_check.update({ + "status": "ok", + "node_count": int(node_count) + }) + except Exception as e: + neo4j_check.update({ + "status": "error", + "error": str(e) + }) + diagnostics_result["checks"]["neo4j"] = neo4j_check + + # Check Claude + claude_check = { + "status": "unknown", + "api_key_present": bool(os.getenv("CLAUDE_API_KEY")) + } + try: + client = enhanced_selector.claude_client + if client is None: + claude_check.update({ + "status": "error", + "error": "Claude client not initialized" + }) + else: + try: + # Simple test to check if client works + claude_check.update({ + "status": "ok", + "client_initialized": True + }) + except Exception as api_err: + claude_check.update({ + "status": "error", + "error": str(api_err) + }) + except Exception as e: + claude_check.update({ + "status": "error", + "error": str(e) + }) + diagnostics_result["checks"]["claude_anthropic"] = claude_check + + # Check PostgreSQL + postgres_check = {"status": "unknown"} + try: + if POSTGRES_AVAILABLE: + if postgres_migration_service.connect(): + # Test query + postgres_migration_service.cursor.execute("SELECT version()") + version = postgres_migration_service.cursor.fetchone() + postgres_migration_service.cursor.execute("SELECT COUNT(*) FROM technologies") + tech_count = postgres_migration_service.cursor.fetchone()['count'] + + postgres_check.update({ + "status": "ok", + "available": True, + "version": version[0] if version else "unknown", + "technologies_count": tech_count + }) + postgres_migration_service.close() + else: + postgres_check.update({"status": "error", "available": False}) + else: + postgres_check.update({"status": "not_available", "available": False}) + except Exception as e: + postgres_check.update({"status": "error", "error": str(e)}) + diagnostics_result["checks"]["postgresql"] = postgres_check + + return diagnostics_result + +@app.get("/api/postgres/technologies") +async def get_all_postgres_technologies(): + """Get all technologies from PostgreSQL""" + try: + technologies = postgres_migration_service.get_all_technologies() + return {"success": True, "data": technologies, "count": len(technologies)} + except Exception as e: + return {"success": False, "error": str(e)} + +@app.post("/api/postgres/init") +async def initialize_postgres_tables(): + """Initialize PostgreSQL tables and sample data""" + try: + if not postgres_migration_service.connect(): + return {"success": False, "error": "Could not connect to PostgreSQL"} + + tables_created = postgres_migration_service.create_tables_if_not_exist() + sample_data_inserted = postgres_migration_service.get_sample_data() + postgres_migration_service.close() + + return { + "success": True, + "tables_created": tables_created, + "sample_data_inserted": sample_data_inserted, + "message": "PostgreSQL initialization completed" + } + except Exception as e: + return {"success": False, "error": str(e)} + +@app.get("/api/neo4j/technologies") +async def get_all_technologies(): + try: + with neo4j_service.driver.session() as session: + result = session.run("MATCH (t:Technology) RETURN t") + technologies = [] + for record in result: + t = record["t"] + technologies.append({ + "id": t.get("id", f"tech_{t.get('name', 'unknown').lower().replace(' ', '_')}"), + "name": t.get("name", "Unknown Technology"), + "category": t.get("category", "unknown"), + "type": t.get("type") or t.get("framework_type") or t.get("language_base") or t.get("database_type") or t.get("service_type") or "general", + "maturity_score": t.get("maturity_score", 50), + "learning_curve": t.get("learning_curve", "medium"), + "performance_rating": t.get("performance_rating", 70), + "community_size": t.get("community_size", "medium"), + "cost_model": t.get("cost_model") or ("free" if t.get("monthly_cost", 0) == 0 else "paid"), + "primary_use_cases": t.get("primary_use_cases", ["General purpose"]), + "strengths": t.get("strengths", ["Good performance", "Active community"]), + "weaknesses": t.get("weaknesses", ["Learning curve", "Documentation could be better"]) + }) + return {"success": True, "data": technologies} + except Exception as e: + return {"success": False, "error": str(e)} + +@app.get("/api/neo4j/tech_compatibility") +async def get_tech_compatibility(): + try: + with neo4j_service.driver.session() as session: + query = """ + MATCH (a:Technology)-[r:COMPATIBLE_WITH|OPTIMIZED_FOR]->(b:Technology) + RETURN a.name AS tech_a_name, + b.name AS tech_b_name, + coalesce(r.score, r.compatibility_score) AS score, + coalesce(r.effort, r.integration_effort) AS effort, + coalesce(r.reason, r.notes) AS notes, + type(r) AS relationship + """ + result = session.run(query) + compatibilities = [record.data() for record in result] + return {"success": True, "data": compatibilities} + except Exception as e: + return {"success": False, "error": str(e)} + +@app.post("/api/migrate/postgres-to-neo4j") +async def migrate_postgres_to_neo4j(): + """Migrate data from PostgreSQL to Neo4j""" + try: + if not POSTGRES_AVAILABLE: + return {"success": False, "error": "PostgreSQL connector not available"} + + success = postgres_migration_service.migrate_to_neo4j(neo4j_service) + if success: + # Create relationships after migration + relationships_created = neo4j_service.create_compatibility_relationships() + return { + "success": True, + "message": "Migration from PostgreSQL to Neo4j completed successfully", + "relationships_created": relationships_created + } + else: + return {"success": False, "error": "Migration failed", "details": postgres_migration_service.last_error} + except Exception as e: + return {"success": False, "error": str(e)} + +@app.post("/api/neo4j/create-relationships") +async def create_neo4j_relationships(): + """Create compatibility relationships in Neo4j""" + try: + relationships_created = neo4j_service.create_compatibility_relationships() + return { + "success": True, + "message": f"Created {relationships_created} compatibility relationships", + "relationships_created": relationships_created + } + except Exception as e: + return {"success": False, "error": str(e)} + +@app.get("/api/neo4j/technologies-with-relationships") +async def get_technologies_with_relationships(): + """Get all technologies with their relationships""" + try: + technologies = neo4j_service.get_all_technologies_with_relationships() + return {"success": True, "data": technologies, "count": len(technologies)} + except Exception as e: + return {"success": False, "error": str(e)} + +@app.get("/api/test/neo4j") +async def test_neo4j_connection(): + try: + try: + neo4j_service.driver.verify_connectivity() + connectivity = "ok" + except Exception as conn_err: + connectivity = f"error: {conn_err}" + + with neo4j_service.driver.session() as session: + result = session.run("MATCH (t:Technology) RETURN count(t) as count") + single = result.single() + count = single["count"] if single else 0 + + sample_tech = [] + with neo4j_service.driver.session() as session: + result = session.run(""" + MATCH (t:Technology) + RETURN coalesce(t.name, 'Unknown') as name, coalesce(t.category, 'Unknown') as category + LIMIT 5 + """) + for record in result: + sample_tech.append(dict(record)) + + return { + "status": "success", + "neo4j_connection": connectivity, + "total_technologies": count, + "sample_technologies": sample_tech + } + + except Exception as e: + return { + "status": "error", + "message": str(e) + } + +class RecommendBestRequest(BaseModel): + domain: Optional[str] = None + budget: Optional[int] = None + preferredTechnologies: Optional[List[str]] = None + +class ApplyCQLRequest(BaseModel): + path: Optional[str] = None # defaults to bundled file + +class BudgetRecommendRequest(BaseModel): + budget: Optional[float] = None + monthly_cost: Optional[float] = None + setup_cost: Optional[float] = None + domain: Optional[str] = None + limit: Optional[int] = 5 # Maximum number of results to return + +class CustomStackCostRequest(BaseModel): + frontend: str + backend: str + database: str + cloud: str + testing: Optional[str] = None + mobile: Optional[str] = None + devops: Optional[str] = None + ai_ml: Optional[str] = None + +class AlternativeTechRequest(BaseModel): + current_tech: str + tech_category: str + budget: float + +@app.post("/recommend/best") +async def recommend_best(req: RecommendBestRequest): + try: + rows = neo4j_service.get_best_stack(req.domain, req.budget, req.preferredTechnologies) + return rows + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/analysis/price-performance") +async def analysis_price_performance(): + try: + rows = neo4j_service.get_price_performance() + return rows + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +# === Added: Routes for user's queries === +@app.get("/analysis/technology-ecosystem") +async def analysis_technology_ecosystem(): + try: + return neo4j_service.get_technology_ecosystem() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/analysis/stack-trends") +async def analysis_stack_trends(): + try: + return neo4j_service.get_stack_trends() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/validate/relationships") +async def validate_relationships(): + try: + return neo4j_service.validate_relationships() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/validate/completeness") +async def validate_completeness(): + try: + return neo4j_service.validate_data_completeness() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/validate/price-consistency") +async def validate_price_consistency(): + try: + return neo4j_service.validate_price_consistency() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/export/stacks-with-pricing") +async def export_stacks_with_pricing(): + try: + return neo4j_service.export_stacks_with_pricing() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/export/price-tiers") +async def export_price_tiers(): + try: + return neo4j_service.export_price_tiers() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/api/neo4j/apply-cql") +async def apply_cql(req: ApplyCQLRequest): + try: + default_path = os.path.join(os.path.dirname(__file__), "..", "Neo4j.cql") + default_path = os.path.abspath(default_path) + cql_path = req.path or default_path + result = neo4j_service.apply_cql_script(cql_path) + return {"success": result.get("failed", 0) == 0, **result, "path": cql_path} + except FileNotFoundError as nf: + raise HTTPException(status_code=404, detail=str(nf)) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/api/neo4j/query") +async def run_neo4j_query(req: dict): + """Run a custom Neo4j query""" + try: + query = req.get("query", "") + params = req.get("params", {}) + result = neo4j_service.run_query(query, params) + return {"success": True, "data": result} + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/recommend/budget") +async def recommend_by_budget(req: BudgetRecommendRequest): + try: + # New behavior: allow monthly_cost/setup_cost caps + if req.monthly_cost is not None or req.setup_cost is not None: + return neo4j_service.recommend_by_cost_limits( + monthly_cost=req.monthly_cost, + setup_cost=req.setup_cost, + domain=req.domain + ) + # Backward compatibility with original budget field + if req.budget is None: + raise HTTPException(status_code=400, detail="budget or monthly_cost/setup_cost is required") + return neo4j_service.recommend_by_budget(req.budget, req.domain, req.limit) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/api/v2/recommend/budget") +async def recommend_stacks_by_budget_v2(req: BudgetRecommendRequest): + """ + ROBUST BUDGET-BASED TECH STACK RECOMMENDATION + + This endpoint provides accurate budget-based recommendations using PostgreSQL. + It finds all stacks that cost <= user's budget, not just stacks from a price tier. + + Example: If user gives $15, it will return stacks costing $15 or less. + """ + try: + if req.budget is None: + raise HTTPException(status_code=400, detail="budget is required") + + if req.budget <= 0: + raise HTTPException(status_code=400, detail="budget must be greater than 0") + + result = postgres_migration_service.recommend_stacks_by_budget( + budget=req.budget, + domain=req.domain, + limit=req.limit + ) + + if "error" in result: + raise HTTPException(status_code=500, detail=result["error"]) + + return { + "success": True, + "message": f"Found {result['stacks_found']} tech stacks within ${req.budget} budget", + "data": result + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error in budget recommendation v2: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/api/v2/calculate/custom-stack") +async def calculate_custom_stack_cost(req: CustomStackCostRequest): + """ + Calculate the cost of a custom tech stack by looking up individual technology costs. + + This allows users to see the exact cost of their preferred technology combination. + """ + try: + result = postgres_migration_service.calculate_custom_stack_cost( + frontend=req.frontend, + backend=req.backend, + database=req.database, + cloud=req.cloud, + testing=req.testing, + mobile=req.mobile, + devops=req.devops, + ai_ml=req.ai_ml + ) + + if "error" in result: + raise HTTPException(status_code=500, detail=result["error"]) + + return { + "success": True, + "message": "Custom stack cost calculated successfully", + "data": result + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error calculating custom stack cost: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/api/v2/find/alternatives") +async def find_alternatives_within_budget(req: AlternativeTechRequest): + """ + Find alternative technologies within the same category that fit the budget. + + Useful when a user wants to replace a specific technology with a cheaper alternative. + """ + try: + result = postgres_migration_service.find_alternatives_within_budget( + current_tech=req.current_tech, + tech_category=req.tech_category, + budget=req.budget + ) + + if "error" in result: + raise HTTPException(status_code=500, detail=result["error"]) + + return { + "success": True, + "message": f"Found {len(result['alternatives'])} alternatives for {req.current_tech} within ${req.budget} budget", + "data": result + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error finding alternatives: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/v2/budget/analysis") +async def budget_analysis(budget: float, domain: Optional[str] = None): + """ + Get comprehensive budget analysis including: + - Available stacks within budget + - Budget utilization + - Cost breakdown by category + - Recommendations for optimization + """ + try: + if budget <= 0: + raise HTTPException(status_code=400, detail="budget must be greater than 0") + + # Get stacks within budget + stacks_result = postgres_migration_service.recommend_stacks_by_budget( + budget=budget, + domain=domain, + limit=10 + ) + + if "error" in stacks_result: + raise HTTPException(status_code=500, detail=stacks_result["error"]) + + # Analyze budget utilization + if stacks_result["stacks"]: + avg_cost = sum(float(stack["total_monthly_cost_usd"]) for stack in stacks_result["stacks"]) / len(stacks_result["stacks"]) + budget_utilization = (avg_cost / float(budget)) * 100 + + # Get cost breakdown by category + cost_breakdown = {} + for stack in stacks_result["stacks"]: + for tech in ["frontend_tech", "backend_tech", "database_tech", "cloud_tech"]: + tech_name = stack.get(tech) + if tech_name: + category = tech.replace("_tech", "") + if category not in cost_breakdown: + cost_breakdown[category] = [] + cost_breakdown[category].append(tech_name) + else: + avg_cost = 0 + budget_utilization = 0 + cost_breakdown = {} + + return { + "success": True, + "budget_analysis": { + "user_budget": budget, + "stacks_found": stacks_result["stacks_found"], + "average_stack_cost": round(avg_cost, 2), + "budget_utilization_percentage": round(budget_utilization, 2), + "cost_breakdown_by_category": cost_breakdown, + "recommendations": { + "budget_efficiency": "Excellent" if budget_utilization > 80 else "Good" if budget_utilization > 60 else "Consider increasing budget", + "savings_potential": f"${budget - avg_cost:.2f} per month" if avg_cost < budget else "No savings available", + "scaling_room": f"${budget * 0.2:.2f} available for scaling" if budget_utilization < 80 else "Limited scaling room" + } + }, + "stacks": stacks_result["stacks"] + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error in budget analysis: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/test/postgres") +async def test_postgres_connection(): + """Test PostgreSQL connection""" + try: + if not POSTGRES_AVAILABLE: + return { + "status": "error", + "message": "PostgreSQL connector (psycopg2) not available" + } + + if postgres_migration_service.connect(): + # Test basic query + postgres_migration_service.cursor.execute("SELECT version()") + version_info = postgres_migration_service.cursor.fetchone() + + postgres_migration_service.cursor.execute("SELECT COUNT(*) FROM technologies") + tech_count = postgres_migration_service.cursor.fetchone()['count'] + + # Get sample technologies + postgres_migration_service.cursor.execute(""" + SELECT name, category FROM technologies LIMIT 5 + """) + sample_tech = postgres_migration_service.cursor.fetchall() + + postgres_migration_service.close() + + return { + "status": "success", + "postgres_connection": "ok", + "version": version_info[0] if version_info else "unknown", + "total_technologies": tech_count, + "sample_technologies": [dict(tech) for tech in sample_tech] + } + else: + return { + "status": "error", + "message": "Could not connect to PostgreSQL" + } + + except Exception as e: + return { + "status": "error", + "message": str(e) + } + +@app.get("/api/tools") +async def get_tools(category: Optional[str] = None, price_tier_id: Optional[int] = None): + """Get all tools with optional filtering""" + try: + if category: + tools = postgres_migration_service.get_tools_by_category(category) + elif price_tier_id: + tools = postgres_migration_service.get_tools_by_price_tier(price_tier_id) + else: + tools = postgres_migration_service.get_all_tools() + return {"success": True, "data": tools, "count": len(tools)} + except Exception as e: + logger.error(f"Error fetching tools: {e}") + return {"success": False, "error": str(e)} + +@app.get("/api/tools/budget") +async def get_tools_within_budget(max_monthly_cost: float, max_setup_cost: float): + """Get tools within specified budget constraints""" + try: + tools = postgres_migration_service.get_tools_within_budget(max_monthly_cost, max_setup_cost) + return {"success": True, "data": tools, "count": len(tools)} + except Exception as e: + logger.error(f"Error fetching tools within budget: {e}") + return {"success": False, "error": str(e)} + +@app.get("/api/tools/categories") +async def get_tool_categories(): + """Get all tool categories""" + try: + if not postgres_migration_service.is_open(): + if not postgres_migration_service.connect(): + return {"success": False, "error": "Could not connect to PostgreSQL"} + + postgres_migration_service.cursor.execute("SELECT DISTINCT category FROM tools ORDER BY category") + categories = [row[0] for row in postgres_migration_service.cursor.fetchall()] + return {"success": True, "data": categories, "count": len(categories)} + except Exception as e: + logger.error(f"Error fetching tool categories: {e}") + return {"success": False, "error": str(e)} + +@app.get("/api/tools/price-tiers") +async def get_tools_by_price_tier(price_tier_id: int): + """Get tools by price tier""" + try: + tools = postgres_migration_service.get_tools_by_price_tier(price_tier_id) + return {"success": True, "data": tools, "count": len(tools)} + except Exception as e: + logger.error(f"Error fetching tools by price tier: {e}") + return {"success": False, "error": str(e)} + @app.post("/api/v1/select") -async def select_working_tech_stack(request: Request): - """STRUCTURED JSON VERSION - Claude returns structured JSON recommendations""" +async def select_enhanced_tech_stack(request: Request): try: request_data = await request.json() - # Log exactly what we receive - logger.info("=== RECEIVED DATA START ===") - logger.info(json.dumps(request_data, indent=2)) - logger.info("=== RECEIVED DATA END ===") + logger.info("=== RECEIVED ENHANCED DATA START ===") + logger.info(json.dumps(request_data, indent=2, default=str)) + logger.info("=== RECEIVED ENHANCED DATA END ===") - # Try EVERY possible path to find features - all_features = [] - project_name = "Unknown Project" - scale_info = {} + extracted_data = extract_enhanced_data(request_data) - # Path 1: request_data["data"]["all_features"] - if isinstance(request_data, dict) and "data" in request_data: - if isinstance(request_data["data"], dict) and "all_features" in request_data["data"]: - all_features = request_data["data"]["all_features"] - project_name = request_data["data"].get("project_name", "Unknown Project") - scale_info = request_data["data"].get("scale_information", {}) - logger.info(f"✅ Found features via path 1: {len(all_features)} features") - - # Path 2: request_data["all_features"] - if not all_features and isinstance(request_data, dict) and "all_features" in request_data: - all_features = request_data["all_features"] - project_name = request_data.get("project_name", "Unknown Project") - scale_info = request_data.get("scale_information", {}) - logger.info(f"✅ Found features via path 2: {len(all_features)} features") - - # Path 3: success wrapper format - if not all_features and isinstance(request_data, dict) and "success" in request_data and "data" in request_data: - data_section = request_data["data"] - if isinstance(data_section, dict) and "all_features" in data_section: - all_features = data_section["all_features"] - project_name = data_section.get("project_name", "Unknown Project") - scale_info = data_section.get("scale_information", {}) - logger.info(f"✅ Found features via path 3: {len(all_features)} features") - - # Path 4: Deep recursive search - if not all_features: - def find_all_features(obj, path="root"): - if isinstance(obj, dict): - if "all_features" in obj and isinstance(obj["all_features"], list): - logger.info(f"✅ Found all_features at path: {path}") - return obj["all_features"], obj.get("project_name", "Unknown Project"), obj.get("scale_information", {}) - for key, value in obj.items(): - result = find_all_features(value, f"{path}.{key}") - if result[0]: - return result - elif isinstance(obj, list): - for i, item in enumerate(obj): - result = find_all_features(item, f"{path}[{i}]") - if result[0]: - return result - return [], "Unknown Project", {} + # If no features found, try to extract from description + if not extracted_data["features"] and not extracted_data["feature_name"]: + logger.warning("⚠️ No features found, attempting to extract from description") - all_features, project_name, scale_info = find_all_features(request_data) - if all_features: - logger.info(f"✅ Found features via deep search: {len(all_features)} features") + # Try to extract features from description + description = extracted_data.get("description", "") + if description: + extracted_features = extract_features_from_description(description) + if extracted_features: + extracted_data["features"] = extracted_features + extracted_data["feature_name"] = extracted_features[0] if extracted_features else "" + logger.info(f"✅ Extracted {len(extracted_features)} features from description: {extracted_features}") + else: + # If still no features, create a generic feature from the project name + project_name = extracted_data.get("project_name", "Unknown Project") + extracted_data["features"] = [project_name] + extracted_data["feature_name"] = project_name + logger.info(f"✅ Created generic feature from project name: {project_name}") + else: + logger.error("❌ NO FEATURES OR FEATURE DATA FOUND") + return { + "error": "No features or feature data found in request", + "received_data_keys": list(request_data.keys()) if isinstance(request_data, dict) else "not_dict", + "extraction_attempted": "enhanced_data_extraction" + } - logger.info(f"🎯 FINAL RESULTS:") - logger.info(f" Features found: {len(all_features)}") - logger.info(f" Project name: {project_name}") - logger.info(f" Scale info: {scale_info}") - if all_features: - logger.info(f" First 10 features: {all_features[:10]}") + context = build_comprehensive_context(extracted_data) + claude_recommendations = await generate_enhanced_recommendations(context) - if not all_features: - logger.error("❌ NO FEATURES FOUND ANYWHERE") - return { - "error": "Still no features found after exhaustive search", - "received_data_keys": list(request_data.keys()) if isinstance(request_data, dict) else "not_dict", - "received_data_sample": str(request_data)[:500] + "..." if len(str(request_data)) > 500 else str(request_data) - } + # Neo4j recommendations - extract technical requirements from features and description + technical_requirements = [] - # SUCCESS - Call Claude with found features - logger.info(f"🚀 Calling Claude with {len(all_features)} features") + # Add features as requirements + if extracted_data["features"]: + technical_requirements.extend(extracted_data["features"]) - features_text = "\n".join([f"- {feature.replace('_', ' ').title()}" for feature in all_features]) + # Add description keywords + description = extracted_data.get("description", "") + if description: + # Extract technical keywords from description + tech_keywords = ["payment", "security", "reporting", "multi-currency", "web", "application", "platform", "transaction", "financial", "enterprise", "api", "database", "frontend", "backend"] + for keyword in tech_keywords: + if keyword.lower() in description.lower(): + technical_requirements.append(keyword) - # STRUCTURED JSON PROMPT - Claude returns structured JSON - prompt = f"""You are an expert software architect. Analyze these functional requirements and recommend the optimal technology stack. + # Add project type + if extracted_data.get("project_type"): + technical_requirements.append(extracted_data["project_type"]) + + recommendations_from_neo4j = [] + + try: + logger.info(f"🔍 Searching Neo4j with technical requirements: {technical_requirements}") + matching_tech = neo4j_service.get_tech_by_requirements(technical_requirements) + logger.info(f"📊 Found {len(matching_tech)} matching technologies from Neo4j") + for tech in matching_tech: + compatible_tech = neo4j_service.get_compatible_tech(tech["id"]) + recommendations_from_neo4j.append({ + "technology": dict(tech), + "compatible_technologies": [ + {"name": t["tech"]["name"], "score": t["score"]} + for t in compatible_tech + ] + }) + except Exception as neo_err: + logger.error(f"Neo4j integration failed: {neo_err}") + recommendations_from_neo4j = [{"error": str(neo_err)}] + + # PostgreSQL recommendations + postgres_recommendations = [] + try: + postgres_technologies = postgres_migration_service.get_all_technologies() + postgres_recommendations = postgres_technologies[:5] # Top 5 for demo + except Exception as pg_err: + logger.error(f"PostgreSQL integration failed: {pg_err}") + postgres_recommendations = [{"error": str(pg_err)}] + + complete_response = { + "success": True, + "enhanced_analysis": True, + + "project_context": { + "project_name": extracted_data["project_name"], + "project_type": extracted_data["project_type"], + "features_analyzed": len(extracted_data["features"]), + "business_questions_answered": len(extracted_data["business_answers"]), + "complexity": extracted_data["complexity"], + "detailed_requirements_count": len(extracted_data.get("detailed_requirements", [])), + "total_tagged_rules": extracted_data.get("total_tagged_rules", 0) + }, + + "functional_requirements": { + "feature_name": extracted_data["feature_name"], + "description": extracted_data["description"], + "technical_requirements": extracted_data["requirements"], + "business_logic_rules": extracted_data["logic_rules"], + "complexity_level": extracted_data["complexity"], + "all_features": extracted_data["features"], + "detailed_requirements": extracted_data.get("detailed_requirements", []), + "tagged_rules": extracted_data.get("tagged_rules", []), + "business_context": { + "questions": extracted_data["business_questions"], + "answers": extracted_data["business_answers"] + } + }, + + "claude_recommendations": claude_recommendations, + "neo4j_recommendations": recommendations_from_neo4j, + "postgres_recommendations": postgres_recommendations, + + "analysis_timestamp": datetime.utcnow().isoformat(), + "ready_for_architecture_design": True + } + + logger.info("✅ Enhanced tech stack analysis + Neo4j + PostgreSQL integration completed") + return complete_response + + except Exception as e: + logger.error(f"💥 ERROR in merged enhanced tech stack selection: {e}") + return { + "error": str(e), + "debug": "Check service logs for detailed error information" + } -PROJECT: {project_name} +# ================================================================================================ +# HELPER FUNCTIONS +# ================================================================================================ -FUNCTIONAL REQUIREMENTS TO IMPLEMENT ({len(all_features)} features): -{features_text} +def extract_features_from_description(description: str) -> List[str]: + """Extract features from project description using keyword matching""" + if not description: + return [] + + # Define feature keywords and their mappings + feature_keywords = { + "payment": ["payment", "pay", "transaction", "billing", "invoice", "checkout"], + "security": ["security", "secure", "authentication", "authorization", "encryption", "ssl", "https"], + "reporting": ["report", "reporting", "analytics", "dashboard", "metrics", "statistics"], + "multi-currency": ["multi-currency", "currency", "multi currency", "international", "forex"], + "user-management": ["user", "users", "profile", "account", "registration", "login"], + "api": ["api", "rest", "graphql", "endpoint", "service"], + "database": ["database", "data", "storage", "persistence"], + "frontend": ["frontend", "ui", "interface", "web", "mobile", "responsive"], + "backend": ["backend", "server", "service", "microservice"], + "real-time": ["real-time", "realtime", "live", "instant", "websocket"], + "notification": ["notification", "alert", "email", "sms", "push"], + "search": ["search", "filter", "query", "find"], + "file-upload": ["upload", "file", "document", "media", "image"], + "integration": ["integration", "connect", "sync", "import", "export"], + "workflow": ["workflow", "process", "automation", "pipeline"] + } + + extracted_features = [] + description_lower = description.lower() + + for feature, keywords in feature_keywords.items(): + if any(keyword in description_lower for keyword in keywords): + extracted_features.append(feature) + + return extracted_features -SCALE & CONTEXT: -{json.dumps(scale_info, indent=2) if scale_info else "Enterprise-scale application"} +def extract_enhanced_data(request_data: Dict) -> Dict: + extracted = { + "project_name": "Unknown Project", + "project_type": "unknown", + "feature_name": "", + "description": "", + "requirements": [], + "complexity": "medium", + "logic_rules": [], + "business_questions": [], + "business_answers": [], + "features": [], + "all_features": [], + "detailed_requirements": [], + "tagged_rules": [], + "total_tagged_rules": 0 + } + + if isinstance(request_data, dict): + extracted["feature_name"] = request_data.get("featureName", "") + extracted["description"] = request_data.get("description", "") + extracted["requirements"] = request_data.get("requirements", []) + extracted["complexity"] = request_data.get("complexity", "medium") + extracted["logic_rules"] = request_data.get("logicRules", []) + extracted["business_questions"] = request_data.get("businessQuestions", []) + extracted["business_answers"] = request_data.get("businessAnswers", []) + extracted["project_name"] = request_data.get("projectName", "Unknown Project") + extracted["project_type"] = request_data.get("projectType", "unknown") + extracted["all_features"] = request_data.get("allFeatures", []) + + if isinstance(extracted["business_answers"], dict): + ba_list = [] + for key, value in extracted["business_answers"].items(): + if isinstance(value, str) and value.strip(): + question_idx = int(key) if key.isdigit() else 0 + if question_idx < len(extracted["business_questions"]): + ba_list.append({ + "question": extracted["business_questions"][question_idx], + "answer": value.strip() + }) + extracted["business_answers"] = ba_list + + if extracted["feature_name"]: + extracted["features"] = [extracted["feature_name"]] + + if extracted["all_features"]: + feature_names = [] + for feature in extracted["all_features"]: + if isinstance(feature, dict): + feature_name = feature.get("name", feature.get("featureName", "")) + feature_names.append(feature_name) + + requirement_analysis = feature.get("requirementAnalysis", []) + if requirement_analysis: + for req_analysis in requirement_analysis: + requirement_name = req_analysis.get("requirement", "Unknown Requirement") + requirement_rules = req_analysis.get("logicRules", []) + + detailed_req = { + "feature_name": feature_name, + "requirement_name": requirement_name, + "description": feature.get("description", ""), + "complexity": req_analysis.get("complexity", "medium"), + "rules": requirement_rules + } + extracted["detailed_requirements"].append(detailed_req) + + for rule_idx, rule in enumerate(requirement_rules): + if rule and rule.strip(): + tagged_rule = { + "rule_id": f"R{rule_idx + 1}", + "rule_text": rule.strip(), + "feature_name": feature_name, + "requirement_name": requirement_name + } + extracted["tagged_rules"].append(tagged_rule) + extracted["total_tagged_rules"] += 1 + + elif feature.get("logicRules"): + regular_rules = feature.get("logicRules", []) + extracted["logic_rules"].extend(regular_rules) + + else: + feature_names.append(str(feature)) + + extracted["features"].extend([f for f in feature_names if f]) + + return extracted -CRITICAL: Return your response as a valid JSON object with this exact structure: +def build_comprehensive_context(extracted_data: Dict) -> Dict: + functional_requirements = [] + if extracted_data["feature_name"]: + functional_requirements.append(f"Core Feature: {extracted_data['feature_name']}") + + if extracted_data["requirements"]: + functional_requirements.extend([f"• {req}" for req in extracted_data["requirements"]]) + + if extracted_data["features"]: + for feature in extracted_data["features"]: + if feature and feature != extracted_data["feature_name"]: + functional_requirements.append(f"• {feature}") + + detailed_requirements_text = [] + for detailed_req in extracted_data.get("detailed_requirements", []): + req_text = f"📋 {detailed_req['feature_name']} → {detailed_req['requirement_name']}:" + for rule in detailed_req["rules"]: + req_text += f"\n - {rule}" + detailed_requirements_text.append(req_text) + + if detailed_requirements_text: + functional_requirements.extend(detailed_requirements_text) + + business_context = {} + if extracted_data["business_answers"]: + for answer_data in extracted_data["business_answers"]: + if isinstance(answer_data, dict): + question = answer_data.get("question", "") + answer = answer_data.get("answer", "") + if question and answer: + if any(keyword in question.lower() for keyword in ["user", "scale", "concurrent"]): + business_context["scale_requirements"] = business_context.get("scale_requirements", []) + business_context["scale_requirements"].append(f"{question}: {answer}") + elif any(keyword in question.lower() for keyword in ["compliance", "security", "encryption"]): + business_context["security_requirements"] = business_context.get("security_requirements", []) + business_context["security_requirements"].append(f"{question}: {answer}") + elif any(keyword in question.lower() for keyword in ["budget", "timeline"]): + business_context["project_constraints"] = business_context.get("project_constraints", []) + business_context["project_constraints"].append(f"{question}: {answer}") + else: + business_context["other_requirements"] = business_context.get("other_requirements", []) + business_context["other_requirements"].append(f"{question}: {answer}") + + return { + "project_name": extracted_data["project_name"], + "project_type": extracted_data["project_type"], + "complexity": extracted_data["complexity"], + "functional_requirements": functional_requirements, + "business_context": business_context, + "logic_rules": extracted_data["logic_rules"], + "detailed_requirements": extracted_data.get("detailed_requirements", []), + "tagged_rules": extracted_data.get("tagged_rules", []) + } + +async def generate_enhanced_recommendations(context: Dict) -> Dict: + if not enhanced_selector.claude_client: + logger.error("❌ Claude client not available") + return { + "error": "Claude AI not available", + "fallback": "Basic recommendations would go here" + } + + functional_reqs_text = "\n".join(context["functional_requirements"]) + + business_context_text = "" + for category, requirements in context["business_context"].items(): + business_context_text += f"\n{category.replace('_', ' ').title()}:\n" + business_context_text += "\n".join([f" - {req}" for req in requirements]) + "\n" + + logic_rules_text = "\n".join([f" - {rule}" for rule in context["logic_rules"]]) + + tagged_rules_text = "" + if context.get("tagged_rules"): + tagged_rules_text = f"\n\nDETAILED TAGGED RULES:\n" + for tagged_rule in context["tagged_rules"][:10]: + tagged_rules_text += f" {tagged_rule['rule_id']}: {tagged_rule['rule_text']} (Feature: {tagged_rule['feature_name']})\n" + if len(context["tagged_rules"]) > 10: + tagged_rules_text += f" ... and {len(context['tagged_rules']) - 10} more tagged rules\n" + + prompt = f"""You are a senior software architect. Analyze this comprehensive project context and recommend the optimal technology stack. + +PROJECT CONTEXT: +- Name: {context["project_name"]} +- Type: {context["project_type"]} +- Complexity: {context["complexity"]} + +FUNCTIONAL REQUIREMENTS: +{functional_reqs_text} + +BUSINESS CONTEXT & CONSTRAINTS: +{business_context_text} + +BUSINESS LOGIC RULES: +{logic_rules_text} +{tagged_rules_text} + +Based on this comprehensive analysis, provide detailed technology recommendations as a JSON object: {{ "technology_recommendations": {{ "frontend": {{ - "framework": "recommended framework with reasoning", - "libraries": ["library1", "library2", "library3"], - "reasoning": "detailed reasoning for frontend choices" + "framework": "recommended framework", + "libraries": ["lib1", "lib2", "lib3"], + "reasoning": "detailed reasoning based on requirements and business context" }}, "backend": {{ "framework": "recommended backend framework", "language": "programming language", - "libraries": ["library1", "library2", "library3"], - "reasoning": "detailed reasoning for backend choices" + "libraries": ["lib1", "lib2", "lib3"], + "reasoning": "detailed reasoning based on complexity and business needs" }}, "database": {{ - "primary": "primary database", + "primary": "primary database choice", "secondary": ["cache", "search", "analytics"], - "reasoning": "detailed reasoning for database choices" + "reasoning": "database choice based on data requirements and scale" }}, "infrastructure": {{ "cloud_provider": "recommended cloud provider", - "orchestration": "container orchestration", + "orchestration": "container/orchestration choice", "services": ["service1", "service2", "service3"], - "reasoning": "detailed reasoning for infrastructure choices" + "reasoning": "infrastructure reasoning based on scale and budget" }}, - "testing": {{ - "unit_testing": "unit testing framework", - "integration_testing": "integration testing tools", - "e2e_testing": "end-to-end testing framework", - "performance_testing": "performance testing tools", - "reasoning": "detailed reasoning for testing strategy" + "security": {{ + "authentication": "auth strategy", + "authorization": "authorization approach", + "data_protection": "data protection measures", + "compliance": "compliance approach", + "reasoning": "security reasoning based on business context" }}, "third_party_services": {{ - "authentication": "auth service recommendation", - "communication": "communication service", + "communication": "communication services", "monitoring": "monitoring solution", "payment": "payment processing", "other_services": ["service1", "service2"], - "reasoning": "detailed reasoning for third-party choices" + "reasoning": "third-party service reasoning" }} }}, "implementation_strategy": {{ "architecture_pattern": "recommended architecture pattern", "development_phases": ["phase1", "phase2", "phase3"], "deployment_strategy": "deployment approach", - "scalability_approach": "how to handle scale" + "scalability_approach": "scalability strategy", + "timeline_estimate": "development timeline estimate" }}, - "justification": {{ - "why_this_stack": "overall reasoning for this technology combination", - "scalability_benefits": "how this stack handles the scale requirements", - "team_benefits": "how this stack benefits a {scale_info.get('team_size', 'large')} team", - "compliance_considerations": "how this stack meets compliance requirements" + "business_alignment": {{ + "addresses_scale_requirements": "how recommendations address scale needs", + "addresses_security_requirements": "how recommendations address security needs", + "addresses_budget_constraints": "how recommendations fit budget", + "addresses_timeline_constraints": "how recommendations fit timeline", + "compliance_considerations": "compliance alignment" }} }} -IMPORTANT: -- Return ONLY valid JSON, no additional text -- Base all recommendations on the {len(all_features)} functional requirements provided -- Consider the scale: {scale_info.get('expected_users', 'enterprise scale')} users -- Ensure all technologies work together seamlessly -- Provide specific technology names, not generic descriptions""" +CRITICAL: Return ONLY valid JSON, no additional text. Base all recommendations on the provided functional requirements and business context.""" - # Call Claude - if working_selector.claude_client: - logger.info("📞 Calling Claude API for structured JSON response...") - message = working_selector.claude_client.messages.create( - model="claude-3-5-sonnet-20241022", - max_tokens=8000, - temperature=0.1, - messages=[{"role": "user", "content": prompt}] - ) - - claude_response = message.content[0].text - logger.info("✅ Successfully received Claude response") - - # Try to parse Claude's JSON response - try: - claude_json = json.loads(claude_response) - logger.info("✅ Successfully parsed Claude JSON response") - - return { - "success": True, - "features_analyzed": len(all_features), - "project_name": project_name, - "scale_context": scale_info, - "all_features": all_features, - "claude_recommendations": claude_json, - "analysis_timestamp": datetime.utcnow().isoformat() - } - - except json.JSONDecodeError as e: - logger.error(f"❌ Failed to parse Claude JSON: {e}") - # Fallback to text response - return { - "success": True, - "features_analyzed": len(all_features), - "project_name": project_name, - "scale_context": scale_info, - "all_features": all_features, - "claude_recommendations": claude_response, - "analysis_timestamp": datetime.utcnow().isoformat(), - "json_parse_error": str(e) - } - else: - logger.error("❌ Claude client not available") - return { - "error": "Claude AI not available", - "features_found": len(all_features), - "debug": "Check Claude API key configuration" - } + try: + logger.info("📞 Calling Claude for enhanced recommendations with functional requirements and tagged rules...") + message = enhanced_selector.claude_client.messages.create( + model="claude-3-5-sonnet-20241022", + max_tokens=8000, + temperature=0.1, + messages=[{"role": "user", "content": prompt}] + ) + claude_response = message.content[0].text.strip() + logger.info("✅ Received Claude response for enhanced recommendations") + + try: + recommendations = json.loads(claude_response) + logger.info("✅ Successfully parsed enhanced recommendations JSON") + return recommendations + except json.JSONDecodeError as e: + logger.error(f"❌ JSON parse error: {e}") + return { + "parse_error": str(e), + "raw_response": claude_response[:1000] + "..." if len(claude_response) > 1000 else claude_response + } + except Exception as e: - logger.error(f"💥 ERROR in tech stack selection: {e}") + logger.error(f"❌ Claude API error: {e}") return { "error": str(e), - "debug": "Check service logs for detailed error information" + "fallback": "Enhanced recommendations generation failed" } -@app.post("/api/v1/debug-n8n") -async def debug_n8n_data(request: Request): - """Debug endpoint to see exactly what n8n sends""" - try: - request_data = await request.json() - - # Extract data if present - if "data" in request_data: - data_section = request_data["data"] - all_features = data_section.get("all_features", []) - else: - data_section = request_data - all_features = request_data.get("all_features", []) - - return { - "raw_data_keys": list(request_data.keys()) if isinstance(request_data, dict) else "not_dict", - "data_section_keys": list(data_section.keys()) if isinstance(data_section, dict) else "not_dict", - "features_found": len(all_features), - "first_5_features": all_features[:5] if all_features else "none", - "data_structure": { - "has_success": "success" in request_data, - "has_data": "data" in request_data, - "has_all_features": "all_features" in data_section if isinstance(data_section, dict) else False - } - } - except Exception as e: - return {"error": str(e)} +# ================================================================================================ +# MAIN ENTRY POINT +# ================================================================================================ if __name__ == "__main__": import uvicorn logger.info("="*60) - logger.info("🚀 WORKING TECH STACK SELECTOR v9.0 - STRUCTURED JSON VERSION") + logger.info("🚀 ENHANCED TECH STACK SELECTOR v13.0 - POSTGRESQL INTEGRATED") logger.info("="*60) - logger.info("✅ Comprehensive logging enabled") - logger.info("✅ Multiple feature extraction paths") - logger.info("✅ Deep recursive search capability") - logger.info("✅ Claude integration with structured JSON output") - logger.info("✅ JSON parsing and validation") + logger.info("✅ FastAPI application") + logger.info("✅ Neo4j service integration") + logger.info("✅ PostgreSQL migration service") + logger.info("✅ Claude AI recommendations") + logger.info("✅ All endpoints integrated") + logger.info("✅ Enhanced data extraction and tagged rules") + logger.info("✅ PostgreSQL table initialization on startup") logger.info("="*60) uvicorn.run("main:app", host="0.0.0.0", port=8002, log_level="info") \ No newline at end of file diff --git a/services/tech-stack-selector/src/main_migrated.py b/services/tech-stack-selector/src/main_migrated.py new file mode 100644 index 0000000..ec5e096 --- /dev/null +++ b/services/tech-stack-selector/src/main_migrated.py @@ -0,0 +1,1030 @@ +# ================================================================================================ +# ENHANCED TECH STACK SELECTOR - MIGRATED VERSION +# Uses PostgreSQL data migrated to Neo4j with proper price-based relationships +# ================================================================================================ + +import os +import sys +import json +from datetime import datetime +from typing import Dict, Any, Optional, List +from pydantic import BaseModel +from fastapi import FastAPI, HTTPException, Request +from fastapi.middleware.cors import CORSMiddleware +from loguru import logger +import atexit +import anthropic +from neo4j import GraphDatabase +import psycopg2 +from psycopg2.extras import RealDictCursor + +# ================================================================================================ +# NEO4J SERVICE FOR MIGRATED DATA +# ================================================================================================ + +class MigratedNeo4jService: + def __init__(self, uri, user, password): + self.driver = GraphDatabase.driver( + uri, + auth=(user, password), + connection_timeout=5 + ) + try: + self.driver.verify_connectivity() + logger.info("✅ Migrated Neo4j Service connected successfully") + except Exception as e: + logger.error(f"❌ Neo4j connection failed: {e}") + + def close(self): + self.driver.close() + + def run_query(self, query: str, parameters: Optional[Dict[str, Any]] = None): + with self.driver.session() as session: + result = session.run(query, parameters or {}) + return [record.data() for record in result] + + def get_recommendations_by_budget(self, budget: float, domain: Optional[str] = None, preferred_techs: Optional[List[str]] = None): + """Get recommendations based on budget using migrated data""" + # Normalize domain for better matching + normalized_domain = domain.lower().strip() if domain else None + + # Create domain mapping for better matching + domain_mapping = { + 'web development': ['portfolio', 'blog', 'website', 'landing', 'documentation', 'personal', 'small', 'learning', 'prototype', 'startup', 'mvp', 'api', 'e-commerce', 'online', 'marketplace', 'retail'], + 'ecommerce': ['e-commerce', 'online', 'marketplace', 'retail', 'store', 'shop'], + 'portfolio': ['portfolio', 'personal', 'blog', 'website'], + 'blog': ['blog', 'content', 'writing', 'documentation'], + 'startup': ['startup', 'mvp', 'prototype', 'small', 'business'], + 'api': ['api', 'backend', 'service', 'microservice'], + 'mobile': ['mobile', 'app', 'ios', 'android', 'react native', 'flutter'], + 'ai': ['ai', 'ml', 'machine learning', 'artificial intelligence', 'data', 'analytics'], + 'gaming': ['game', 'gaming', 'unity', 'unreal'], + 'healthcare': ['healthcare', 'medical', 'health', 'patient', 'clinic'], + 'finance': ['finance', 'fintech', 'banking', 'payment', 'financial'], + 'education': ['education', 'learning', 'course', 'training', 'elearning'] + } + + # Get related domain keywords + related_keywords = [] + if normalized_domain: + for key, keywords in domain_mapping.items(): + if any(keyword in normalized_domain for keyword in [key] + keywords): + related_keywords.extend(keywords) + break + # If no mapping found, use the original domain + if not related_keywords: + related_keywords = [normalized_domain] + + # First try to get existing tech stacks with domain filtering + existing_stacks = self.run_query(""" + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + WHERE s.monthly_cost <= $budget + AND ($domain IS NULL OR + toLower(s.name) CONTAINS $normalized_domain OR + toLower(s.description) CONTAINS $normalized_domain OR + EXISTS { MATCH (d:Domain)-[:RECOMMENDS]->(s) WHERE toLower(d.name) = $normalized_domain } OR + EXISTS { MATCH (d:Domain)-[:RECOMMENDS]->(s) WHERE toLower(d.name) CONTAINS $normalized_domain } OR + (s.recommended_domains IS NOT NULL AND ANY(rd IN s.recommended_domains WHERE + ANY(keyword IN $related_keywords WHERE toLower(rd) CONTAINS keyword)))) + + OPTIONAL MATCH (s)-[:USES_FRONTEND]->(frontend:Technology) + OPTIONAL MATCH (s)-[:USES_BACKEND]->(backend:Technology) + OPTIONAL MATCH (s)-[:USES_DATABASE]->(database:Technology) + OPTIONAL MATCH (s)-[:USES_CLOUD]->(cloud:Technology) + OPTIONAL MATCH (s)-[:USES_TESTING]->(testing:Technology) + OPTIONAL MATCH (s)-[:USES_MOBILE]->(mobile:Technology) + OPTIONAL MATCH (s)-[:USES_DEVOPS]->(devops:Technology) + OPTIONAL MATCH (s)-[:USES_AI_ML]->(ai_ml:Technology) + + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, p, + (s.satisfaction_score * 0.4 + s.success_rate * 0.3 + + CASE WHEN $budget IS NOT NULL THEN (100 - (s.monthly_cost / $budget * 100)) * 0.3 ELSE 30 END) AS base_score + + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, base_score, p, + CASE WHEN $preferred_techs IS NOT NULL THEN + size([x IN $preferred_techs WHERE + toLower(x) IN [toLower(frontend.name), toLower(backend.name), toLower(database.name), + toLower(cloud.name), toLower(testing.name), toLower(mobile.name), + toLower(devops.name), toLower(ai_ml.name)]]) * 5 + ELSE 0 END AS preference_bonus + + RETURN s.name AS stack_name, + s.monthly_cost AS monthly_cost, + s.setup_cost AS setup_cost, + s.team_size_range AS team_size, + s.development_time_months AS development_time, + s.satisfaction_score AS satisfaction, + s.success_rate AS success_rate, + s.price_tier AS price_tier, + s.recommended_domains AS recommended_domains, + s.description AS description, + s.pros AS pros, + s.cons AS cons, + COALESCE(frontend.name, s.frontend_tech, 'Not specified') AS frontend, + COALESCE(backend.name, s.backend_tech, 'Not specified') AS backend, + COALESCE(database.name, s.database_tech, 'Not specified') AS database, + COALESCE(cloud.name, s.cloud_tech, 'Not specified') AS cloud, + COALESCE(testing.name, s.testing_tech, 'Not specified') AS testing, + COALESCE(mobile.name, s.mobile_tech, 'Not specified') AS mobile, + COALESCE(devops.name, s.devops_tech, 'Not specified') AS devops, + COALESCE(ai_ml.name, s.ai_ml_tech, 'Not specified') AS ai_ml, + base_score + preference_bonus AS recommendation_score + ORDER BY recommendation_score DESC, s.monthly_cost ASC + LIMIT 10 + """, { + "budget": budget, + "domain": domain, + "normalized_domain": normalized_domain, + "related_keywords": related_keywords, + "preferred_techs": preferred_techs or [] + }) + + logger.info(f"🔍 Found {len(existing_stacks)} existing stacks from Neo4j with domain filtering") + + if existing_stacks: + logger.info("✅ Using existing Neo4j stacks") + return existing_stacks + + # If no domain-specific stacks found, try without domain filtering + logger.info("🔍 No domain-specific stacks found, trying without domain filter...") + existing_stacks_no_domain = self.run_query(""" + MATCH (s:TechStack)-[:BELONGS_TO_TIER]->(p:PriceTier) + WHERE s.monthly_cost <= $budget + + OPTIONAL MATCH (s)-[:USES_FRONTEND]->(frontend:Technology) + OPTIONAL MATCH (s)-[:USES_BACKEND]->(backend:Technology) + OPTIONAL MATCH (s)-[:USES_DATABASE]->(database:Technology) + OPTIONAL MATCH (s)-[:USES_CLOUD]->(cloud:Technology) + OPTIONAL MATCH (s)-[:USES_TESTING]->(testing:Technology) + OPTIONAL MATCH (s)-[:USES_MOBILE]->(mobile:Technology) + OPTIONAL MATCH (s)-[:USES_DEVOPS]->(devops:Technology) + OPTIONAL MATCH (s)-[:USES_AI_ML]->(ai_ml:Technology) + + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, p, + (s.satisfaction_score * 0.4 + s.success_rate * 0.3 + + CASE WHEN $budget IS NOT NULL THEN (100 - (s.monthly_cost / $budget * 100)) * 0.3 ELSE 30 END) AS base_score + + WITH s, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, base_score, p, + CASE WHEN $preferred_techs IS NOT NULL THEN + size([x IN $preferred_techs WHERE + toLower(x) IN [toLower(frontend.name), toLower(backend.name), toLower(database.name), + toLower(cloud.name), toLower(testing.name), toLower(mobile.name), + toLower(devops.name), toLower(ai_ml.name)]]) * 5 + ELSE 0 END AS preference_bonus + + RETURN s.name AS stack_name, + s.monthly_cost AS monthly_cost, + s.setup_cost AS setup_cost, + s.team_size_range AS team_size, + s.development_time_months AS development_time, + s.satisfaction_score AS satisfaction, + s.success_rate AS success_rate, + s.price_tier AS price_tier, + s.recommended_domains AS recommended_domains, + s.description AS description, + s.pros AS pros, + s.cons AS cons, + COALESCE(frontend.name, s.frontend_tech, 'Not specified') AS frontend, + COALESCE(backend.name, s.backend_tech, 'Not specified') AS backend, + COALESCE(database.name, s.database_tech, 'Not specified') AS database, + COALESCE(cloud.name, s.cloud_tech, 'Not specified') AS cloud, + COALESCE(testing.name, s.testing_tech, 'Not specified') AS testing, + COALESCE(mobile.name, s.mobile_tech, 'Not specified') AS mobile, + COALESCE(devops.name, s.devops_tech, 'Not specified') AS devops, + COALESCE(ai_ml.name, s.ai_ml_tech, 'Not specified') AS ai_ml, + base_score + preference_bonus AS recommendation_score + ORDER BY recommendation_score DESC, s.monthly_cost ASC + LIMIT 10 + """, { + "budget": budget, + "preferred_techs": preferred_techs or [] + }) + + logger.info(f"🔍 Found {len(existing_stacks_no_domain)} existing stacks from Neo4j without domain filtering") + + if existing_stacks_no_domain: + logger.info("✅ Using existing Neo4j stacks (no domain filter)") + return existing_stacks_no_domain + + # If no existing stacks, try Claude AI for intelligent recommendations + logger.info("🤖 No existing stacks found, trying Claude AI...") + claude_recommendations = self.get_claude_ai_recommendations(budget, domain, preferred_techs) + if claude_recommendations: + logger.info(f"✅ Generated {len(claude_recommendations)} Claude AI recommendations") + return claude_recommendations + + # Final fallback to dynamic recommendations using tools and technologies + logger.info("⚠️ Claude AI failed, falling back to dynamic recommendations") + return self.get_dynamic_recommendations(budget, domain, preferred_techs) + + def get_dynamic_recommendations(self, budget: float, domain: Optional[str] = None, preferred_techs: Optional[List[str]] = None): + """Create dynamic recommendations using tools and technologies""" + # Normalize domain for better matching + normalized_domain = domain.lower().strip() if domain else None + + # Get tools within budget + tools_query = """ + MATCH (tool:Tool)-[:BELONGS_TO_TIER]->(p:PriceTier) + WHERE tool.monthly_cost_usd <= $budget + RETURN tool.name as tool_name, + tool.category as category, + tool.monthly_cost_usd as monthly_cost, + tool.total_cost_of_ownership_score as tco_score, + tool.price_performance_ratio as price_performance, + p.tier_name as price_tier + ORDER BY tool.price_performance_ratio DESC, tool.monthly_cost_usd ASC + LIMIT 20 + """ + + tools = self.run_query(tools_query, {"budget": budget}) + + # Get technologies by category (without pricing constraints) + tech_categories = ["frontend", "backend", "database", "cloud", "testing", "mobile", "devops", "ai_ml"] + recommendations = [] + + # Create domain-specific recommendations + domain_specific_stacks = self._create_domain_specific_stacks(normalized_domain, budget) + if domain_specific_stacks: + recommendations.extend(domain_specific_stacks) + + for category in tech_categories: + tech_query = f""" + MATCH (t:Technology {{category: '{category}'}}) + RETURN t.name as name, + t.category as category, + t.maturity_score as maturity_score, + t.learning_curve as learning_curve, + t.performance_rating as performance_rating, + t.total_cost_of_ownership_score as tco_score, + t.price_performance_ratio as price_performance + ORDER BY t.total_cost_of_ownership_score DESC, t.maturity_score DESC + LIMIT 3 + """ + + technologies = self.run_query(tech_query) + + if technologies: + # Create a recommendation entry for this category + best_tech = technologies[0] + recommendation = { + "stack_name": f"Dynamic {category.title()} Stack - {best_tech['name']}", + "monthly_cost": 0.0, # Technologies don't have pricing + "setup_cost": 0.0, + "team_size_range": "2-5", + "development_time_months": 2, + "satisfaction_score": best_tech.get('tco_score') or 80, + "success_rate": best_tech.get('maturity_score') or 80, + "price_tier": "Custom", + "budget_efficiency": 100.0, + "frontend": best_tech['name'] if category == 'frontend' else 'Not specified', + "backend": best_tech['name'] if category == 'backend' else 'Not specified', + "database": best_tech['name'] if category == 'database' else 'Not specified', + "cloud": best_tech['name'] if category == 'cloud' else 'Not specified', + "testing": best_tech['name'] if category == 'testing' else 'Not specified', + "mobile": best_tech['name'] if category == 'mobile' else 'Not specified', + "devops": best_tech['name'] if category == 'devops' else 'Not specified', + "ai_ml": best_tech['name'] if category == 'ai_ml' else 'Not specified', + "recommendation_score": (best_tech.get('tco_score') or 80) + (best_tech.get('maturity_score') or 80) / 2 + } + recommendations.append(recommendation) + + # Add tool-based recommendations + if tools: + # Group tools by category and create recommendations + tool_categories = {} + for tool in tools: + category = tool['category'] + if category not in tool_categories: + tool_categories[category] = [] + tool_categories[category].append(tool) + + for category, category_tools in tool_categories.items(): + if category_tools: + best_tool = category_tools[0] + total_cost = sum(t['monthly_cost'] for t in category_tools[:3]) # Top 3 tools + + if total_cost <= budget: + recommendation = { + "stack_name": f"Tool-based {category.title()} Stack - {best_tool['tool_name']}", + "monthly_cost": total_cost, + "setup_cost": total_cost * 0.5, + "team_size_range": "1-3", + "development_time_months": 1, + "satisfaction_score": best_tool.get('tco_score') or 80, + "success_rate": best_tool.get('price_performance') or 80, + "price_tier": best_tool.get('price_tier', 'Custom'), + "budget_efficiency": 100.0 - ((total_cost / budget) * 20) if budget > 0 else 100.0, + "frontend": "Not specified", + "backend": "Not specified", + "database": "Not specified", + "cloud": "Not specified", + "testing": "Not specified", + "mobile": "Not specified", + "devops": "Not specified", + "ai_ml": "Not specified", + "recommendation_score": (best_tool.get('tco_score') or 80) + (best_tool.get('price_performance') or 80) / 2, + "tools": [t['tool_name'] for t in category_tools[:3]] + } + recommendations.append(recommendation) + + # Sort by recommendation score and return top 10 + recommendations.sort(key=lambda x: x['recommendation_score'], reverse=True) + return recommendations[:10] + + def _create_domain_specific_stacks(self, domain: Optional[str], budget: float): + """Create domain-specific technology stacks""" + if not domain: + return [] + + # Domain-specific technology mappings + domain_tech_mapping = { + 'healthcare': { + 'frontend': 'React', + 'backend': 'Django', + 'database': 'PostgreSQL', + 'cloud': 'AWS', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'TensorFlow' + }, + 'finance': { + 'frontend': 'Angular', + 'backend': 'Java Spring', + 'database': 'PostgreSQL', + 'cloud': 'AWS', + 'testing': 'JUnit', + 'mobile': 'Flutter', + 'devops': 'Kubernetes', + 'ai_ml': 'Scikit-learn' + }, + 'gaming': { + 'frontend': 'Unity', + 'backend': 'Node.js', + 'database': 'MongoDB', + 'cloud': 'AWS', + 'testing': 'Unity Test Framework', + 'mobile': 'Unity', + 'devops': 'Docker', + 'ai_ml': 'TensorFlow' + }, + 'education': { + 'frontend': 'React', + 'backend': 'Django', + 'database': 'PostgreSQL', + 'cloud': 'DigitalOcean', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'Scikit-learn' + }, + 'media': { + 'frontend': 'Next.js', + 'backend': 'Node.js', + 'database': 'MongoDB', + 'cloud': 'Vercel', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'Hugging Face' + }, + 'iot': { + 'frontend': 'React', + 'backend': 'Python', + 'database': 'InfluxDB', + 'cloud': 'AWS', + 'testing': 'Pytest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'TensorFlow' + }, + 'social': { + 'frontend': 'React', + 'backend': 'Node.js', + 'database': 'MongoDB', + 'cloud': 'AWS', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'Hugging Face' + }, + 'elearning': { + 'frontend': 'Vue.js', + 'backend': 'Django', + 'database': 'PostgreSQL', + 'cloud': 'DigitalOcean', + 'testing': 'Jest', + 'mobile': 'Flutter', + 'devops': 'Docker', + 'ai_ml': 'Scikit-learn' + }, + 'realestate': { + 'frontend': 'React', + 'backend': 'Node.js', + 'database': 'PostgreSQL', + 'cloud': 'AWS', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'Not specified' + }, + 'travel': { + 'frontend': 'React', + 'backend': 'Node.js', + 'database': 'MongoDB', + 'cloud': 'AWS', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'Not specified' + }, + 'manufacturing': { + 'frontend': 'Angular', + 'backend': 'Java Spring', + 'database': 'PostgreSQL', + 'cloud': 'AWS', + 'testing': 'JUnit', + 'mobile': 'Flutter', + 'devops': 'Kubernetes', + 'ai_ml': 'TensorFlow' + } + } + + # Get technology mapping for domain + tech_mapping = domain_tech_mapping.get(domain) + if not tech_mapping: + return [] + + # Create domain-specific stack + stack = { + "stack_name": f"Domain-Specific {domain.title()} Stack", + "monthly_cost": min(budget * 0.8, 100.0), # Use 80% of budget or max $100 + "setup_cost": min(budget * 0.4, 500.0), # Use 40% of budget or max $500 + "team_size_range": "3-6", + "development_time_months": 4, + "satisfaction_score": 85, + "success_rate": 88, + "price_tier": "Custom", + "recommended_domains": [domain.title()], + "description": f"Specialized technology stack optimized for {domain} applications", + "pros": [ + f"Optimized for {domain}", + "Domain-specific features", + "Proven technology choices", + "Good performance" + ], + "cons": [ + "Domain-specific complexity", + "Learning curve", + "Customization needs" + ], + "frontend": tech_mapping['frontend'], + "backend": tech_mapping['backend'], + "database": tech_mapping['database'], + "cloud": tech_mapping['cloud'], + "testing": tech_mapping['testing'], + "mobile": tech_mapping['mobile'], + "devops": tech_mapping['devops'], + "ai_ml": tech_mapping['ai_ml'], + "recommendation_score": 90.0 + } + + return [stack] + + def get_available_domains(self): + """Get all available domains from the database""" + query = """ + MATCH (d:Domain) + RETURN d.name as domain_name, + d.project_scale as project_scale, + d.team_experience_level as team_experience_level + ORDER BY d.name + """ + return self.run_query(query) + + def get_technologies_by_price_tier(self, tier_name: str): + """Get technologies for a specific price tier""" + query = """ + MATCH (t:Technology)-[:BELONGS_TO_TIER]->(p:PriceTier {tier_name: $tier_name}) + RETURN t.name as name, + t.category as category, + t.monthly_cost_usd as monthly_cost, + t.total_cost_of_ownership_score as tco_score, + t.price_performance_ratio as price_performance, + t.maturity_score as maturity_score, + t.learning_curve as learning_curve + ORDER BY t.total_cost_of_ownership_score DESC, t.monthly_cost_usd ASC + """ + return self.run_query(query, {"tier_name": tier_name}) + + def get_tools_by_price_tier(self, tier_name: str): + """Get tools for a specific price tier""" + query = """ + MATCH (tool:Tool)-[:BELONGS_TO_TIER]->(p:PriceTier {tier_name: $tier_name}) + RETURN tool.name as name, + tool.category as category, + tool.monthly_cost_usd as monthly_cost, + tool.total_cost_of_ownership_score as tco_score, + tool.price_performance_ratio as price_performance, + tool.popularity_score as popularity_score + ORDER BY tool.price_performance_ratio DESC, tool.monthly_cost_usd ASC + """ + return self.run_query(query, {"tier_name": tier_name}) + + def get_price_tier_analysis(self): + """Get analysis of all price tiers""" + query = """ + MATCH (p:PriceTier) + OPTIONAL MATCH (p)<-[:BELONGS_TO_TIER]-(t:Technology) + OPTIONAL MATCH (p)<-[:BELONGS_TO_TIER]-(tool:Tool) + OPTIONAL MATCH (p)<-[:BELONGS_TO_TIER]-(s:TechStack) + + RETURN p.tier_name as tier_name, + p.min_price_usd as min_price, + p.max_price_usd as max_price, + p.target_audience as target_audience, + p.typical_project_scale as project_scale, + count(DISTINCT t) as technology_count, + count(DISTINCT tool) as tool_count, + count(DISTINCT s) as stack_count, + avg(t.monthly_cost_usd) as avg_tech_cost, + avg(tool.monthly_cost_usd) as avg_tool_cost + ORDER BY p.min_price_usd + """ + return self.run_query(query) + + def get_optimal_combinations(self, budget: float, category: str): + """Get optimal technology combinations within budget for a category""" + query = """ + MATCH (t:Technology {category: $category})-[:BELONGS_TO_TIER]->(p:PriceTier) + WHERE t.monthly_cost_usd <= $budget + RETURN t.name as name, + t.monthly_cost_usd as monthly_cost, + t.total_cost_of_ownership_score as tco_score, + t.price_performance_ratio as price_performance, + p.tier_name as price_tier, + (t.total_cost_of_ownership_score * 0.6 + t.price_performance_ratio * 0.4) as combined_score + ORDER BY combined_score DESC, t.monthly_cost_usd ASC + LIMIT 10 + """ + return self.run_query(query, {"budget": budget, "category": category}) + + def get_compatibility_analysis(self, tech_name: str): + """Get compatibility analysis for a specific technology""" + query = """ + MATCH (t:Technology {name: $tech_name})-[r:COMPATIBLE_WITH]-(compatible:Technology) + RETURN compatible.name as compatible_tech, + compatible.category as category, + r.compatibility_score as score, + r.integration_effort as effort, + r.reason as reason + ORDER BY r.compatibility_score DESC + """ + return self.run_query(query, {"tech_name": tech_name}) + + def validate_data_integrity(self): + """Validate the integrity of migrated data""" + query = """ + MATCH (s:TechStack) + RETURN s.name as stack_name, + exists((s)-[:BELONGS_TO_TIER]->()) as has_price_tier, + exists((s)-[:USES_FRONTEND]->()) as has_frontend, + exists((s)-[:USES_BACKEND]->()) as has_backend, + exists((s)-[:USES_DATABASE]->()) as has_database, + exists((s)-[:USES_CLOUD]->()) as has_cloud, + s.monthly_cost as monthly_cost, + s.price_tier as price_tier + ORDER BY s.monthly_cost + """ + return self.run_query(query) + + def get_claude_ai_recommendations(self, budget: float, domain: Optional[str] = None, preferred_techs: Optional[List[str]] = None): + """Generate recommendations using Claude AI when no knowledge graph data is available""" + try: + client = anthropic.Anthropic(api_key=api_key) + + # Create a comprehensive prompt for Claude AI + prompt = f""" +You are a tech stack recommendation expert. Generate 5-10 technology stack recommendations based on the following requirements: + +**Requirements:** +- Budget: ${budget:,.2f} per month +- Domain: {domain or 'general'} +- Preferred Technologies: {', '.join(preferred_techs) if preferred_techs else 'None specified'} + +**Output Format:** +Return a JSON array with the following structure for each recommendation: +{{ + "stack_name": "Descriptive name for the tech stack", + "monthly_cost": number (monthly operational cost in USD), + "setup_cost": number (one-time setup cost in USD), + "team_size_range": "string (e.g., '1-2', '3-5', '6-10')", + "development_time_months": number (months to complete, 1-12), + "satisfaction_score": number (0-100, user satisfaction score), + "success_rate": number (0-100, project success rate), + "price_tier": "string (e.g., 'Micro Budget', 'Startup Budget', 'Enterprise')", + "budget_efficiency": number (0-100, how well it uses the budget), + "frontend": "string (specific frontend technology like 'React.js', 'Vue.js', 'Angular')", + "backend": "string (specific backend technology like 'Node.js', 'Django', 'Spring Boot')", + "database": "string (specific database like 'PostgreSQL', 'MongoDB', 'MySQL')", + "cloud": "string (specific cloud platform like 'AWS', 'DigitalOcean', 'Azure')", + "testing": "string (specific testing framework like 'Jest', 'pytest', 'Cypress')", + "mobile": "string (mobile technology like 'React Native', 'Flutter', 'Ionic' or 'None')", + "devops": "string (devops tool like 'Docker', 'GitHub Actions', 'Jenkins')", + "ai_ml": "string (AI/ML technology like 'TensorFlow', 'scikit-learn', 'PyTorch' or 'None')", + "recommendation_score": number (0-100, overall recommendation score), + "tools": ["array of specific tools and services"], + "description": "string (brief explanation of the recommendation)" +}} + +**Important Guidelines:** +1. Ensure all technology fields have specific, realistic technology names (not "Not specified") +2. Monthly costs should be realistic and within budget +3. Consider the domain requirements carefully +4. Include preferred technologies when possible +5. Provide diverse recommendations (different approaches, complexity levels) +6. Make sure all numeric values are realistic and consistent +7. Focus on practical, implementable solutions + +Generate recommendations that are: +- Cost-effective and within budget +- Appropriate for the domain +- Include modern, proven technologies +- Provide good value for money +- Are realistic to implement +""" + + response = client.messages.create( + model="claude-3-5-sonnet-20241022", + max_tokens=4000, + temperature=0.7, + messages=[{ + "role": "user", + "content": prompt + }] + ) + + # Parse Claude's response + content = response.content[0].text.strip() + + # Extract JSON from the response + import re + json_match = re.search(r'\[.*\]', content, re.DOTALL) + if json_match: + recommendations = json.loads(json_match.group()) + logger.info(f"✅ Generated {len(recommendations)} Claude AI recommendations") + return recommendations + else: + logger.warning("❌ Could not parse Claude AI response as JSON") + return [] + + except Exception as e: + logger.error(f"❌ Claude AI recommendation failed: {e}") + return [] + +# ================================================================================================ +# POSTGRESQL MIGRATION SERVICE (SAME AS BEFORE) +# ================================================================================================ + +class PostgreSQLMigrationService: + def __init__(self, + host="localhost", + port=5432, + user="pipeline_admin", + password="secure_pipeline_2024", + database="dev_pipeline"): + self.config = { + "host": host, + "port": port, + "user": user, + "password": password, + "database": database + } + self.connection = None + self.cursor = None + self.last_error: Optional[str] = None + + def is_open(self) -> bool: + try: + return ( + self.connection is not None and + getattr(self.connection, "closed", 1) == 0 and + self.cursor is not None and + not getattr(self.cursor, "closed", True) + ) + except Exception: + return False + + def connect(self): + try: + if self.is_open(): + self.last_error = None + return True + self.connection = psycopg2.connect(**self.config) + self.cursor = self.connection.cursor(cursor_factory=RealDictCursor) + logger.info("Connected to PostgreSQL successfully") + self.last_error = None + return True + except Exception as e: + logger.error(f"Error connecting to PostgreSQL: {e}") + self.last_error = str(e) + return False + + def close(self): + try: + if self.cursor and not getattr(self.cursor, "closed", True): + self.cursor.close() + finally: + self.cursor = None + try: + if self.connection and getattr(self.connection, "closed", 1) == 0: + self.connection.close() + finally: + self.connection = None + +# ================================================================================================ +# FASTAPI APPLICATION +# ================================================================================================ + +app = FastAPI( + title="Enhanced Tech Stack Selector - Migrated Version", + description="Tech stack selector using PostgreSQL data migrated to Neo4j with price-based relationships", + version="15.0.0" +) + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# ================================================================================================ +# CONFIGURATION +# ================================================================================================ + +logger.remove() +logger.add(sys.stdout, level="INFO", format="{time} | {level} | {message}") + +CLAUDE_API_KEY = "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" + +if not os.getenv("CLAUDE_API_KEY") and CLAUDE_API_KEY: + os.environ["CLAUDE_API_KEY"] = CLAUDE_API_KEY + +api_key = os.getenv("CLAUDE_API_KEY") or CLAUDE_API_KEY +logger.info(f"🔑 Claude API Key loaded: {api_key[:20]}..." if api_key else "❌ No Claude API Key found") + +# Initialize services +NEO4J_URI = os.getenv("NEO4J_URI", "bolt://localhost:7687") +NEO4J_USER = os.getenv("NEO4J_USER", "neo4j") +NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD", "password") + +neo4j_service = MigratedNeo4jService( + uri=NEO4J_URI, + user=NEO4J_USER, + password=NEO4J_PASSWORD +) + +postgres_migration_service = PostgreSQLMigrationService( + host=os.getenv("POSTGRES_HOST", "localhost"), + port=int(os.getenv("POSTGRES_PORT", "5432")), + user=os.getenv("POSTGRES_USER", "pipeline_admin"), + password=os.getenv("POSTGRES_PASSWORD", "secure_pipeline_2024"), + database=os.getenv("POSTGRES_DB", "dev_pipeline") +) + +# ================================================================================================ +# SHUTDOWN HANDLER +# ================================================================================================ + +@app.on_event("shutdown") +async def shutdown_event(): + neo4j_service.close() + postgres_migration_service.close() + +atexit.register(lambda: neo4j_service.close()) +atexit.register(lambda: postgres_migration_service.close()) + +# ================================================================================================ +# ENDPOINTS +# ================================================================================================ + +@app.get("/health") +async def health_check(): + return { + "status": "healthy", + "service": "enhanced-tech-stack-selector-migrated", + "version": "15.0.0", + "features": ["migrated_neo4j", "postgresql_source", "claude_ai", "price_based_relationships"] + } + +@app.get("/api/diagnostics") +async def diagnostics(): + diagnostics_result = { + "service": "enhanced-tech-stack-selector-migrated", + "version": "15.0.0", + "timestamp": datetime.utcnow().isoformat(), + "checks": {} + } + + # Check Neo4j + neo4j_check = {"status": "unknown"} + try: + with neo4j_service.driver.session() as session: + result = session.run("MATCH (n) RETURN count(n) AS count") + node_count = result.single().get("count", 0) + neo4j_check.update({ + "status": "ok", + "node_count": int(node_count) + }) + except Exception as e: + neo4j_check.update({ + "status": "error", + "error": str(e) + }) + diagnostics_result["checks"]["neo4j"] = neo4j_check + + # Check data integrity + try: + integrity = neo4j_service.validate_data_integrity() + neo4j_check["data_integrity"] = { + "total_stacks": len(integrity), + "complete_stacks": len([s for s in integrity if all([ + s["has_price_tier"], s["has_frontend"], s["has_backend"], + s["has_database"], s["has_cloud"] + ])]) + } + except Exception as e: + neo4j_check["data_integrity"] = {"error": str(e)} + + return diagnostics_result + +# ================================================================================================ +# RECOMMENDATION ENDPOINTS +# ================================================================================================ + +class RecommendBestRequest(BaseModel): + domain: Optional[str] = None + budget: Optional[float] = None + preferredTechnologies: Optional[List[str]] = None + +@app.post("/recommend/best") +async def recommend_best(req: RecommendBestRequest): + """Get recommendations using migrated data with price-based relationships""" + try: + if not req.budget or req.budget <= 0: + raise HTTPException(status_code=400, detail="Budget must be greater than 0") + + recommendations = neo4j_service.get_recommendations_by_budget( + budget=req.budget, + domain=req.domain, + preferred_techs=req.preferredTechnologies + ) + + return { + "success": True, + "recommendations": recommendations, + "count": len(recommendations), + "budget": req.budget, + "domain": req.domain, + "data_source": "migrated_postgresql" + } + except Exception as e: + logger.error(f"Error in recommendations: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/price-tiers") +async def get_price_tiers(): + """Get all price tiers with analysis""" + try: + analysis = neo4j_service.get_price_tier_analysis() + return { + "success": True, + "price_tiers": analysis, + "count": len(analysis) + } + except Exception as e: + logger.error(f"Error fetching price tiers: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/technologies/{tier_name}") +async def get_technologies_by_tier(tier_name: str): + """Get technologies for a specific price tier""" + try: + technologies = neo4j_service.get_technologies_by_price_tier(tier_name) + return { + "success": True, + "tier_name": tier_name, + "technologies": technologies, + "count": len(technologies) + } + except Exception as e: + logger.error(f"Error fetching technologies for tier {tier_name}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/tools/{tier_name}") +async def get_tools_by_tier(tier_name: str): + """Get tools for a specific price tier""" + try: + tools = neo4j_service.get_tools_by_price_tier(tier_name) + return { + "success": True, + "tier_name": tier_name, + "tools": tools, + "count": len(tools) + } + except Exception as e: + logger.error(f"Error fetching tools for tier {tier_name}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/combinations/optimal") +async def get_optimal_combinations(budget: float, category: str): + """Get optimal technology combinations within budget""" + try: + if budget <= 0: + raise HTTPException(status_code=400, detail="Budget must be greater than 0") + + combinations = neo4j_service.get_optimal_combinations(budget, category) + return { + "success": True, + "combinations": combinations, + "count": len(combinations), + "budget": budget, + "category": category + } + except Exception as e: + logger.error(f"Error finding optimal combinations: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/compatibility/{tech_name}") +async def get_compatibility_analysis(tech_name: str): + """Get compatibility analysis for a technology""" + try: + compatibility = neo4j_service.get_compatibility_analysis(tech_name) + return { + "success": True, + "tech_name": tech_name, + "compatible_technologies": compatibility, + "count": len(compatibility) + } + except Exception as e: + logger.error(f"Error fetching compatibility for {tech_name}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/validate/integrity") +async def validate_data_integrity(): + """Validate data integrity of migrated data""" + try: + integrity = neo4j_service.validate_data_integrity() + return { + "success": True, + "integrity_check": integrity, + "summary": { + "total_stacks": len(integrity), + "complete_stacks": len([s for s in integrity if all([ + s["has_price_tier"], s["has_frontend"], s["has_backend"], + s["has_database"], s["has_cloud"] + ])]), + "incomplete_stacks": len([s for s in integrity if not all([ + s["has_price_tier"], s["has_frontend"], s["has_backend"], + s["has_database"], s["has_cloud"] + ])]) + } + } + except Exception as e: + logger.error(f"Error validating data integrity: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/api/domains") +async def get_available_domains(): + """Get all available domains""" + try: + domains = neo4j_service.get_available_domains() + return { + "success": True, + "domains": domains, + "count": len(domains) + } + except Exception as e: + logger.error(f"Error fetching domains: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +# ================================================================================================ +# MAIN ENTRY POINT +# ================================================================================================ + +if __name__ == "__main__": + import uvicorn + + logger.info("="*60) + logger.info("🚀 ENHANCED TECH STACK SELECTOR v15.0 - MIGRATED VERSION") + logger.info("="*60) + logger.info("✅ Migrated PostgreSQL data to Neo4j") + logger.info("✅ Price-based relationships") + logger.info("✅ Real data from PostgreSQL") + logger.info("✅ Claude AI recommendations") + logger.info("✅ Comprehensive pricing analysis") + logger.info("="*60) + + uvicorn.run("main_migrated:app", host="0.0.0.0", port=8002, log_level="info") diff --git a/services/tech-stack-selector/src/postgres_to_neo4j_migration.py b/services/tech-stack-selector/src/postgres_to_neo4j_migration.py new file mode 100644 index 0000000..a1206c2 --- /dev/null +++ b/services/tech-stack-selector/src/postgres_to_neo4j_migration.py @@ -0,0 +1,722 @@ +# ================================================================================================ +# POSTGRESQL TO NEO4J MIGRATION SERVICE +# Migrates existing PostgreSQL data to Neo4j with price-based relationships +# ================================================================================================ + +import os +import sys +from datetime import datetime +from typing import Dict, Any, Optional, List, Tuple +from neo4j import GraphDatabase +import psycopg2 +from psycopg2.extras import RealDictCursor +from loguru import logger + +class PostgresToNeo4jMigration: + def __init__(self, + postgres_config: Dict[str, Any], + neo4j_config: Dict[str, Any]): + """ + Initialize migration service with PostgreSQL and Neo4j configurations + """ + self.postgres_config = postgres_config + self.neo4j_config = neo4j_config + self.postgres_conn = None + self.neo4j_driver = None + + def connect_postgres(self): + """Connect to PostgreSQL database""" + try: + self.postgres_conn = psycopg2.connect(**self.postgres_config) + logger.info("✅ Connected to PostgreSQL successfully") + return True + except Exception as e: + logger.error(f"❌ PostgreSQL connection failed: {e}") + return False + + def connect_neo4j(self): + """Connect to Neo4j database""" + try: + self.neo4j_driver = GraphDatabase.driver( + self.neo4j_config["uri"], + auth=(self.neo4j_config["user"], self.neo4j_config["password"]) + ) + self.neo4j_driver.verify_connectivity() + logger.info("✅ Connected to Neo4j successfully") + return True + except Exception as e: + logger.error(f"❌ Neo4j connection failed: {e}") + return False + + def close_connections(self): + """Close all database connections""" + if self.postgres_conn: + self.postgres_conn.close() + if self.neo4j_driver: + self.neo4j_driver.close() + + def run_postgres_query(self, query: str, params: Optional[Dict] = None): + """Execute PostgreSQL query and return results""" + with self.postgres_conn.cursor(cursor_factory=RealDictCursor) as cursor: + cursor.execute(query, params or {}) + return cursor.fetchall() + + def run_neo4j_query(self, query: str, params: Optional[Dict] = None): + """Execute Neo4j query""" + with self.neo4j_driver.session() as session: + result = session.run(query, params or {}) + return [record.data() for record in result] + + def migrate_price_tiers(self): + """Migrate price tiers from PostgreSQL to Neo4j""" + logger.info("🔄 Migrating price tiers...") + + # Get price tiers from PostgreSQL + price_tiers = self.run_postgres_query(""" + SELECT id, tier_name, min_price_usd, max_price_usd, + target_audience, typical_project_scale, description + FROM price_tiers + ORDER BY min_price_usd + """) + + # Create price tier nodes in Neo4j + for tier in price_tiers: + # Convert decimal values to float + tier_data = dict(tier) + tier_data['min_price_usd'] = float(tier_data['min_price_usd']) + tier_data['max_price_usd'] = float(tier_data['max_price_usd']) + + query = """ + CREATE (p:PriceTier { + id: $id, + tier_name: $tier_name, + min_price_usd: $min_price_usd, + max_price_usd: $max_price_usd, + target_audience: $target_audience, + typical_project_scale: $typical_project_scale, + description: $description, + migrated_at: datetime() + }) + """ + self.run_neo4j_query(query, tier_data) + + logger.info(f"✅ Migrated {len(price_tiers)} price tiers") + return len(price_tiers) + + def migrate_technologies(self): + """Migrate all technology categories from PostgreSQL to Neo4j""" + logger.info("🔄 Migrating technologies...") + + technology_tables = [ + ("frontend_technologies", "frontend"), + ("backend_technologies", "backend"), + ("database_technologies", "database"), + ("cloud_technologies", "cloud"), + ("testing_technologies", "testing"), + ("mobile_technologies", "mobile"), + ("devops_technologies", "devops"), + ("ai_ml_technologies", "ai_ml") + ] + + total_technologies = 0 + + for table_name, category in technology_tables: + logger.info(f" 📊 Migrating {category} technologies...") + + # Get technologies from PostgreSQL + technologies = self.run_postgres_query(f""" + SELECT * FROM {table_name} + ORDER BY name + """) + + # Create technology nodes in Neo4j + for tech in technologies: + # Convert PostgreSQL row to Neo4j properties + properties = dict(tech) + properties['category'] = category + properties['migrated_at'] = datetime.now().isoformat() + + # Convert decimal values to float + for key, value in properties.items(): + if hasattr(value, '__class__') and 'Decimal' in str(value.__class__): + properties[key] = float(value) + + # Create the node (use MERGE to handle duplicates) + query = f""" + MERGE (t:Technology {{name: $name}}) + SET t += {{ + {', '.join([f'{k}: ${k}' for k in properties.keys() if k != 'name'])} + }} + SET t:{category.title()} + """ + self.run_neo4j_query(query, properties) + + logger.info(f" ✅ Migrated {len(technologies)} {category} technologies") + total_technologies += len(technologies) + + logger.info(f"✅ Total technologies migrated: {total_technologies}") + return total_technologies + + def migrate_tech_pricing(self): + """Migrate technology pricing data""" + logger.info("🔄 Migrating technology pricing...") + + # Get tech pricing from PostgreSQL + pricing_data = self.run_postgres_query(""" + SELECT tp.*, pt.tier_name as price_tier_name + FROM tech_pricing tp + JOIN price_tiers pt ON tp.price_tier_id = pt.id + ORDER BY tp.tech_name + """) + + # Update technologies with pricing data + for pricing in pricing_data: + # Convert decimal values to float + pricing_dict = dict(pricing) + for key, value in pricing_dict.items(): + if hasattr(value, '__class__') and 'Decimal' in str(value.__class__): + pricing_dict[key] = float(value) + + # Update technology with pricing + query = """ + MATCH (t:Technology {name: $tech_name}) + SET t.monthly_cost_usd = $monthly_operational_cost_usd, + t.setup_cost_usd = $development_cost_usd, + t.license_cost_usd = $license_cost_usd, + t.training_cost_usd = $training_cost_usd, + t.total_cost_of_ownership_score = $total_cost_of_ownership_score, + t.price_performance_ratio = $price_performance_ratio, + t.price_tier_name = $price_tier_name, + t.min_cpu_cores = $min_cpu_cores, + t.min_ram_gb = $min_ram_gb, + t.min_storage_gb = $min_storage_gb + """ + self.run_neo4j_query(query, pricing_dict) + + logger.info(f"✅ Updated {len(pricing_data)} technologies with pricing data") + return len(pricing_data) + + def migrate_price_based_stacks(self): + """Migrate complete tech stacks from price_based_stacks table""" + logger.info("🔄 Migrating price-based tech stacks...") + + # Get price-based stacks from PostgreSQL + stacks = self.run_postgres_query(""" + SELECT pbs.*, pt.tier_name as price_tier_name + FROM price_based_stacks pbs + JOIN price_tiers pt ON pbs.price_tier_id = pt.id + ORDER BY pbs.total_monthly_cost_usd + """) + + # Create tech stack nodes in Neo4j + for stack in stacks: + # Convert decimal values to float + stack_dict = dict(stack) + for key, value in stack_dict.items(): + if hasattr(value, '__class__') and 'Decimal' in str(value.__class__): + stack_dict[key] = float(value) + + # Create the tech stack node + query = """ + CREATE (s:TechStack { + name: $stack_name, + monthly_cost: $total_monthly_cost_usd, + setup_cost: $total_setup_cost_usd, + team_size_range: $team_size_range, + development_time_months: $development_time_months, + satisfaction_score: $user_satisfaction_score, + success_rate: $success_rate_percentage, + price_tier: $price_tier_name, + maintenance_complexity: $maintenance_complexity, + scalability_ceiling: $scalability_ceiling, + recommended_domains: $recommended_domains, + description: $description, + pros: $pros, + cons: $cons, + frontend_tech: $frontend_tech, + backend_tech: $backend_tech, + database_tech: $database_tech, + cloud_tech: $cloud_tech, + testing_tech: $testing_tech, + mobile_tech: $mobile_tech, + devops_tech: $devops_tech, + ai_ml_tech: $ai_ml_tech, + migrated_at: datetime() + }) + """ + self.run_neo4j_query(query, stack_dict) + + logger.info(f"✅ Migrated {len(stacks)} price-based tech stacks") + return len(stacks) + + def migrate_stack_recommendations(self): + """Migrate domain-specific stack recommendations""" + logger.info("🔄 Migrating stack recommendations...") + + # Get stack recommendations from PostgreSQL + # Handle case where price_tier_id might be NULL + recommendations = self.run_postgres_query(""" + SELECT sr.*, + COALESCE(pt.tier_name, 'Not Specified') as price_tier_name, + pbs.stack_name, + pbs.price_tier_id as stack_price_tier_id + FROM stack_recommendations sr + LEFT JOIN price_tiers pt ON sr.price_tier_id = pt.id + JOIN price_based_stacks pbs ON sr.recommended_stack_id = pbs.id + ORDER BY sr.business_domain, sr.confidence_score DESC + """) + + # Create domain nodes and recommendations + for rec in recommendations: + # Convert arrays to lists + rec_dict = dict(rec) + for key, value in rec_dict.items(): + if hasattr(value, '__class__') and 'list' in str(value.__class__): + rec_dict[key] = list(value) + + # Create domain node + domain_query = """ + MERGE (d:Domain {name: $business_domain}) + SET d.project_scale = $project_scale, + d.team_experience_level = $team_experience_level + """ + self.run_neo4j_query(domain_query, rec_dict) + + # Get the actual price tier for the stack + stack_tier_query = """ + MATCH (s:TechStack {name: $stack_name})-[:BELONGS_TO_TIER]->(pt:PriceTier) + RETURN pt.tier_name as actual_tier_name + """ + tier_result = self.run_neo4j_query(stack_tier_query, {"stack_name": rec_dict["stack_name"]}) + actual_tier = tier_result[0]["actual_tier_name"] if tier_result else rec_dict["price_tier_name"] + + # Create recommendation relationship + rec_query = """ + MATCH (d:Domain {name: $business_domain}) + MATCH (s:TechStack {name: $stack_name}) + CREATE (d)-[:RECOMMENDS { + confidence_score: $confidence_score, + recommendation_reasons: $recommendation_reasons, + potential_risks: $potential_risks, + alternative_stacks: $alternative_stacks, + price_tier: $actual_tier + }]->(s) + """ + rec_dict["actual_tier"] = actual_tier + self.run_neo4j_query(rec_query, rec_dict) + + logger.info(f"✅ Migrated {len(recommendations)} stack recommendations") + return len(recommendations) + + def migrate_tools(self): + """Migrate tools with pricing from PostgreSQL to Neo4j""" + logger.info("🔄 Migrating tools with pricing...") + + # Get tools with pricing from PostgreSQL + tools = self.run_postgres_query(""" + SELECT t.*, pt.tier_name as price_tier_name + FROM tools t + LEFT JOIN price_tiers pt ON t.price_tier_id = pt.id + ORDER BY t.name + """) + + # Create tool nodes in Neo4j + for tool in tools: + properties = dict(tool) + properties['migrated_at'] = datetime.now().isoformat() + + # Convert decimal values to float + for key, value in properties.items(): + if hasattr(value, '__class__') and 'Decimal' in str(value.__class__): + properties[key] = float(value) + + # Create the tool node (use MERGE to handle duplicates) + query = f""" + MERGE (tool:Tool {{name: $name}}) + SET tool += {{ + {', '.join([f'{k}: ${k}' for k in properties.keys() if k != 'name'])} + }} + """ + self.run_neo4j_query(query, properties) + + logger.info(f"✅ Migrated {len(tools)} tools") + return len(tools) + + def create_price_relationships(self): + """Create price-based relationships between technologies/tools and price tiers""" + logger.info("🔗 Creating price-based relationships...") + + # Create relationships for technologies + technology_categories = ["frontend", "backend", "database", "cloud", "testing", "mobile", "devops", "ai_ml"] + + for category in technology_categories: + logger.info(f" 📊 Creating price relationships for {category} technologies...") + + # Get technologies and their price tiers + query = f""" + MATCH (t:Technology {{category: '{category}'}}) + MATCH (p:PriceTier) + WHERE t.monthly_cost_usd >= p.min_price_usd + AND t.monthly_cost_usd <= p.max_price_usd + CREATE (t)-[:BELONGS_TO_TIER {{ + fit_score: CASE + WHEN t.monthly_cost_usd = 0.0 THEN 100.0 + ELSE 100.0 - ((t.monthly_cost_usd - p.min_price_usd) / (p.max_price_usd - p.min_price_usd) * 20.0) + END, + cost_efficiency: t.total_cost_of_ownership_score, + price_performance: t.price_performance_ratio + }}]->(p) + RETURN count(*) as relationships_created + """ + + result = self.run_neo4j_query(query) + if result: + logger.info(f" ✅ Created {result[0]['relationships_created']} price relationships for {category}") + + # Create relationships for tools + logger.info(" 📊 Creating price relationships for tools...") + query = """ + MATCH (tool:Tool) + MATCH (p:PriceTier) + WHERE tool.monthly_cost_usd >= p.min_price_usd + AND tool.monthly_cost_usd <= p.max_price_usd + CREATE (tool)-[:BELONGS_TO_TIER { + fit_score: CASE + WHEN tool.monthly_cost_usd = 0.0 THEN 100.0 + ELSE 100.0 - ((tool.monthly_cost_usd - p.min_price_usd) / (p.max_price_usd - p.min_price_usd) * 20.0) + END, + cost_efficiency: tool.total_cost_of_ownership_score, + price_performance: tool.price_performance_ratio + }]->(p) + RETURN count(*) as relationships_created + """ + + result = self.run_neo4j_query(query) + if result: + logger.info(f" ✅ Created {result[0]['relationships_created']} price relationships for tools") + + def create_technology_compatibility_relationships(self): + """Create compatibility relationships between technologies""" + logger.info("🔗 Creating technology compatibility relationships...") + + query = """ + MATCH (t1:Technology), (t2:Technology) + WHERE t1.name <> t2.name + AND ( + // Same category, different technologies + (t1.category = t2.category AND t1.name <> t2.name) OR + // Frontend-Backend compatibility + (t1.category = "frontend" AND t2.category = "backend") OR + (t1.category = "backend" AND t2.category = "frontend") OR + // Backend-Database compatibility + (t1.category = "backend" AND t2.category = "database") OR + (t1.category = "database" AND t2.category = "backend") OR + // Cloud compatibility with all + (t1.category = "cloud" AND t2.category IN ["frontend", "backend", "database"]) OR + (t2.category = "cloud" AND t1.category IN ["frontend", "backend", "database"]) + ) + MERGE (t1)-[r:COMPATIBLE_WITH { + compatibility_score: CASE + WHEN t1.category = t2.category THEN 0.8 + WHEN (t1.category = "frontend" AND t2.category = "backend") THEN 0.9 + WHEN (t1.category = "backend" AND t2.category = "database") THEN 0.9 + WHEN (t1.category = "cloud" AND t2.category IN ["frontend", "backend", "database"]) THEN 0.85 + ELSE 0.7 + END, + integration_effort: CASE + WHEN t1.category = t2.category THEN "Low" + WHEN (t1.category = "frontend" AND t2.category = "backend") THEN "Medium" + WHEN (t1.category = "backend" AND t2.category = "database") THEN "Low" + WHEN (t1.category = "cloud" AND t2.category IN ["frontend", "backend", "database"]) THEN "Low" + ELSE "High" + END, + reason: "Auto-generated compatibility relationship", + created_at: datetime() + }]->(t2) + RETURN count(r) as relationships_created + """ + + result = self.run_neo4j_query(query) + if result: + logger.info(f"✅ Created {result[0]['relationships_created']} compatibility relationships") + + def create_tech_stack_relationships(self): + """Create relationships between tech stacks and their technologies""" + logger.info("🔗 Creating tech stack relationships...") + + # Create relationships for each technology type separately + tech_relationships = [ + ("frontend_tech", "USES_FRONTEND", "frontend"), + ("backend_tech", "USES_BACKEND", "backend"), + ("database_tech", "USES_DATABASE", "database"), + ("cloud_tech", "USES_CLOUD", "cloud"), + ("testing_tech", "USES_TESTING", "testing"), + ("mobile_tech", "USES_MOBILE", "mobile"), + ("devops_tech", "USES_DEVOPS", "devops"), + ("ai_ml_tech", "USES_AI_ML", "ai_ml") + ] + + total_relationships = 0 + + for tech_field, relationship_type, category in tech_relationships: + # For testing technologies, also check frontend category since some testing tools are categorized as frontend + if category == "testing": + query = f""" + MATCH (s:TechStack) + WHERE s.{tech_field} IS NOT NULL + MATCH (t:Technology {{name: s.{tech_field}}}) + WHERE t.category = '{category}' OR (t.category = 'frontend' AND s.{tech_field} IN ['Jest', 'Cypress', 'Playwright', 'Selenium', 'Vitest', 'Testing Library']) + MERGE (s)-[:{relationship_type} {{role: '{category}', importance: 'critical'}}]->(t) + RETURN count(s) as relationships_created + """ + else: + query = f""" + MATCH (s:TechStack) + WHERE s.{tech_field} IS NOT NULL + MATCH (t:Technology {{name: s.{tech_field}, category: '{category}'}}) + MERGE (s)-[:{relationship_type} {{role: '{category}', importance: 'critical'}}]->(t) + RETURN count(s) as relationships_created + """ + + result = self.run_neo4j_query(query) + if result: + count = result[0]['relationships_created'] + total_relationships += count + logger.info(f" ✅ Created {count} {relationship_type} relationships") + + logger.info(f"✅ Created {total_relationships} total tech stack relationships") + + # Create price tier relationships for tech stacks + price_tier_query = """ + MATCH (s:TechStack) + MATCH (p:PriceTier {tier_name: s.price_tier}) + MERGE (s)-[:BELONGS_TO_TIER {fit_score: 100.0}]->(p) + RETURN count(s) as relationships_created + """ + + result = self.run_neo4j_query(price_tier_query) + if result: + logger.info(f"✅ Created price tier relationships for {result[0]['relationships_created']} tech stacks") + + def create_optimal_tech_stacks(self, max_stacks_per_tier: int = 5): + """Create optimal tech stacks based on price tiers and compatibility""" + logger.info("🏗️ Creating optimal tech stacks...") + + # Get price tiers + price_tiers = self.run_neo4j_query("MATCH (p:PriceTier) RETURN p ORDER BY p.min_price_usd") + + total_stacks = 0 + + for tier in price_tiers: + tier_name = tier['p']['tier_name'] + min_price = tier['p']['min_price_usd'] + max_price = tier['p']['max_price_usd'] + + logger.info(f" 📊 Creating stacks for {tier_name} (${min_price}-${max_price})...") + + # Find optimal combinations within this price tier + query = """ + MATCH (frontend:Technology {category: "frontend"})-[:BELONGS_TO_TIER]->(p:PriceTier {tier_name: $tier_name}) + MATCH (backend:Technology {category: "backend"})-[:BELONGS_TO_TIER]->(p) + MATCH (database:Technology {category: "database"})-[:BELONGS_TO_TIER]->(p) + MATCH (cloud:Technology {category: "cloud"})-[:BELONGS_TO_TIER]->(p) + + WITH frontend, backend, database, cloud, p, + (frontend.monthly_cost_usd + backend.monthly_cost_usd + + database.monthly_cost_usd + cloud.monthly_cost_usd) as total_cost, + (frontend.total_cost_of_ownership_score + backend.total_cost_of_ownership_score + + database.total_cost_of_ownership_score + cloud.total_cost_of_ownership_score) as total_score + + WHERE total_cost >= p.min_price_usd AND total_cost <= p.max_price_usd + + WITH frontend, backend, database, cloud, total_cost, total_score, + (total_score / 4.0) as avg_score, + (100.0 - ((total_cost - p.min_price_usd) / (p.max_price_usd - p.min_price_usd) * 20.0)) as budget_efficiency + + ORDER BY avg_score DESC, budget_efficiency DESC, total_cost ASC + LIMIT $max_stacks + + CREATE (s:TechStack { + name: "Optimal " + $tier_name + " Stack - $" + toString(round(total_cost)) + "/month", + monthly_cost: total_cost, + setup_cost: total_cost * 0.5, + team_size_range: CASE + WHEN $tier_name = "Micro Budget" THEN "1-2" + WHEN $tier_name = "Startup Budget" THEN "2-4" + WHEN $tier_name = "Small Business" THEN "3-6" + WHEN $tier_name = "Growth Stage" THEN "5-10" + ELSE "8-15" + END, + development_time_months: CASE + WHEN $tier_name = "Micro Budget" THEN 1 + WHEN $tier_name = "Startup Budget" THEN 2 + WHEN $tier_name = "Small Business" THEN 3 + WHEN $tier_name = "Growth Stage" THEN 4 + ELSE 6 + END, + satisfaction_score: toInteger(avg_score), + success_rate: toInteger(avg_score * 0.9), + price_tier: $tier_name, + budget_efficiency: budget_efficiency, + created_at: datetime() + }) + + CREATE (s)-[:BELONGS_TO_TIER {fit_score: budget_efficiency}]->(p) + CREATE (s)-[:USES_FRONTEND {role: "frontend", importance: "critical"}]->(frontend) + CREATE (s)-[:USES_BACKEND {role: "backend", importance: "critical"}]->(backend) + CREATE (s)-[:USES_DATABASE {role: "database", importance: "critical"}]->(database) + CREATE (s)-[:USES_CLOUD {role: "cloud", importance: "critical"}]->(cloud) + + RETURN count(s) as stacks_created + """ + + result = self.run_neo4j_query(query, { + "tier_name": tier_name, + "max_stacks": max_stacks_per_tier + }) + + if result and result[0]['stacks_created'] > 0: + stacks_created = result[0]['stacks_created'] + logger.info(f" ✅ Created {stacks_created} optimal stacks for {tier_name}") + total_stacks += stacks_created + + logger.info(f"✅ Total tech stacks created: {total_stacks}") + return total_stacks + + def validate_migration(self): + """Validate the migration results""" + logger.info("🔍 Validating migration...") + + # Count nodes + node_counts = self.run_neo4j_query(""" + MATCH (n) + RETURN labels(n)[0] as label, count(n) as count + ORDER BY count DESC + """) + + logger.info("📊 Node counts:") + for item in node_counts: + logger.info(f" {item['label']}: {item['count']}") + + # Count relationships + rel_counts = self.run_neo4j_query(""" + MATCH ()-[r]->() + RETURN type(r) as type, count(r) as count + ORDER BY count DESC + """) + + logger.info("🔗 Relationship counts:") + for item in rel_counts: + logger.info(f" {item['type']}: {item['count']}") + + # Validate tech stacks + stack_validation = self.run_neo4j_query(""" + MATCH (s:TechStack) + RETURN s.name, + exists((s)-[:BELONGS_TO_TIER]->()) as has_price_tier, + exists((s)-[:USES_FRONTEND]->()) as has_frontend, + exists((s)-[:USES_BACKEND]->()) as has_backend, + exists((s)-[:USES_DATABASE]->()) as has_database, + exists((s)-[:USES_CLOUD]->()) as has_cloud + """) + + complete_stacks = [s for s in stack_validation if all([ + s['has_price_tier'], s['has_frontend'], s['has_backend'], + s['has_database'], s['has_cloud'] + ])] + + logger.info(f"✅ Complete tech stacks: {len(complete_stacks)}/{len(stack_validation)}") + + return { + "node_counts": node_counts, + "relationship_counts": rel_counts, + "complete_stacks": len(complete_stacks), + "total_stacks": len(stack_validation) + } + + def run_full_migration(self): + """Run the complete migration process""" + logger.info("🚀 Starting PostgreSQL to Neo4j migration...") + + try: + # Connect to databases + if not self.connect_postgres(): + return False + if not self.connect_neo4j(): + return False + + # Clear Neo4j + logger.info("🧹 Clearing Neo4j database...") + self.run_neo4j_query("MATCH (n) DETACH DELETE n") + + # Run migrations + price_tiers_count = self.migrate_price_tiers() + technologies_count = self.migrate_technologies() + tech_pricing_count = self.migrate_tech_pricing() + price_based_stacks_count = self.migrate_price_based_stacks() + stack_recommendations_count = self.migrate_stack_recommendations() + tools_count = self.migrate_tools() + + # Create relationships + self.create_price_relationships() + self.create_technology_compatibility_relationships() + self.create_tech_stack_relationships() + + # Create optimal tech stacks (only if no existing stacks) + if price_based_stacks_count == 0: + stacks_count = self.create_optimal_tech_stacks() + else: + stacks_count = price_based_stacks_count + + # Validate migration + validation = self.validate_migration() + + logger.info("🎉 Migration completed successfully!") + logger.info(f"📊 Summary:") + logger.info(f" Price tiers: {price_tiers_count}") + logger.info(f" Technologies: {technologies_count}") + logger.info(f" Tech pricing: {tech_pricing_count}") + logger.info(f" Price-based stacks: {price_based_stacks_count}") + logger.info(f" Stack recommendations: {stack_recommendations_count}") + logger.info(f" Tools: {tools_count}") + logger.info(f" Total tech stacks: {stacks_count}") + logger.info(f" Complete stacks: {validation['complete_stacks']}/{validation['total_stacks']}") + + return True + + except Exception as e: + logger.error(f"❌ Migration failed: {e}") + return False + finally: + self.close_connections() + +# ================================================================================================ +# MAIN EXECUTION +# ================================================================================================ + +if __name__ == "__main__": + # Configuration + postgres_config = { + "host": os.getenv("POSTGRES_HOST", "localhost"), + "port": int(os.getenv("POSTGRES_PORT", "5432")), + "user": os.getenv("POSTGRES_USER", "pipeline_admin"), + "password": os.getenv("POSTGRES_PASSWORD", "secure_pipeline_2024"), + "database": os.getenv("POSTGRES_DB", "dev_pipeline") + } + + neo4j_config = { + "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"), + "user": os.getenv("NEO4J_USER", "neo4j"), + "password": os.getenv("NEO4J_PASSWORD", "password") + } + + # Run migration + migration = PostgresToNeo4jMigration(postgres_config, neo4j_config) + success = migration.run_full_migration() + + if success: + logger.info("✅ Migration completed successfully!") + sys.exit(0) + else: + logger.error("❌ Migration failed!") + sys.exit(1) diff --git a/services/tech-stack-selector/start.sh b/services/tech-stack-selector/start.sh new file mode 100644 index 0000000..28b9e03 --- /dev/null +++ b/services/tech-stack-selector/start.sh @@ -0,0 +1,431 @@ +#!/bin/bash + +# ================================================================================================ +# ENHANCED TECH STACK SELECTOR - MIGRATED VERSION STARTUP SCRIPT +# Uses PostgreSQL data migrated to Neo4j with proper price-based relationships +# ================================================================================================ + +set -e + +# Parse command line arguments +FORCE_MIGRATION=false +if [ "$1" = "--force-migration" ] || [ "$1" = "-f" ]; then + FORCE_MIGRATION=true + echo "🔄 Force migration mode enabled" +elif [ "$1" = "--help" ] || [ "$1" = "-h" ]; then + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --force-migration, -f Force re-run all migrations" + echo " --help, -h Show this help message" + echo "" + echo "Examples:" + echo " $0 # Normal startup with auto-migration detection" + echo " $0 --force-migration # Force re-run all migrations" + exit 0 +fi + +echo "="*60 +echo "🚀 ENHANCED TECH STACK SELECTOR v15.0 - MIGRATED VERSION" +echo "="*60 +echo "✅ PostgreSQL data migrated to Neo4j" +echo "✅ Price-based relationships" +echo "✅ Real data from PostgreSQL" +echo "✅ Comprehensive pricing analysis" +echo "="*60 + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Function to print colored output +print_status() { + echo -e "${GREEN}✅ $1${NC}" +} + +print_warning() { + echo -e "${YELLOW}⚠️ $1${NC}" +} + +print_error() { + echo -e "${RED}❌ $1${NC}" +} + +print_info() { + echo -e "${BLUE}ℹ️ $1${NC}" +} + +# Check if Python is available +if ! command -v python3 &> /dev/null; then + print_error "Python3 is not installed or not in PATH" + exit 1 +fi + +print_status "Python3 found: $(python3 --version)" + +# Check if pip is available +if ! command -v pip3 &> /dev/null; then + print_error "pip3 is not installed or not in PATH" + exit 1 +fi + +print_status "pip3 found: $(pip3 --version)" + +# Check if psql is available +if ! command -v psql &> /dev/null; then + print_error "psql is not installed or not in PATH" + print_info "Please install PostgreSQL client tools:" + print_info " Ubuntu/Debian: sudo apt-get install postgresql-client" + print_info " CentOS/RHEL: sudo yum install postgresql" + print_info " macOS: brew install postgresql" + exit 1 +fi + +print_status "psql found: $(psql --version)" + +# Check if createdb is available +if ! command -v createdb &> /dev/null; then + print_error "createdb is not installed or not in PATH" + print_info "Please install PostgreSQL client tools:" + print_info " Ubuntu/Debian: sudo apt-get install postgresql-client" + print_info " CentOS/RHEL: sudo yum install postgresql" + print_info " macOS: brew install postgresql" + exit 1 +fi + +print_status "createdb found: $(createdb --version)" + +# Install/upgrade required packages +print_info "Installing/upgrading required packages..." +pip3 install --upgrade fastapi uvicorn neo4j psycopg2-binary anthropic loguru pydantic + +# Function to create database if it doesn't exist +create_database_if_not_exists() { + print_info "Checking if database 'dev_pipeline' exists..." + + # Try to connect to the specific database + if python3 -c " +import psycopg2 +try: + conn = psycopg2.connect( + host='localhost', + port=5432, + user='pipeline_admin', + password='secure_pipeline_2024', + database='dev_pipeline' + ) + conn.close() + print('Database dev_pipeline exists') +except Exception as e: + print(f'Database dev_pipeline does not exist: {e}') + exit(1) +" 2>/dev/null; then + print_status "Database 'dev_pipeline' exists" + return 0 + else + print_warning "Database 'dev_pipeline' does not exist - creating it..." + + # Try to create the database + if createdb -h localhost -p 5432 -U pipeline_admin dev_pipeline 2>/dev/null; then + print_status "Database 'dev_pipeline' created successfully" + return 0 + else + print_error "Failed to create database 'dev_pipeline'" + print_info "Please create the database manually:" + print_info " createdb -h localhost -p 5432 -U pipeline_admin dev_pipeline" + return 1 + fi + fi +} + +# Check if PostgreSQL is running +print_info "Checking PostgreSQL connection..." +if ! python3 -c " +import psycopg2 +try: + conn = psycopg2.connect( + host='localhost', + port=5432, + user='pipeline_admin', + password='secure_pipeline_2024', + database='postgres' + ) + conn.close() + print('PostgreSQL connection successful') +except Exception as e: + print(f'PostgreSQL connection failed: {e}') + exit(1) +" 2>/dev/null; then + print_error "PostgreSQL is not running or not accessible" + print_info "Please ensure PostgreSQL is running and accessible" + exit 1 +fi + +print_status "PostgreSQL is running and accessible" + +# Create database if it doesn't exist +if ! create_database_if_not_exists; then + exit 1 +fi + +# Function to check if database needs migration +check_database_migration() { + print_info "Checking if database needs migration..." + + # Check if price_tiers table exists and has data + if ! python3 -c " +import psycopg2 +try: + conn = psycopg2.connect( + host='localhost', + port=5432, + user='pipeline_admin', + password='secure_pipeline_2024', + database='dev_pipeline' + ) + cursor = conn.cursor() + + # Check if price_tiers table exists + cursor.execute(\"\"\" + SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'price_tiers' + ); + \"\"\") + table_exists = cursor.fetchone()[0] + + if not table_exists: + print('price_tiers table does not exist - migration needed') + exit(1) + + # Check if price_tiers has data + cursor.execute('SELECT COUNT(*) FROM price_tiers;') + count = cursor.fetchone()[0] + + if count == 0: + print('price_tiers table is empty - migration needed') + exit(1) + + # Check if stack_recommendations has sufficient data (should have more than 8 records) + cursor.execute('SELECT COUNT(*) FROM stack_recommendations;') + rec_count = cursor.fetchone()[0] + + if rec_count < 50: # Expect at least 50 domain recommendations + print(f'stack_recommendations has only {rec_count} records - migration needed for additional domains') + exit(1) + + # Check for specific new domains + cursor.execute(\"\"\" + SELECT COUNT(DISTINCT business_domain) FROM stack_recommendations + WHERE business_domain IN ('healthcare', 'finance', 'gaming', 'education', 'media', 'iot', 'social', 'elearning', 'realestate', 'travel', 'manufacturing', 'ecommerce', 'saas') + \"\"\") + new_domains_count = cursor.fetchone()[0] + + if new_domains_count < 12: # Expect at least 12 domains + print(f'Only {new_domains_count} domains found - migration needed for additional domains') + exit(1) + + print('Database appears to be fully migrated with all domains') + cursor.close() + conn.close() + +except Exception as e: + print(f'Error checking database: {e}') + exit(1) +" 2>/dev/null; then + return 1 # Migration needed + else + return 0 # Migration not needed + fi +} + +# Function to run PostgreSQL migrations +run_postgres_migrations() { + print_info "Running PostgreSQL migrations..." + + # Migration files in order + migration_files=( + "db/001_schema.sql" + "db/002_tools_migration.sql" + "db/003_tools_pricing_migration.sql" + ) + + # Set PGPASSWORD to avoid password prompts + export PGPASSWORD="secure_pipeline_2024" + + for migration_file in "${migration_files[@]}"; do + if [ ! -f "$migration_file" ]; then + print_error "Migration file not found: $migration_file" + exit 1 + fi + + print_info "Running migration: $migration_file" + + # Run migration with error handling + if psql -h localhost -p 5432 -U pipeline_admin -d dev_pipeline -f "$migration_file" -q 2>/dev/null; then + print_status "Migration completed: $migration_file" + else + print_error "Migration failed: $migration_file" + print_info "Check the error logs above for details" + print_info "You may need to run the migration manually:" + print_info " psql -h localhost -p 5432 -U pipeline_admin -d dev_pipeline -f $migration_file" + exit 1 + fi + done + + # Unset password + unset PGPASSWORD + + print_status "All PostgreSQL migrations completed successfully" +} + +# Check if migration is needed and run if necessary +if [ "$FORCE_MIGRATION" = true ]; then + print_warning "Force migration enabled - running migrations..." + run_postgres_migrations + + # Verify migration was successful + print_info "Verifying migration..." + if check_database_migration; then + print_status "Migration verification successful" + else + print_error "Migration verification failed" + exit 1 + fi +elif check_database_migration; then + print_status "Database is already migrated" +else + print_warning "Database needs migration - running migrations..." + run_postgres_migrations + + # Verify migration was successful + print_info "Verifying migration..." + if check_database_migration; then + print_status "Migration verification successful" + else + print_error "Migration verification failed" + exit 1 + fi +fi + +# Show migration summary +print_info "Migration Summary:" +python3 -c " +import psycopg2 +try: + conn = psycopg2.connect( + host='localhost', + port=5432, + user='pipeline_admin', + password='secure_pipeline_2024', + database='dev_pipeline' + ) + cursor = conn.cursor() + + # Get table counts + tables = ['price_tiers', 'frontend_technologies', 'backend_technologies', 'database_technologies', + 'cloud_technologies', 'testing_technologies', 'mobile_technologies', 'devops_technologies', + 'ai_ml_technologies', 'tools', 'price_based_stacks', 'stack_recommendations'] + + print('📊 Database Statistics:') + for table in tables: + try: + cursor.execute(f'SELECT COUNT(*) FROM {table};') + count = cursor.fetchone()[0] + print(f' {table}: {count} records') + except Exception as e: + print(f' {table}: Error - {e}') + + cursor.close() + conn.close() +except Exception as e: + print(f'Error getting migration summary: {e}') +" 2>/dev/null + +# Check if Neo4j is running +print_info "Checking Neo4j connection..." +if ! python3 -c " +from neo4j import GraphDatabase +try: + driver = GraphDatabase.driver('bolt://localhost:7687', auth=('neo4j', 'password')) + driver.verify_connectivity() + print('Neo4j connection successful') + driver.close() +except Exception as e: + print(f'Neo4j connection failed: {e}') + exit(1) +" 2>/dev/null; then + print_error "Neo4j is not running or not accessible" + print_info "Please start Neo4j first:" + print_info " docker run -d --name neo4j -p 7474:7474 -p 7687:7687 -e NEO4J_AUTH=neo4j/password neo4j:latest" + print_info " Wait for Neo4j to start (check http://localhost:7474)" + exit 1 +fi + +print_status "Neo4j is running and accessible" + +# Check if migration has been run +print_info "Checking if migration has been completed..." +if ! python3 -c " +from neo4j import GraphDatabase +try: + driver = GraphDatabase.driver('bolt://localhost:7687', auth=('neo4j', 'password')) + with driver.session() as session: + result = session.run('MATCH (p:PriceTier) RETURN count(p) as count') + price_tiers = result.single()['count'] + if price_tiers == 0: + print('No data found in Neo4j - migration needed') + exit(1) + else: + print(f'Found {price_tiers} price tiers - migration appears complete') + driver.close() +except Exception as e: + print(f'Error checking migration status: {e}') + exit(1) +" 2>/dev/null; then + print_warning "No data found in Neo4j - running migration..." + + # Run migration + if python3 migrate_postgres_to_neo4j.py; then + print_status "Migration completed successfully" + else + print_error "Migration failed" + exit 1 + fi +else + print_status "Migration appears to be complete" +fi + +# Set environment variables +export NEO4J_URI="bolt://localhost:7687" +export NEO4J_USER="neo4j" +export NEO4J_PASSWORD="password" +export POSTGRES_HOST="localhost" +export POSTGRES_PORT="5432" +export POSTGRES_USER="pipeline_admin" +export POSTGRES_PASSWORD="secure_pipeline_2024" +export POSTGRES_DB="dev_pipeline" +export CLAUDE_API_KEY="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" + +print_status "Environment variables set" + +# Create logs directory if it doesn't exist +mkdir -p logs + +# Start the migrated application +print_info "Starting Enhanced Tech Stack Selector (Migrated Version)..." +print_info "Server will be available at: http://localhost:8002" +print_info "API documentation: http://localhost:8002/docs" +print_info "Health check: http://localhost:8002/health" +print_info "Diagnostics: http://localhost:8002/api/diagnostics" +print_info "" +print_info "Press Ctrl+C to stop the server" +print_info "" + +# Start the application +cd src +python3 main_migrated.py diff --git a/services/tech-stack-selector/test_domains.py b/services/tech-stack-selector/test_domains.py new file mode 100644 index 0000000..9c8f97e --- /dev/null +++ b/services/tech-stack-selector/test_domains.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +""" +Test script to verify domain recommendations are working properly +""" + +import requests +import json + +def test_domain_recommendations(): + """Test recommendations for different domains""" + + base_url = "http://localhost:8002" + + # Test domains + test_domains = [ + "saas", + "SaaS", # Test case sensitivity + "ecommerce", + "E-commerce", # Test case sensitivity and hyphen + "healthcare", + "finance", + "gaming", + "education", + "media", + "iot", + "social", + "elearning", + "realestate", + "travel", + "manufacturing", + "personal", + "startup", + "enterprise" + ] + + print("🧪 Testing Domain Recommendations") + print("=" * 50) + + for domain in test_domains: + print(f"\n🔍 Testing domain: '{domain}'") + + # Test recommendation endpoint + payload = { + "domain": domain, + "budget": 900.0 + } + + try: + response = requests.post(f"{base_url}/recommend/best", json=payload, timeout=10) + + if response.status_code == 200: + data = response.json() + recommendations = data.get('recommendations', []) + + print(f" ✅ Status: {response.status_code}") + print(f" 📝 Response: {recommendations}") + print(f" 📊 Recommendations: {len(recommendations)}") + + if recommendations: + print(f" 🏆 Top recommendation: {recommendations[0]['stack_name']}") + print(f" 💰 Cost: ${recommendations[0]['monthly_cost']}") + print(f" 🎯 Domains: {recommendations[0].get('recommended_domains', 'N/A')}") + else: + print(" ⚠️ No recommendations found") + else: + print(f" ❌ Error: {response.status_code}") + print(f" 📝 Response: {response.text}") + + except requests.exceptions.RequestException as e: + print(f" ❌ Request failed: {e}") + except Exception as e: + print(f" ❌ Unexpected error: {e}") + + # Test available domains endpoint + print(f"\n🌐 Testing available domains endpoint") + try: + response = requests.get(f"{base_url}/api/domains", timeout=10) + if response.status_code == 200: + data = response.json() + domains = data.get('domains', []) + print(f" ✅ Available domains: {len(domains)}") + for domain in domains: + print(f" - {domain['domain_name']} ({domain['project_scale']}, {domain['team_experience_level']})") + else: + print(f" ❌ Error: {response.status_code}") + except Exception as e: + print(f" ❌ Error: {e}") + +if __name__ == "__main__": + test_domain_recommendations() diff --git a/services/tech-stack-selector/test_migration.py b/services/tech-stack-selector/test_migration.py new file mode 100644 index 0000000..6b4ebed --- /dev/null +++ b/services/tech-stack-selector/test_migration.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +""" +Test script to verify PostgreSQL migration is working properly +""" + +import psycopg2 +import sys + +def test_database_migration(): + """Test if the database migration was successful""" + + try: + # Connect to PostgreSQL + conn = psycopg2.connect( + host='localhost', + port=5432, + user='pipeline_admin', + password='secure_pipeline_2024', + database='dev_pipeline' + ) + cursor = conn.cursor() + + print("🧪 Testing PostgreSQL Migration") + print("=" * 40) + + # Test tables exist + tables_to_check = [ + 'price_tiers', + 'frontend_technologies', + 'backend_technologies', + 'database_technologies', + 'cloud_technologies', + 'testing_technologies', + 'mobile_technologies', + 'devops_technologies', + 'ai_ml_technologies', + 'tools', + 'price_based_stacks', + 'stack_recommendations' + ] + + print("📋 Checking table existence:") + for table in tables_to_check: + cursor.execute(f""" + SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = '{table}' + ); + """) + exists = cursor.fetchone()[0] + status = "✅" if exists else "❌" + print(f" {status} {table}") + + print("\n📊 Checking data counts:") + for table in tables_to_check: + try: + cursor.execute(f'SELECT COUNT(*) FROM {table};') + count = cursor.fetchone()[0] + print(f" {table}: {count} records") + except Exception as e: + print(f" {table}: Error - {e}") + + # Test specific data + print("\n🔍 Testing specific data:") + + # Test price tiers + cursor.execute("SELECT tier_name, min_price_usd, max_price_usd FROM price_tiers ORDER BY min_price_usd;") + price_tiers = cursor.fetchall() + print(f" Price tiers: {len(price_tiers)}") + for tier in price_tiers: + print(f" - {tier[0]}: ${tier[1]} - ${tier[2]}") + + # Test stack recommendations + cursor.execute("SELECT business_domain, COUNT(*) FROM stack_recommendations GROUP BY business_domain;") + domains = cursor.fetchall() + print(f" Domain recommendations: {len(domains)}") + for domain in domains: + print(f" - {domain[0]}: {domain[1]} recommendations") + + # Test tools + cursor.execute("SELECT category, COUNT(*) FROM tools GROUP BY category;") + tool_categories = cursor.fetchall() + print(f" Tool categories: {len(tool_categories)}") + for category in tool_categories: + print(f" - {category[0]}: {category[1]} tools") + + cursor.close() + conn.close() + + print("\n✅ Database migration test completed successfully!") + return True + + except Exception as e: + print(f"\n❌ Database migration test failed: {e}") + return False + +if __name__ == "__main__": + success = test_database_migration() + sys.exit(0 if success else 1) diff --git a/services/template-manager/Dockerfile b/services/template-manager/Dockerfile index 217aa62..cf6b1e6 100644 --- a/services/template-manager/Dockerfile +++ b/services/template-manager/Dockerfile @@ -3,7 +3,7 @@ FROM node:18-alpine WORKDIR /app # Install curl for health checks -RUN apk add --no-cache curl +RUN apk add --no-cache curl python3 py3-pip py3-virtualenv # Ensure shared pipeline schema can be applied automatically when missing ENV APPLY_SCHEMAS_SQL=true @@ -17,6 +17,15 @@ RUN npm install # Copy source code COPY . . +# Setup Python venv and install AI dependencies if present +RUN if [ -f "/app/ai/requirements.txt" ]; then \ + python3 -m venv /opt/venv && \ + /opt/venv/bin/pip install --no-cache-dir -r /app/ai/requirements.txt; \ + fi + +# Ensure venv binaries are on PATH +ENV PATH="/opt/venv/bin:${PATH}" + # Create non-root user RUN addgroup -g 1001 -S nodejs RUN adduser -S template-manager -u 1001 @@ -26,11 +35,11 @@ RUN chown -R template-manager:nodejs /app USER template-manager # Expose port -EXPOSE 8009 +EXPOSE 8009 8013 # Health check HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:8009/health || exit 1 + CMD curl -f http://localhost:8009/health || curl -f http://localhost:8013/health || exit 1 # Start the application -CMD ["npm", "start"] \ No newline at end of file +CMD ["/bin/sh", "/app/start.sh"] \ No newline at end of file diff --git a/services/template-manager/add-sample-templates.js b/services/template-manager/add-sample-templates.js deleted file mode 100644 index 961381c..0000000 --- a/services/template-manager/add-sample-templates.js +++ /dev/null @@ -1,121 +0,0 @@ -require('dotenv').config(); -const database = require('./src/config/database'); - -const SAMPLE_TEMPLATES = [ - { - type: 'blog_platform', - title: 'Blog Platform', - description: 'Modern blog with content management, comments, and SEO', - icon: '📝', - category: 'Content', - gradient: 'from-purple-50 to-purple-100', - border: 'border-purple-200', - text: 'text-purple-900', - subtext: 'text-purple-700' - }, - { - type: 'task_manager', - title: 'Task Manager', - description: 'Project and task management with team collaboration', - icon: '✅', - category: 'Productivity', - gradient: 'from-green-50 to-green-100', - border: 'border-green-200', - text: 'text-green-900', - subtext: 'text-green-700' - }, - { - type: 'analytics_dashboard', - title: 'Analytics Dashboard', - description: 'Data visualization and business intelligence platform', - icon: '📊', - category: 'Business', - gradient: 'from-blue-50 to-blue-100', - border: 'border-blue-200', - text: 'text-blue-900', - subtext: 'text-blue-700' - }, - { - type: 'social_network', - title: 'Social Network', - description: 'Connect with friends, share content, and build communities', - icon: '🌐', - category: 'Social', - gradient: 'from-pink-50 to-pink-100', - border: 'border-pink-200', - text: 'text-pink-900', - subtext: 'text-pink-700' - }, - { - type: 'learning_platform', - title: 'Learning Platform', - description: 'Online courses, quizzes, and educational content', - icon: '🎓', - category: 'Education', - gradient: 'from-yellow-50 to-yellow-100', - border: 'border-yellow-200', - text: 'text-yellow-900', - subtext: 'text-yellow-700' - } -]; - -async function addSampleTemplates() { - const client = await database.connect(); - - try { - await client.query('BEGIN'); - - console.log('🚀 Adding sample templates...'); - - for (const template of SAMPLE_TEMPLATES) { - const query = ` - INSERT INTO templates ( - id, type, title, description, icon, category, - gradient, border, text, subtext, is_active, created_at, updated_at - ) VALUES ( - gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, true, NOW(), NOW() - ) - `; - - const values = [ - template.type, - template.title, - template.description, - template.icon, - template.category, - template.gradient, - template.border, - template.text, - template.subtext - ]; - - await client.query(query, values); - console.log(`✅ Added template: ${template.title}`); - } - - await client.query('COMMIT'); - console.log('🎉 Sample templates added successfully!'); - - } catch (error) { - await client.query('ROLLBACK'); - console.error('❌ Error adding sample templates:', error.message); - throw error; - } finally { - client.release(); - } -} - -// Run if called directly -if (require.main === module) { - addSampleTemplates() - .then(() => { - console.log('🎉 Process completed!'); - process.exit(0); - }) - .catch((error) => { - console.error('💥 Process failed:', error.message); - process.exit(1); - }); -} - -module.exports = { addSampleTemplates }; diff --git a/services/template-manager/ai/requirements.txt b/services/template-manager/ai/requirements.txt new file mode 100644 index 0000000..8e9dd1f --- /dev/null +++ b/services/template-manager/ai/requirements.txt @@ -0,0 +1,12 @@ +# Python dependencies for AI features +asyncpg==0.30.0 +anthropic>=0.34.0 +loguru==0.7.2 +requests==2.31.0 +python-dotenv==1.0.0 +neo4j==5.15.0 +fastapi==0.104.1 +uvicorn==0.24.0 +pydantic==2.11.9 +httpx>=0.25.0 + diff --git a/services/template-manager/ai/tech_stack_service.py b/services/template-manager/ai/tech_stack_service.py new file mode 100644 index 0000000..ba3ddc1 --- /dev/null +++ b/services/template-manager/ai/tech_stack_service.py @@ -0,0 +1,2031 @@ +# Copied from template-manager (2)/template-manager/tech_stack_service.py +# See original for full implementation details + + +#!/usr/bin/env python3 +""" +Complete Tech Stack Recommendation Service +Consolidated service that includes all essential functionality: +- AI-powered tech stack recommendations +- Claude API integration +- Feature extraction +- Neo4j knowledge graph operations +- Database operations +""" + +import os +import sys +import json +import asyncio +import asyncpg +from datetime import datetime +from typing import Dict, List, Any, Optional +from fastapi import FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel, Field +import uvicorn +from loguru import logger +import anthropic +import requests +from neo4j import AsyncGraphDatabase + +# Configure logging +logger.remove() +# Check if running as command line tool +if len(sys.argv) > 2 and sys.argv[1] == "--template-id": + # For command line usage, output logs to stderr + logger.add(lambda msg: print(msg, end="", file=sys.stderr), level="ERROR", format="{time} | {level} | {message}") +else: + # For server usage, output logs to stdout + logger.add(lambda msg: print(msg, end=""), level="INFO", format="{time} | {level} | {message}") + +# ============================================================================ +# PYDANTIC MODELS +# ============================================================================ + +class TechRecommendationRequest(BaseModel): + template_id: str = Field(..., description="Template ID to get recommendations for") + +class TechRecommendationResponse(BaseModel): + template_id: str + stack_name: str + monthly_cost: float + setup_cost: float + team_size: str + development_time: int + satisfaction: int + success_rate: int + frontend: str + backend: str + database: str + cloud: str + testing: str + mobile: str + devops: str + ai_ml: str + # Single recommended tool + recommended_tool: str = "" + recommendation_score: float + created_at: datetime + +# ============================================================================ +# CLAUDE CLIENT +# ============================================================================ + +class ClaudeClient: + """Claude API client for tech stack recommendations""" + + def __init__(self): + # Claude API configuration + self.api_key = os.getenv("CLAUDE_API_KEY") + + if not self.api_key: + logger.warning("CLAUDE_API_KEY environment variable not set - AI features will be limited") + self.client = None + else: + # Initialize Anthropic client + self.client = anthropic.Anthropic(api_key=self.api_key) + + # Database configuration with fallback + self.db_config = self._get_db_config() + + logger.info("ClaudeClient initialized") + + def _get_db_config(self): + """Get database configuration with fallback options""" + # Try environment variables first + host = os.getenv("POSTGRES_HOST") + if not host: + # Check if running inside Docker (postgres hostname available) + try: + import socket + socket.gethostbyname("postgres") + host = "postgres" # Docker internal network + except socket.gaierror: + # Not in Docker, use localhost + host = "localhost" + + return { + "host": host, + "port": int(os.getenv("POSTGRES_PORT", "5432")), + "database": os.getenv("POSTGRES_DB", "dev_pipeline"), + "user": os.getenv("POSTGRES_USER", "pipeline_admin"), + "password": os.getenv("POSTGRES_PASSWORD", "secure_pipeline_2024") + } + + async def connect_db(self): + """Create database connection""" + try: + conn = await asyncpg.connect(**self.db_config) + logger.info("Database connected successfully") + return conn + except Exception as e: + logger.error(f"Database connection failed: {e}") + raise + + def create_prompt(self, template_data: Dict[str, Any], keywords: List[str]) -> str: + """Create a prompt for Claude API""" + prompt = f""" +You are a tech stack recommendation expert. Based on the following template information and extracted keywords, recommend a complete tech stack solution including both technologies and ONE essential business tool. + +Template Information: +- Type: {template_data.get('type', 'N/A')} +- Title: {template_data.get('title', 'N/A')} +- Description: {template_data.get('description', 'N/A')} +- Category: {template_data.get('category', 'N/A')} + +Extracted Keywords: {', '.join(keywords) if keywords else 'None'} + +Please provide a complete tech stack recommendation in the following JSON format. Include realistic cost estimates, team size, development time, success metrics, and ONE relevant business tool. + +{{ + "stack_name": "MVP Startup Stack", + "monthly_cost": 65.0, + "setup_cost": 850.0, + "team_size": "2-4", + "development_time": 3, + "satisfaction": 85, + "success_rate": 88, + "frontend": "Next.js", + "backend": "Node.js", + "database": "PostgreSQL", + "cloud": "Railway", + "testing": "Jest", + "mobile": "React Native", + "devops": "GitHub Actions", + "ai_ml": "Hugging Face", + "recommended_tool": "Shopify", + "recommendation_score": 96.5 +}} + +Guidelines: +- Choose technologies that work well together +- Provide realistic cost estimates based on the template complexity +- Estimate development time in months +- Include satisfaction and success rate percentages (0-100) +- Set recommendation_score based on how well the stack fits the requirements (0-100) +- Use modern, popular technologies +- Consider the template's business domain and technical requirements +- Select ONLY ONE tool total that best complements the entire tech stack +- Choose the most appropriate tool for the template's specific needs and industry +- The tool should be the most essential business tool for this particular template + +IMPORTANT TOOL SELECTION RULES: +- For E-commerce/Online Store templates: Use Shopify, WooCommerce, or Magento +- For CRM/Customer Management: Use Salesforce, HubSpot, or Zoho CRM +- For Analytics/Data: Use Google Analytics, Mixpanel, or Tableau +- For Payment Processing: Use Stripe, PayPal, or Razorpay +- For Communication/Collaboration: Use Slack, Microsoft Teams, or Discord +- For Project Management: Use Trello, Jira, or Asana +- For Marketing: Use Mailchimp, SendGrid, or Constant Contact +- For Social Media: Use Hootsuite, Buffer, or Sprout Social +- For AI/ML projects: Use TensorFlow, PyTorch, or Hugging Face +- For Mobile Apps: Use Firebase, AWS Amplify, or App Store Connect +- For Enterprise: Use Microsoft 365, Google Workspace, or Atlassian +- For Startups: Use Notion, Airtable, or Zapier + +Choose the tool that BEST matches the template's primary business function and industry. + +Provide only the JSON response, no additional text. +""" + return prompt + + async def get_recommendation(self, template_id: str) -> Dict[str, Any]: + """Get tech stack recommendation from Claude API""" + try: + if not self.client: + raise HTTPException(status_code=503, detail="Claude API not available - API key not configured") + + conn = await self.connect_db() + + # Get template data - check both templates and custom_templates tables + template_query = """ + SELECT id, type, title, description, category + FROM templates + WHERE id = $1 + """ + template_result = await conn.fetchrow(template_query, template_id) + + if not template_result: + # Try custom_templates table + template_query = """ + SELECT id, type, title, description, category + FROM custom_templates + WHERE id = $1 + """ + template_result = await conn.fetchrow(template_query, template_id) + + if not template_result: + await conn.close() + raise HTTPException(status_code=404, detail="Template not found") + + template_data = dict(template_result) + + # Get extracted keywords + keywords_result = await conn.fetchrow(''' + SELECT keywords_json FROM extracted_keywords + WHERE template_id = $1 AND keywords_json IS NOT NULL + ORDER BY created_at DESC + LIMIT 1 + ''', template_id) + + keywords = [] + if keywords_result: + keywords = json.loads(keywords_result['keywords_json']) + + await conn.close() + + # Create prompt with extracted keywords + prompt = self.create_prompt(template_data, keywords) + + # Call Claude API + response = self.client.messages.create( + model="claude-3-5-sonnet-20241022", + max_tokens=2000, + temperature=0.7, + messages=[{"role": "user", "content": prompt}] + ) + + # Parse response + response_text = response.content[0].text.strip() + + # Extract JSON from response + if response_text.startswith('```json'): + response_text = response_text[7:-3] + elif response_text.startswith('```'): + response_text = response_text[3:-3] + + response_data = json.loads(response_text) + + # Store recommendation + await self.store_tech_recommendations(template_id, response_data) + + # Auto-migrate new recommendation to Neo4j + try: + await self.auto_migrate_single_recommendation(template_id) + except Exception as e: + logger.warning(f"Auto-migration failed for template {template_id}: {e}") + + return response_data + + except Exception as e: + logger.error(f"Error getting recommendation: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get recommendation: {str(e)}") + + async def store_tech_recommendations(self, template_id: str, response_data: Dict[str, Any]): + """Store tech recommendations in tech_stack_recommendations table""" + try: + conn = await self.connect_db() + + # Clear existing recommendations for this template + await conn.execute( + "DELETE FROM tech_stack_recommendations WHERE template_id = $1", + template_id + ) + + # Handle fields that could be dict or string + def format_field(field_value): + if isinstance(field_value, dict): + return json.dumps(field_value) + return str(field_value) if field_value is not None else '' + + # Handle single tool + def format_tool(tool_value): + if isinstance(tool_value, str): + return tool_value + return '' + + # Store the complete tech stack in the proper table + await conn.execute( + """ + INSERT INTO tech_stack_recommendations + (template_id, stack_name, monthly_cost, setup_cost, team_size, development_time, + satisfaction, success_rate, frontend, backend, database, cloud, testing, + mobile, devops, ai_ml, recommended_tool, recommendation_score) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) + """, + template_id, + response_data.get('stack_name', 'Tech Stack'), + response_data.get('monthly_cost', 0.0), + response_data.get('setup_cost', 0.0), + response_data.get('team_size', '1-2'), + response_data.get('development_time', 1), + response_data.get('satisfaction', 0), + response_data.get('success_rate', 0), + format_field(response_data.get('frontend', '')), + format_field(response_data.get('backend', '')), + format_field(response_data.get('database', '')), + format_field(response_data.get('cloud', '')), + format_field(response_data.get('testing', '')), + format_field(response_data.get('mobile', '')), + format_field(response_data.get('devops', '')), + format_field(response_data.get('ai_ml', '')), + format_tool(response_data.get('recommended_tool', '')), + response_data.get('recommendation_score', 0.0) + ) + + await conn.close() + logger.info(f"Stored complete tech stack with tools for template {template_id} in tech_stack_recommendations table") + except Exception as e: + logger.error(f"Error storing tech recommendations: {e}") + + async def auto_migrate_single_recommendation(self, template_id: str): + """Auto-migrate a single recommendation from tech_stack_recommendations table to Neo4j""" + try: + logger.info(f"Starting auto-migration for template {template_id}") + conn = await self.connect_db() + + # Get recommendation from tech_stack_recommendations table + rec_query = """ + SELECT * FROM tech_stack_recommendations + WHERE template_id = $1 + ORDER BY created_at DESC LIMIT 1 + """ + rec = await conn.fetchrow(rec_query, template_id) + + if not rec: + logger.warning(f"No recommendation found in tech_stack_recommendations for template {template_id}") + await conn.close() + return + + logger.info(f"Found recommendation: {rec['stack_name']} for template {template_id}") + + # Get template data for context - check both templates and custom_templates tables + template_query = """ + SELECT id, title, description, category, type + FROM templates + WHERE id = $1 + """ + template_result = await conn.fetchrow(template_query, template_id) + + if not template_result: + # Try custom_templates table + template_query = """ + SELECT id, title, description, category, type + FROM custom_templates + WHERE id = $1 + """ + template_result = await conn.fetchrow(template_query, template_id) + + if not template_result: + logger.warning(f"Template {template_id} not found in templates or custom_templates tables") + await conn.close() + return + + template_data = dict(template_result) + template_data['id'] = str(template_data['id']) + + # Get extracted keywords + keywords_result = await conn.fetchrow(''' + SELECT keywords_json FROM extracted_keywords + WHERE template_id = $1 AND keywords_json IS NOT NULL + ORDER BY created_at DESC + LIMIT 1 + ''', template_id) + + keywords = [] + if keywords_result: + keywords = json.loads(keywords_result['keywords_json']) + + await conn.close() + + # Create template node in Neo4j + await neo4j_client.create_template_node(template_data) + + # Create tech stack node + tech_stack_data = { + "name": rec['stack_name'], + "category": "tech_stack", + "maturity_score": 0.9, + "learning_curve": "medium", + "performance_rating": float(rec['recommendation_score']) / 100.0 + } + await neo4j_client.create_technology_node(tech_stack_data) + + # Create recommendation relationship + await neo4j_client.create_recommendation_relationship( + str(template_id), + rec['stack_name'], + "tech_stack", + float(rec['recommendation_score']) / 100.0 + ) + + # Create individual technology nodes and relationships + tech_fields = ['frontend', 'backend', 'database', 'cloud', 'testing', 'mobile', 'devops', 'ai_ml'] + + for field in tech_fields: + tech_value = rec[field] + if tech_value and tech_value.strip(): + # Parse JSON if it's a string + if isinstance(tech_value, str) and tech_value.startswith('{'): + try: + tech_value = json.loads(tech_value) + if isinstance(tech_value, dict): + tech_name = tech_value.get('name', str(tech_value)) + else: + tech_name = str(tech_value) + except: + tech_name = str(tech_value) + else: + tech_name = str(tech_value) + + # Create technology node + tech_data = { + "name": tech_name, + "category": field, + "maturity_score": 0.8, + "learning_curve": "medium", + "performance_rating": 0.8 + } + await neo4j_client.create_technology_node(tech_data) + + # Create relationship + await neo4j_client.create_recommendation_relationship( + str(template_id), + tech_name, + field, + 0.8 + ) + + # Create tool node for single recommended tool + recommended_tool = rec.get('recommended_tool', '') + if recommended_tool and recommended_tool.strip(): + # Create tool node + tool_data = { + "name": recommended_tool, + "category": "business_tool", + "type": "Tool", + "maturity_score": 0.8, + "learning_curve": "easy", + "performance_rating": 0.8 + } + await neo4j_client.create_technology_node(tool_data) + + # Create relationship + await neo4j_client.create_recommendation_relationship( + str(template_id), + recommended_tool, + "business_tool", + 0.8 + ) + + # Create keyword relationships + if keywords and len(keywords) > 0: + logger.info(f"Creating {len(keywords)} keyword relationships for template {template_id}") + for keyword in keywords: + if keyword and keyword.strip(): + await neo4j_client.create_keyword_relationship(str(template_id), keyword) + else: + logger.warning(f"No keywords found for template {template_id}, skipping keyword relationships") + + # Create TemplateRecommendation node with rich data + recommendation_data = { + 'stack_name': rec['stack_name'], + 'description': template_data.get('description', ''), + 'project_scale': 'medium', + 'team_size': 3, + 'experience_level': 'intermediate', + 'confidence_score': int(rec['recommendation_score']), + 'recommendation_reasons': [ + f"Tech stack: {rec['stack_name']}", + f"Score: {rec['recommendation_score']}/100", + "AI-generated recommendation" + ], + 'key_features': [ + f"Frontend: {rec.get('frontend', 'N/A')}", + f"Backend: {rec.get('backend', 'N/A')}", + f"Database: {rec.get('database', 'N/A')}", + f"Cloud: {rec.get('cloud', 'N/A')}" + ], + 'estimated_development_time_months': rec.get('development_time', 3), + 'complexity_level': 'medium', + 'budget_range_usd': f"${rec.get('monthly_cost', 0):.0f} - ${rec.get('setup_cost', 0):.0f}", + 'time_to_market_weeks': rec.get('development_time', 3) * 4, + 'scalability_requirements': 'moderate', + 'security_requirements': 'standard', + 'success_rate_percentage': rec.get('success_rate', 85), + 'user_satisfaction_score': rec.get('satisfaction', 85) + } + await neo4j_client.create_template_recommendation_node(str(template_id), recommendation_data) + + # Create HAS_RECOMMENDATION relationship between Template and TemplateRecommendation + await neo4j_client.create_has_recommendation_relationship(str(template_id), f"rec-{template_id}") + + logger.info(f"✅ Successfully auto-migrated template {template_id} to Neo4j knowledge graph") + + except Exception as e: + logger.error(f"Error in auto-migration for template {template_id}: {e}") + +# ============================================================================ +# FEATURE EXTRACTOR +# ============================================================================ + +class FeatureExtractor: + """Extracts features from templates and gets tech stack recommendations""" + + def __init__(self): + # Database configurations with fallback + self.template_db_config = self._get_db_config() + + # Claude API configuration + self.claude_api_key = os.getenv("CLAUDE_API_KEY") + if not self.claude_api_key: + logger.warning("CLAUDE_API_KEY not set - AI features will be limited") + + self.claude_client = anthropic.Anthropic(api_key=self.claude_api_key) if self.claude_api_key else None + + logger.info("FeatureExtractor initialized") + + def _get_db_config(self): + """Get database configuration with fallback options""" + # Try environment variables first + host = os.getenv("POSTGRES_HOST") + if not host: + # Check if running inside Docker (postgres hostname available) + try: + import socket + socket.gethostbyname("postgres") + host = "postgres" # Docker internal network + except socket.gaierror: + # Not in Docker, use localhost + host = "localhost" + + return { + "host": host, + "port": int(os.getenv("POSTGRES_PORT", "5432")), + "database": os.getenv("POSTGRES_DB", "dev_pipeline"), + "user": os.getenv("POSTGRES_USER", "pipeline_admin"), + "password": os.getenv("POSTGRES_PASSWORD", "secure_pipeline_2024") + } + + async def connect_db(self): + """Create database connection""" + try: + conn = await asyncpg.connect(**self.template_db_config) + logger.info("Database connected successfully") + return conn + except Exception as e: + logger.error(f"Database connection failed: {e}") + raise + + async def extract_keywords_from_template(self, template_data: Dict[str, Any]) -> List[str]: + """Extract keywords from template using local NLP processing""" + try: + # Combine all text data + text_content = f"{template_data.get('title', '')} {template_data.get('description', '')} {template_data.get('category', '')}" + + # Clean and process text + keywords = self._extract_keywords_local(text_content) + + logger.info(f"Extracted {len(keywords)} keywords locally: {keywords}") + return keywords + + except Exception as e: + logger.error(f"Error extracting keywords: {e}") + return [] + + def _extract_keywords_local(self, text: str) -> List[str]: + """Extract keywords using local text processing""" + import re + from collections import Counter + + # Define technical and business keywords + tech_keywords = { + 'web', 'api', 'database', 'frontend', 'backend', 'mobile', 'cloud', 'ai', 'ml', 'analytics', + 'ecommerce', 'e-commerce', 'payment', 'authentication', 'security', 'testing', 'deployment', + 'microservices', 'rest', 'graphql', 'react', 'angular', 'vue', 'node', 'python', 'java', + 'javascript', 'typescript', 'docker', 'kubernetes', 'aws', 'azure', 'gcp', 'postgresql', + 'mysql', 'mongodb', 'redis', 'elasticsearch', 'rabbitmq', 'kafka', 'nginx', 'jenkins', + 'gitlab', 'github', 'ci', 'cd', 'devops', 'monitoring', 'logging', 'caching', 'scaling' + } + + business_keywords = { + 'healthcare', 'medical', 'patient', 'appointment', 'records', 'telehealth', 'pharmacy', + 'finance', 'banking', 'payment', 'invoice', 'accounting', 'trading', 'investment', + 'education', 'learning', 'student', 'course', 'training', 'certification', 'lms', + 'retail', 'inventory', 'shopping', 'cart', 'checkout', 'order', 'shipping', 'warehouse', + 'crm', 'sales', 'marketing', 'lead', 'customer', 'support', 'ticket', 'workflow', + 'automation', 'process', 'approval', 'document', 'file', 'content', 'management', + 'enterprise', 'business', 'solution', 'platform', 'service', 'application', 'system' + } + + # Clean text + text = re.sub(r'[^\w\s-]', ' ', text.lower()) + words = re.findall(r'\b\w+\b', text) + + # Filter out common stop words + stop_words = { + 'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', + 'by', 'from', 'up', 'about', 'into', 'through', 'during', 'before', 'after', + 'above', 'below', 'between', 'among', 'is', 'are', 'was', 'were', 'be', 'been', + 'being', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', + 'should', 'may', 'might', 'must', 'can', 'this', 'that', 'these', 'those', + 'i', 'you', 'he', 'she', 'it', 'we', 'they', 'me', 'him', 'her', 'us', 'them' + } + + # Filter words + filtered_words = [word for word in words if len(word) > 2 and word not in stop_words] + + # Count word frequency + word_counts = Counter(filtered_words) + + # Extract relevant keywords + keywords = [] + + # Add technical keywords found in text + for word in filtered_words: + if word in tech_keywords or word in business_keywords: + keywords.append(word) + + # Add most frequent meaningful words (excluding already added keywords) + remaining_words = [word for word, count in word_counts.most_common(10) + if word not in keywords and count > 1] + keywords.extend(remaining_words[:5]) + + # Remove duplicates and limit to 10 keywords + unique_keywords = list(dict.fromkeys(keywords))[:10] + + return unique_keywords + + async def get_template_data(self, template_id: str) -> Optional[Dict[str, Any]]: + """Get template data from database""" + try: + conn = await self.connect_db() + + # Try templates table first + template = await conn.fetchrow( + """ + SELECT id, title, description, category, type + FROM templates + WHERE id = $1 + """, + template_id + ) + + if not template: + # Try custom_templates table + template = await conn.fetchrow( + """ + SELECT id, title, description, category, type + FROM custom_templates + WHERE id = $1 + """, + template_id + ) + + await conn.close() + + if template: + return dict(template) + return None + + except Exception as e: + logger.error(f"Error getting template data: {e}") + return None + + async def get_all_templates(self) -> List[Dict[str, Any]]: + """Get all templates from both tables""" + try: + conn = await self.connect_db() + + # Get from templates table + templates = await conn.fetch( + """ + SELECT id, title, description, category, type + FROM templates + WHERE type NOT IN ('_system', '_migration', '_test') + """ + ) + + # Get from custom_templates table + custom_templates = await conn.fetch( + """ + SELECT id, title, description, category, type + FROM custom_templates + """ + ) + + await conn.close() + + # Combine results + all_templates = [] + for template in templates: + all_templates.append(dict(template)) + for template in custom_templates: + all_templates.append(dict(template)) + + return all_templates + + except Exception as e: + logger.error(f"Error getting all templates: {e}") + return [] + + async def store_extracted_keywords(self, template_id: str, keywords: List[str]): + """Store extracted keywords in database""" + try: + conn = await self.connect_db() + + # Determine template source + template_source = 'templates' + template = await conn.fetchrow("SELECT id FROM templates WHERE id = $1", template_id) + if not template: + template_source = 'custom_templates' + + # Store keywords + await conn.execute( + """ + INSERT INTO extracted_keywords (template_id, template_source, keywords_json, created_at) + VALUES ($1, $2, $3, $4) + ON CONFLICT (template_id, template_source) + DO UPDATE SET keywords_json = $3, updated_at = $4 + """, + template_id, + template_source, + json.dumps(keywords), + datetime.now() + ) + + await conn.close() + logger.info(f"Stored keywords for template {template_id} from {template_source}") + + except Exception as e: + logger.error(f"Error storing extracted keywords: {e}") + + async def store_keywords(self, template_id: str, keywords: List[str]): + """Store extracted keywords in database""" + try: + conn = await self.connect_db() + + # Store keywords + await conn.execute( + """ + INSERT INTO extracted_keywords (template_id, keywords_json, created_at) + VALUES ($1, $2, $3) + ON CONFLICT (template_id) + DO UPDATE SET keywords_json = $2, updated_at = $3 + """, + template_id, + json.dumps(keywords), + datetime.now() + ) + + await conn.close() + logger.info(f"Stored keywords for template {template_id}") + + except Exception as e: + logger.error(f"Error storing keywords: {e}") + + +# ============================================================================ +# NEO4J CLIENT +# ============================================================================ + +class Neo4jClient: + """Neo4j client for knowledge graph operations""" + + def __init__(self): + # Neo4j configuration - try multiple connection options + self.uri = self._get_neo4j_uri() + self.username = os.getenv("NEO4J_USERNAME", "neo4j") + self.password = os.getenv("NEO4J_PASSWORD", "password") + + # Create driver + self._create_driver() + + def _get_neo4j_uri(self): + """Get Neo4j URI with fallback options""" + # Try environment variable first + uri = os.getenv("NEO4J_URI") + if uri: + return uri + + # Check if running inside Docker (neo4j hostname available) + try: + import socket + socket.gethostbyname("neo4j") + return "bolt://neo4j:7687" # Docker internal network + except socket.gaierror: + # Not in Docker, use localhost + return "bolt://localhost:7687" + + def _create_driver(self): + """Create Neo4j driver""" + self.driver = AsyncGraphDatabase.driver( + self.uri, + auth=(self.username, self.password) + ) + logger.info(f"Neo4jClient initialized with URI: {self.uri}") + + async def close(self): + """Close the Neo4j driver""" + await self.driver.close() + logger.info("Neo4j connection closed") + + async def test_connection(self): + """Test Neo4j connection""" + try: + async with self.driver.session() as session: + result = await session.run("RETURN 1 as test") + record = await result.single() + if record and record["test"] == 1: + logger.info("Neo4j connection successful") + return True + else: + logger.error("Neo4j connection test failed") + return False + except Exception as e: + logger.error(f"Neo4j connection failed: {e}") + return False + + async def create_constraints(self): + """Create Neo4j constraints""" + try: + async with self.driver.session() as session: + # Create constraints + constraints = [ + "CREATE CONSTRAINT template_id_unique IF NOT EXISTS FOR (t:Template) REQUIRE t.id IS UNIQUE", + "CREATE CONSTRAINT technology_name_unique IF NOT EXISTS FOR (tech:Technology) REQUIRE tech.name IS UNIQUE", + "CREATE CONSTRAINT keyword_name_unique IF NOT EXISTS FOR (k:Keyword) REQUIRE k.name IS UNIQUE" + ] + + for constraint in constraints: + try: + await session.run(constraint) + except Exception as e: + logger.warning(f"Constraint creation warning: {e}") + + logger.info("Neo4j constraints created successfully") + except Exception as e: + logger.error(f"Error creating constraints: {e}") + + async def create_template_node(self, template_data: Dict[str, Any]): + """Create or update template node""" + try: + async with self.driver.session() as session: + await session.run( + """ + MERGE (t:Template {id: $id}) + SET t.name = $name, + t.description = $description, + t.category = $category, + t.type = $type, + t.updated_at = datetime() + """, + id=template_data.get('id'), + name=template_data.get('name', template_data.get('title', '')), + description=template_data.get('description', ''), + category=template_data.get('category', ''), + type=template_data.get('type', '') + ) + logger.info(f"Created/updated template node: {template_data.get('name', template_data.get('title', ''))}") + except Exception as e: + logger.error(f"Error creating template node: {e}") + + async def create_technology_node(self, tech_data: Dict[str, Any]): + """Create or update technology node""" + try: + async with self.driver.session() as session: + await session.run( + """ + MERGE (tech:Technology {name: $name}) + SET tech.category = $category, + tech.type = $type, + tech.maturity_score = $maturity_score, + tech.learning_curve = $learning_curve, + tech.performance_rating = $performance_rating, + tech.updated_at = datetime() + """, + name=tech_data.get('name'), + category=tech_data.get('category', ''), + type=tech_data.get('type', 'Technology'), + maturity_score=tech_data.get('maturity_score', 0.8), + learning_curve=tech_data.get('learning_curve', 'medium'), + performance_rating=tech_data.get('performance_rating', 0.8) + ) + logger.info(f"Created/updated technology node: {tech_data.get('name')}") + except Exception as e: + logger.error(f"Error creating technology node: {e}") + + async def create_recommendation_relationship(self, template_id: str, tech_name: str, category: str, score: float): + """Create recommendation relationship""" + try: + async with self.driver.session() as session: + await session.run( + """ + MATCH (t:Template {id: $template_id}) + MATCH (tech:Technology {name: $tech_name}) + MERGE (t)-[r:RECOMMENDED_TECHNOLOGY {category: $category, score: $score}]->(tech) + SET r.updated_at = datetime() + """, + template_id=template_id, + tech_name=tech_name, + category=category, + score=score + ) + logger.info(f"Created recommendation relationship: {template_id} -> {tech_name}") + except Exception as e: + logger.error(f"Error creating recommendation relationship: {e}") + + async def create_keyword_relationship(self, template_id: str, keyword: str): + """Create keyword relationship""" + try: + async with self.driver.session() as session: + # Create keyword node + await session.run( + """ + MERGE (k:Keyword {name: $keyword}) + SET k.updated_at = datetime() + """, + keyword=keyword + ) + + # Create relationship + await session.run( + """ + MATCH (t:Template {id: $template_id}) + MATCH (k:Keyword {name: $keyword}) + MERGE (t)-[r:HAS_KEYWORD]->(k) + SET r.updated_at = datetime() + """, + template_id=template_id, + keyword=keyword + ) + logger.info(f"Created keyword relationship: {template_id} -> {keyword}") + except Exception as e: + logger.error(f"Error creating keyword relationship: {e}") + + async def create_has_recommendation_relationship(self, template_id: str, recommendation_id: str): + """Create HAS_RECOMMENDATION relationship between Template and TemplateRecommendation""" + try: + async with self.driver.session() as session: + await session.run( + """ + MATCH (t:Template {id: $template_id}) + MATCH (tr:TemplateRecommendation {id: $recommendation_id}) + MERGE (t)-[r:HAS_RECOMMENDATION]->(tr) + SET r.created_at = datetime(), + r.updated_at = datetime() + """, + template_id=template_id, + recommendation_id=recommendation_id + ) + logger.info(f"Created HAS_RECOMMENDATION relationship: {template_id} -> {recommendation_id}") + except Exception as e: + logger.error(f"Error creating HAS_RECOMMENDATION relationship: {e}") + + async def get_recommendations_from_neo4j(self, template_id: str) -> Optional[Dict[str, Any]]: + """Get tech stack recommendations from Neo4j knowledge graph""" + try: + # Convert UUID to string if needed + template_id_str = str(template_id) + + async with self.driver.session() as session: + # Query for template recommendations from Neo4j + result = await session.run( + """ + MATCH (t:Template {id: $template_id})-[:HAS_RECOMMENDATION]->(tr:TemplateRecommendation) + OPTIONAL MATCH (t)-[r:RECOMMENDED_TECHNOLOGY]->(tech:Technology) + WITH tr, collect({ + name: tech.name, + category: r.category, + score: r.score, + type: tech.type, + maturity_score: tech.maturity_score, + learning_curve: tech.learning_curve, + performance_rating: tech.performance_rating + }) as technologies + RETURN tr.business_domain as business_domain, + tr.project_type as project_type, + tr.team_size as team_size, + tr.confidence_score as confidence_score, + tr.estimated_development_time_months as development_time, + tr.success_rate_percentage as success_rate, + tr.user_satisfaction_score as satisfaction, + tr.budget_range_usd as budget_range, + tr.complexity_level as complexity_level, + technologies + ORDER BY tr.created_at DESC + LIMIT 1 + """, + template_id=template_id_str + ) + + record = await result.single() + if record: + # Process technologies by category + tech_categories = {} + for tech in record['technologies']: + category = tech['category'] + if category not in tech_categories: + tech_categories[category] = [] + tech_categories[category].append(tech) + + # Build recommendation response + recommendation = { + 'stack_name': f"{record['business_domain']} {record['project_type']} Stack", + 'monthly_cost': record['budget_range'] / 12 if record['budget_range'] else 1000, + 'setup_cost': record['budget_range'] if record['budget_range'] else 5000, + 'team_size': record['team_size'] or '2-4', + 'development_time': record['development_time'] or 6, + 'satisfaction': record['satisfaction'] or 85, + 'success_rate': record['success_rate'] or 80, + 'frontend': '', + 'backend': '', + 'database': '', + 'cloud': '', + 'testing': '', + 'mobile': '', + 'devops': '', + 'ai_ml': '', + 'recommended_tool': '', + 'recommendation_score': record['confidence_score'] or 85.0 + } + + # Map technologies to categories + for category, techs in tech_categories.items(): + if techs: + best_tech = max(techs, key=lambda x: x['score']) + if category.lower() == 'frontend': + recommendation['frontend'] = best_tech['name'] + elif category.lower() == 'backend': + recommendation['backend'] = best_tech['name'] + elif category.lower() == 'database': + recommendation['database'] = best_tech['name'] + elif category.lower() == 'cloud': + recommendation['cloud'] = best_tech['name'] + elif category.lower() == 'testing': + recommendation['testing'] = best_tech['name'] + elif category.lower() == 'mobile': + recommendation['mobile'] = best_tech['name'] + elif category.lower() == 'devops': + recommendation['devops'] = best_tech['name'] + elif category.lower() in ['ai', 'ml', 'ai_ml']: + recommendation['ai_ml'] = best_tech['name'] + elif category.lower() == 'tool': + recommendation['recommended_tool'] = best_tech['name'] + + logger.info(f"Found recommendations in Neo4j for template {template_id}: {recommendation['stack_name']}") + return recommendation + else: + logger.info(f"No recommendations found in Neo4j for template {template_id}") + return None + + except Exception as e: + logger.error(f"Error getting recommendations from Neo4j: {e}") + return None + + async def create_template_recommendation_node(self, template_id: str, recommendation_data: Dict[str, Any]): + """Create TemplateRecommendation node with rich data""" + try: + async with self.driver.session() as session: + # Extract business domain from template category or description + business_domain = self._extract_business_domain(recommendation_data) + project_type = self._extract_project_type(recommendation_data) + + # Create TemplateRecommendation node + await session.run( + """ + MERGE (tr:TemplateRecommendation {id: $id}) + SET tr.business_domain = $business_domain, + tr.project_type = $project_type, + tr.project_scale = $project_scale, + tr.team_size = $team_size, + tr.experience_level = $experience_level, + tr.confidence_score = $confidence_score, + tr.recommendation_reasons = $recommendation_reasons, + tr.key_features = $key_features, + tr.estimated_development_time_months = $estimated_development_time_months, + tr.complexity_level = $complexity_level, + tr.budget_range_usd = $budget_range_usd, + tr.time_to_market_weeks = $time_to_market_weeks, + tr.scalability_requirements = $scalability_requirements, + tr.security_requirements = $security_requirements, + tr.success_rate_percentage = $success_rate_percentage, + tr.user_satisfaction_score = $user_satisfaction_score, + tr.created_by_system = $created_by_system, + tr.recommendation_source = $recommendation_source, + tr.is_active = $is_active, + tr.usage_count = $usage_count, + tr.created_at = datetime(), + tr.updated_at = datetime() + """, + id=f"rec-{template_id}", + business_domain=business_domain, + project_type=project_type, + project_scale=recommendation_data.get('project_scale', 'medium'), + team_size=recommendation_data.get('team_size', 3), + experience_level=recommendation_data.get('experience_level', 'intermediate'), + confidence_score=recommendation_data.get('confidence_score', 85), + recommendation_reasons=recommendation_data.get('recommendation_reasons', ['AI-generated recommendation']), + key_features=recommendation_data.get('key_features', []), + estimated_development_time_months=recommendation_data.get('estimated_development_time_months', 3), + complexity_level=recommendation_data.get('complexity_level', 'medium'), + budget_range_usd=recommendation_data.get('budget_range_usd', '$5,000 - $15,000'), + time_to_market_weeks=recommendation_data.get('time_to_market_weeks', 12), + scalability_requirements=recommendation_data.get('scalability_requirements', 'moderate'), + security_requirements=recommendation_data.get('security_requirements', 'standard'), + success_rate_percentage=recommendation_data.get('success_rate_percentage', 85), + user_satisfaction_score=recommendation_data.get('user_satisfaction_score', 85), + created_by_system=True, + recommendation_source='ai_model', + is_active=True, + usage_count=0 + ) + + # Create relationship from Template to TemplateRecommendation + await session.run( + """ + MATCH (t:Template {id: $template_id}) + MATCH (tr:TemplateRecommendation {id: $rec_id}) + MERGE (t)-[:RECOMMENDED_FOR]->(tr) + """, + template_id=template_id, + rec_id=f"rec-{template_id}" + ) + + logger.info(f"Created TemplateRecommendation node: rec-{template_id}") + except Exception as e: + logger.error(f"Error creating TemplateRecommendation node: {e}") + + def _extract_business_domain(self, recommendation_data: Dict[str, Any]) -> str: + """Extract business domain from recommendation data""" + # Try to extract from stack name or description + stack_name = recommendation_data.get('stack_name', '').lower() + description = recommendation_data.get('description', '').lower() + + if any(word in stack_name or word in description for word in ['ecommerce', 'e-commerce', 'shop', 'store', 'retail']): + return 'E-commerce' + elif any(word in stack_name or word in description for word in ['social', 'community', 'network']): + return 'Social Media' + elif any(word in stack_name or word in description for word in ['finance', 'payment', 'banking', 'fintech']): + return 'Fintech' + elif any(word in stack_name or word in description for word in ['health', 'medical', 'care']): + return 'Healthcare' + elif any(word in stack_name or word in description for word in ['education', 'learning', 'course']): + return 'Education' + else: + return 'General Business' + + def _extract_project_type(self, recommendation_data: Dict[str, Any]) -> str: + """Extract project type from recommendation data""" + stack_name = recommendation_data.get('stack_name', '').lower() + description = recommendation_data.get('description', '').lower() + + if any(word in stack_name or word in description for word in ['web', 'website', 'portal']): + return 'Web Application' + elif any(word in stack_name or word in description for word in ['mobile', 'app', 'ios', 'android']): + return 'Mobile Application' + elif any(word in stack_name or word in description for word in ['api', 'service', 'microservice']): + return 'API Service' + elif any(word in stack_name or word in description for word in ['dashboard', 'admin', 'management']): + return 'Management Dashboard' + else: + return 'Web Application' + +# ============================================================================ +# FASTAPI APPLICATION +# ============================================================================ + +# Initialize FastAPI app +app = FastAPI( + title="Tech Stack Recommendation Service", + description="AI-powered tech stack recommendations with tools integration", + version="1.0.0" +) + +# Add CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Initialize clients +claude_client = ClaudeClient() +feature_extractor = FeatureExtractor() +neo4j_client = Neo4jClient() + +@app.on_event("startup") +async def startup_event(): + """Initialize services on startup""" + print("🚀 STARTING TECH STACK RECOMMENDATION SERVICE") + print("=" * 50) + print("✅ AI Service will be available at: http://localhost:8013") + print("✅ API Documentation: http://localhost:8013/docs") + print("✅ Test endpoint: POST http://localhost:8013/ai/recommendations") + print("=" * 50) + + # Automatic migration on startup + print("🔄 Starting automatic migration to Neo4j...") + try: + await migrate_to_neo4j() + print("✅ Automatic migration completed successfully!") + except Exception as e: + print(f"⚠️ Migration warning: {e}") + print("✅ Service will continue running with existing data") + print("=" * 50) + +@app.get("/") +async def root(): + """Root endpoint""" + return { + "message": "Tech Stack Recommendation Service", + "version": "1.0.0", + "status": "running", + "endpoints": { + "recommendations": "POST /ai/recommendations", + "docs": "GET /docs" + } + } + +@app.get("/health") +async def health_check(): + """Health check endpoint""" + return {"status": "healthy", "timestamp": datetime.now()} + +@app.post("/ai/recommendations/formatted") +async def get_formatted_tech_recommendations(request: TechRecommendationRequest): + """Get tech stack recommendations in a formatted, user-friendly way""" + try: + logger.info(f"Getting formatted recommendations for template: {request.template_id}") + + # Get the standard recommendation + conn = await claude_client.connect_db() + + recommendations = await conn.fetch(''' + SELECT template_id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score, created_at, updated_at + FROM tech_stack_recommendations + WHERE template_id = $1 + ORDER BY created_at DESC + LIMIT 1 + ''', request.template_id) + + if recommendations: + rec = dict(recommendations[0]) + + await conn.close() + + # Format the response in a user-friendly way + formatted_response = { + "template_id": request.template_id, + "tech_stack": { + "name": rec.get('stack_name', 'Tech Stack'), + "score": f"{rec.get('recommendation_score', 0.0)}/100", + "technologies": { + "Frontend": rec.get('frontend', ''), + "Backend": rec.get('backend', ''), + "Database": rec.get('database', ''), + "Cloud": rec.get('cloud', ''), + "Testing": rec.get('testing', ''), + "Mobile": rec.get('mobile', ''), + "DevOps": rec.get('devops', ''), + "AI/ML": rec.get('ai_ml', '') + }, + "recommended_tool": rec.get('recommended_tool', ''), + "costs": { + "monthly": f"${rec.get('monthly_cost', 0.0)}", + "setup": f"${rec.get('setup_cost', 0.0)}" + }, + "team": { + "size": rec.get('team_size', '1-2'), + "development_time": f"{rec.get('development_time', 1)} months" + }, + "metrics": { + "satisfaction": f"{rec.get('satisfaction', 0)}%", + "success_rate": f"{rec.get('success_rate', 0)}%" + } + }, + "created_at": rec.get('created_at', datetime.now()) + } + + return formatted_response + else: + await conn.close() + return {"error": "No recommendations found for this template"} + + except Exception as e: + logger.error(f"Error getting formatted recommendations: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.get("/extract-keywords/{template_id}") +async def get_extracted_keywords(template_id: str): + """Get extracted keywords for a specific template""" + try: + logger.info(f"Getting keywords for template: {template_id}") + + conn = await feature_extractor.connect_db() + + # Get keywords from database + keywords_result = await conn.fetchrow(''' + SELECT keywords_json, created_at, template_source + FROM extracted_keywords + WHERE template_id = $1 AND keywords_json IS NOT NULL + ORDER BY created_at DESC + LIMIT 1 + ''', template_id) + + await conn.close() + + if not keywords_result: + raise HTTPException(status_code=404, detail="No keywords found for this template") + + keywords = json.loads(keywords_result['keywords_json']) if keywords_result['keywords_json'] else [] + + return { + "template_id": template_id, + "keywords": keywords, + "count": len(keywords), + "created_at": keywords_result['created_at'], + "template_source": keywords_result['template_source'] + } + + except Exception as e: + logger.error(f"Error getting keywords: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/extract-keywords/{template_id}") +async def extract_keywords_for_template(template_id: str): + """Extract keywords for a specific template""" + try: + logger.info(f"Extracting keywords for template: {template_id}") + + # Get template data from database + template_data = await feature_extractor.get_template_data(template_id) + + if not template_data: + raise HTTPException(status_code=404, detail="Template not found") + + # Extract keywords using local NLP + keywords = await feature_extractor.extract_keywords_from_template(template_data) + + # Store keywords in database + await feature_extractor.store_extracted_keywords(template_id, keywords) + + return { + "template_id": template_id, + "keywords": keywords, + "count": len(keywords) + } + + except Exception as e: + logger.error(f"Error extracting keywords: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/extract-keywords-all") +async def extract_keywords_for_all_templates(): + """Extract keywords for all templates""" + try: + logger.info("Extracting keywords for all templates") + + # Get all templates from database + templates = await feature_extractor.get_all_templates() + + results = [] + for template in templates: + try: + # Extract keywords using Claude AI + keywords = await feature_extractor.extract_keywords_from_template(template) + + # Store keywords in database + await feature_extractor.store_extracted_keywords(template['id'], keywords) + + results.append({ + "template_id": template['id'], + "title": template['title'], + "keywords": keywords, + "count": len(keywords) + }) + except Exception as e: + logger.error(f"Error extracting keywords for template {template['id']}: {e}") + results.append({ + "template_id": template['id'], + "title": template['title'], + "error": str(e) + }) + + return { + "total_templates": len(templates), + "processed": len(results), + "results": results + } + + except Exception as e: + logger.error(f"Error in bulk keyword extraction: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/auto-workflow/{template_id}") +async def trigger_automatic_workflow(template_id: str): + """Trigger complete automatic workflow for a new template""" + try: + logger.info(f"🚀 Starting automatic workflow for template: {template_id}") + + # Step 1: Extract keywords + logger.info("📝 Step 1: Extracting keywords...") + template_data = await feature_extractor.get_template_data(template_id) + + if not template_data: + raise HTTPException(status_code=404, detail="Template not found") + + keywords = await feature_extractor.extract_keywords_from_template(template_data) + await feature_extractor.store_extracted_keywords(template_id, keywords) + logger.info(f"✅ Keywords extracted and stored: {len(keywords)} keywords") + + # Step 2: Generate tech stack recommendation + logger.info("🤖 Step 2: Generating tech stack recommendation...") + try: + recommendation_data = await claude_client.get_recommendation(template_id) + logger.info(f"✅ Tech stack recommendation generated: {recommendation_data.get('stack_name', 'Unknown')}") + except Exception as e: + logger.warning(f"⚠️ Claude AI failed (likely billing issue): {e}") + logger.info("🔄 Using database fallback for recommendation...") + + # Check if recommendation already exists in database + conn = await claude_client.connect_db() + existing_rec = await conn.fetchrow(''' + SELECT * FROM tech_stack_recommendations + WHERE template_id = $1 + ORDER BY created_at DESC LIMIT 1 + ''', template_id) + + if existing_rec: + recommendation_data = dict(existing_rec) + logger.info(f"✅ Found existing recommendation: {recommendation_data.get('stack_name', 'Unknown')}") + else: + # Create a basic recommendation + recommendation_data = { + 'stack_name': f'{template_data.get("title", "Template")} Tech Stack', + 'monthly_cost': 100.0, + 'setup_cost': 2000.0, + 'team_size': '3-5', + 'development_time': 6, + 'satisfaction': 85, + 'success_rate': 90, + 'frontend': 'React.js', + 'backend': 'Node.js', + 'database': 'PostgreSQL', + 'cloud': 'AWS', + 'testing': 'Jest', + 'mobile': 'React Native', + 'devops': 'Docker', + 'ai_ml': 'TensorFlow', + 'recommended_tool': 'Custom Tool', + 'recommendation_score': 85.0 + } + logger.info(f"✅ Created basic recommendation: {recommendation_data.get('stack_name', 'Unknown')}") + + await conn.close() + + # Step 3: Auto-migrate to Neo4j + logger.info("🔄 Step 3: Auto-migrating to Neo4j knowledge graph...") + await claude_client.auto_migrate_single_recommendation(template_id) + logger.info("✅ Auto-migration to Neo4j completed") + + return { + "template_id": template_id, + "workflow_status": "completed", + "steps_completed": [ + "keyword_extraction", + "tech_stack_recommendation", + "neo4j_migration" + ], + "keywords_count": len(keywords), + "stack_name": recommendation_data.get('stack_name', 'Unknown'), + "message": "Complete workflow executed successfully" + } + + except Exception as e: + logger.error(f"Error in automatic workflow for template {template_id}: {e}") + raise HTTPException(status_code=500, detail=f"Workflow failed: {str(e)}") + +@app.post("/auto-workflow-batch") +async def trigger_automatic_workflow_batch(): + """Trigger automatic workflow for all templates without recommendations""" + try: + logger.info("🚀 Starting batch automatic workflow for all templates") + + # Get all templates without recommendations + conn = await claude_client.connect_db() + + templates_query = """ + SELECT t.id, t.title, t.description, t.category, t.type + FROM templates t + LEFT JOIN tech_stack_recommendations tsr ON t.id = tsr.template_id + WHERE tsr.template_id IS NULL + AND t.type NOT LIKE '_%' + UNION + SELECT ct.id, ct.title, ct.description, ct.category, ct.type + FROM custom_templates ct + LEFT JOIN tech_stack_recommendations tsr ON ct.id = tsr.template_id + WHERE tsr.template_id IS NULL + AND ct.type NOT LIKE '_%' + """ + + templates = await conn.fetch(templates_query) + await conn.close() + + logger.info(f"📋 Found {len(templates)} templates without recommendations") + + results = [] + for i, template in enumerate(templates, 1): + try: + logger.info(f"🔄 Processing {i}/{len(templates)}: {template['title']}") + + # Trigger workflow for this template + workflow_result = await trigger_automatic_workflow(template['id']) + + results.append({ + "template_id": template['id'], + "title": template['title'], + "status": "success", + "workflow_result": workflow_result + }) + + except Exception as e: + logger.error(f"Error processing template {template['id']}: {e}") + results.append({ + "template_id": template['id'], + "title": template['title'], + "status": "failed", + "error": str(e) + }) + + success_count = len([r for r in results if r['status'] == 'success']) + failed_count = len([r for r in results if r['status'] == 'failed']) + + return { + "message": f"Batch workflow completed: {success_count} success, {failed_count} failed", + "total_templates": len(templates), + "success_count": success_count, + "failed_count": failed_count, + "results": results + } + + except Exception as e: + logger.error(f"Error in batch automatic workflow: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@app.post("/ai/recommendations") +async def get_tech_recommendations(request: TechRecommendationRequest): + """Get tech stack recommendations for a template""" + try: + logger.info(f"Getting recommendations for template: {request.template_id}") + + # 1. FIRST: Check Neo4j knowledge graph for recommendations + logger.info("🔍 Checking Neo4j knowledge graph for recommendations...") + neo4j_recommendation = await neo4j_client.get_recommendations_from_neo4j(request.template_id) + + if neo4j_recommendation: + logger.info(f"✅ Found recommendations in Neo4j: {neo4j_recommendation['stack_name']}") + + # Format the response from Neo4j data + response = TechRecommendationResponse( + template_id=request.template_id, + stack_name=neo4j_recommendation.get('stack_name', 'Tech Stack'), + monthly_cost=float(neo4j_recommendation.get('monthly_cost', 0.0)), + setup_cost=float(neo4j_recommendation.get('setup_cost', 0.0)), + team_size=neo4j_recommendation.get('team_size', '1-2'), + development_time=neo4j_recommendation.get('development_time', 1), + satisfaction=neo4j_recommendation.get('satisfaction', 0), + success_rate=neo4j_recommendation.get('success_rate', 0), + frontend=neo4j_recommendation.get('frontend', ''), + backend=neo4j_recommendation.get('backend', ''), + database=neo4j_recommendation.get('database', ''), + cloud=neo4j_recommendation.get('cloud', ''), + testing=neo4j_recommendation.get('testing', ''), + mobile=neo4j_recommendation.get('mobile', ''), + devops=neo4j_recommendation.get('devops', ''), + ai_ml=neo4j_recommendation.get('ai_ml', ''), + recommended_tool=neo4j_recommendation.get('recommended_tool', ''), + recommendation_score=float(neo4j_recommendation.get('recommendation_score', 0.0)), + created_at=datetime.now() + ) + + # Log the complete tech stack with tool for visibility + logger.info(f"📋 Complete Tech Stack Recommendation:") + logger.info(f" 🎯 Stack: {response.stack_name}") + logger.info(f" 💻 Frontend: {response.frontend}") + logger.info(f" ⚙️ Backend: {response.backend}") + logger.info(f" 🗄️ Database: {response.database}") + logger.info(f" ☁️ Cloud: {response.cloud}") + logger.info(f" 🧪 Testing: {response.testing}") + logger.info(f" 📱 Mobile: {response.mobile}") + logger.info(f" 🚀 DevOps: {response.devops}") + logger.info(f" 🤖 AI/ML: {response.ai_ml}") + logger.info(f" 🔧 Recommended Tool: {response.recommended_tool}") + logger.info(f" ⭐ Score: {response.recommendation_score}") + + # Return in the requested format with recommendations array + return { + "recommendations": [ + { + "template_id": response.template_id, + "stack_name": response.stack_name, + "monthly_cost": response.monthly_cost, + "setup_cost": response.setup_cost, + "team_size": response.team_size, + "development_time": response.development_time, + "satisfaction": response.satisfaction, + "success_rate": response.success_rate, + "frontend": response.frontend, + "backend": response.backend, + "database": response.database, + "cloud": response.cloud, + "testing": response.testing, + "mobile": response.mobile, + "devops": response.devops, + "ai_ml": response.ai_ml, + "recommendation_score": response.recommendation_score + } + ] + } + else: + # 2. SECOND: Check database as fallback + logger.info("🔍 Neo4j not found, checking database as fallback...") + conn = await claude_client.connect_db() + + recommendations = await conn.fetch(''' + SELECT template_id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score, created_at, updated_at + FROM tech_stack_recommendations + WHERE template_id = $1 + ORDER BY created_at DESC + LIMIT 1 + ''', request.template_id) + + if recommendations: + rec = dict(recommendations[0]) + logger.info(f"✅ Found recommendations in database: {rec.get('stack_name', 'Unknown')}") + + # Auto-migrate to Neo4j when found in database + try: + logger.info("🔄 Auto-migrating database recommendation to Neo4j...") + await claude_client.auto_migrate_single_recommendation(request.template_id) + except Exception as e: + logger.warning(f"Auto-migration failed for template {request.template_id}: {e}") + + await conn.close() + + # Format the response from database + response = TechRecommendationResponse( + template_id=request.template_id, + stack_name=rec.get('stack_name', 'Tech Stack'), + monthly_cost=float(rec.get('monthly_cost', 0.0)), + setup_cost=float(rec.get('setup_cost', 0.0)), + team_size=rec.get('team_size', '1-2'), + development_time=rec.get('development_time', 1), + satisfaction=rec.get('satisfaction', 0), + success_rate=rec.get('success_rate', 0), + frontend=rec.get('frontend', ''), + backend=rec.get('backend', ''), + database=rec.get('database', ''), + cloud=rec.get('cloud', ''), + testing=rec.get('testing', ''), + mobile=rec.get('mobile', ''), + devops=rec.get('devops', ''), + ai_ml=rec.get('ai_ml', ''), + recommended_tool=rec.get('recommended_tool', ''), + recommendation_score=float(rec.get('recommendation_score', 0.0)), + created_at=datetime.now() + ) + + # Log the complete tech stack with tool for visibility + logger.info(f"📋 Complete Tech Stack Recommendation (from database):") + logger.info(f" 🎯 Stack: {response.stack_name}") + logger.info(f" 💻 Frontend: {response.frontend}") + logger.info(f" ⚙️ Backend: {response.backend}") + logger.info(f" 🗄️ Database: {response.database}") + logger.info(f" ☁️ Cloud: {response.cloud}") + logger.info(f" 🧪 Testing: {response.testing}") + logger.info(f" 📱 Mobile: {response.mobile}") + logger.info(f" 🚀 DevOps: {response.devops}") + logger.info(f" 🤖 AI/ML: {response.ai_ml}") + logger.info(f" 🔧 Recommended Tool: {response.recommended_tool}") + logger.info(f" ⭐ Score: {response.recommendation_score}") + + # Return in the requested format with recommendations array + return { + "recommendations": [ + { + "template_id": response.template_id, + "stack_name": response.stack_name, + "monthly_cost": response.monthly_cost, + "setup_cost": response.setup_cost, + "team_size": response.team_size, + "development_time": response.development_time, + "satisfaction": response.satisfaction, + "success_rate": response.success_rate, + "frontend": response.frontend, + "backend": response.backend, + "database": response.database, + "cloud": response.cloud, + "testing": response.testing, + "mobile": response.mobile, + "devops": response.devops, + "ai_ml": response.ai_ml, + "recommendation_score": response.recommendation_score + } + ] + } + else: + # 3. THIRD: Generate new recommendations using Claude AI + logger.info("🔍 No existing recommendations found, generating new ones with Claude AI...") + await conn.close() + response_data = await claude_client.get_recommendation(request.template_id) + + # Get keywords + conn = await claude_client.connect_db() + keywords_result = await conn.fetchrow(''' + SELECT keywords_json FROM extracted_keywords + WHERE template_id = $1 AND keywords_json IS NOT NULL + ORDER BY template_source + LIMIT 1 + ''', request.template_id) + + keywords = [] + if keywords_result: + keywords = json.loads(keywords_result['keywords_json']) + + await conn.close() + + # Return in the requested format with recommendations array + return { + "recommendations": [ + { + "template_id": request.template_id, + "stack_name": response_data.get('stack_name', 'Tech Stack'), + "monthly_cost": float(response_data.get('monthly_cost', 0.0)), + "setup_cost": float(response_data.get('setup_cost', 0.0)), + "team_size": response_data.get('team_size', '1-2'), + "development_time": response_data.get('development_time', 1), + "satisfaction": response_data.get('satisfaction', 0), + "success_rate": response_data.get('success_rate', 0), + "frontend": response_data.get('frontend', ''), + "backend": response_data.get('backend', ''), + "database": response_data.get('database', ''), + "cloud": response_data.get('cloud', ''), + "testing": response_data.get('testing', ''), + "mobile": response_data.get('mobile', ''), + "devops": response_data.get('devops', ''), + "ai_ml": response_data.get('ai_ml', ''), + "recommendation_score": float(response_data.get('recommendation_score', 0.0)) + } + ] + } + + except Exception as e: + logger.error(f"Error getting recommendations: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +# ============================================================================ +# MIGRATION FUNCTIONALITY +# ============================================================================ + +async def migrate_to_neo4j(): + """Migrate tech stack recommendations to Neo4j knowledge graph""" + print("🚀 Migrating Tech Stack Recommendations to Neo4j Knowledge Graph") + print("=" * 70) + + try: + # Test Neo4j connection + if not await neo4j_client.test_connection(): + print("❌ Neo4j connection failed") + return + + # Create constraints + await neo4j_client.create_constraints() + print("✅ Neo4j constraints created") + + # Connect to PostgreSQL + conn = await claude_client.connect_db() + print("✅ PostgreSQL connected") + + # Get templates with recommendations + templates_query = """ + SELECT DISTINCT t.id, t.title, t.description, t.category, t.type, t.created_at + FROM templates t + JOIN tech_stack_recommendations tsr ON t.id = tsr.template_id + ORDER BY t.created_at DESC + """ + templates = await conn.fetch(templates_query) + print(f"📋 Found {len(templates)} templates to migrate") + + for i, template in enumerate(templates, 1): + print(f"\n📝 Processing {i}/{len(templates)}: {template['title']}") + + # Get recommendation + rec_query = """ + SELECT * FROM tech_stack_recommendations + WHERE template_id = $1 + ORDER BY created_at DESC LIMIT 1 + """ + rec = await conn.fetchrow(rec_query, template['id']) + + if not rec: + print(" ⚠️ No recommendations found for this template") + continue + + print(f" 🔍 Found recommendation: {rec['stack_name']}") + + # Get keywords for this template + keywords_query = """ + SELECT keywords_json FROM extracted_keywords + WHERE template_id = $1 AND template_source = 'templates' + ORDER BY created_at DESC LIMIT 1 + """ + keywords_result = await conn.fetchrow(keywords_query, template['id']) + keywords = [] + if keywords_result and keywords_result['keywords_json']: + keywords_data = keywords_result['keywords_json'] + # Parse JSON if it's a string + if isinstance(keywords_data, str): + try: + import json + keywords = json.loads(keywords_data) + except: + keywords = [] + elif isinstance(keywords_data, list): + keywords = keywords_data + print(f" 🔑 Found {len(keywords)} keywords") + + # Create template node in Neo4j + template_data = dict(template) + template_data['id'] = str(template_data['id']) + await neo4j_client.create_template_node(template_data) + + # Create tech stack node + tech_stack_data = { + "name": rec['stack_name'], + "category": "tech_stack", + "maturity_score": 0.9, + "learning_curve": "medium", + "performance_rating": float(rec['recommendation_score']) / 100.0 + } + await neo4j_client.create_technology_node(tech_stack_data) + + # Create recommendation relationship + await neo4j_client.create_recommendation_relationship( + str(template['id']), + rec['stack_name'], + "tech_stack", + float(rec['recommendation_score']) / 100.0 + ) + + # Create individual technology nodes and relationships + tech_fields = ['frontend', 'backend', 'database', 'cloud', 'testing', 'mobile', 'devops', 'ai_ml'] + + for field in tech_fields: + tech_value = rec[field] + if tech_value and tech_value.strip(): + # Parse JSON if it's a string + if isinstance(tech_value, str) and tech_value.startswith('{'): + try: + tech_value = json.loads(tech_value) + if isinstance(tech_value, dict): + tech_name = tech_value.get('name', str(tech_value)) + else: + tech_name = str(tech_value) + except: + tech_name = str(tech_value) + else: + tech_name = str(tech_value) + + # Create technology node + tech_data = { + "name": tech_name, + "category": field, + "maturity_score": 0.8, + "learning_curve": "medium", + "performance_rating": 0.8 + } + await neo4j_client.create_technology_node(tech_data) + + # Create relationship + await neo4j_client.create_recommendation_relationship( + str(template['id']), + tech_name, + field, + 0.8 + ) + + # Create tool node for single recommended tool + recommended_tool = rec.get('recommended_tool', '') + if recommended_tool and recommended_tool.strip(): + # Create tool node + tool_data = { + "name": recommended_tool, + "category": "business_tool", + "type": "Tool", + "maturity_score": 0.8, + "learning_curve": "easy", + "performance_rating": 0.8 + } + await neo4j_client.create_technology_node(tool_data) + + # Create relationship + await neo4j_client.create_recommendation_relationship( + str(template['id']), + recommended_tool, + "business_tool", + 0.8 + ) + print(f" 🔧 Created tool: {recommended_tool}") + + # Create keyword relationships + if isinstance(keywords, list): + print(f" 🔑 Processing {len(keywords)} keywords: {keywords[:3]}...") + for keyword in keywords: + if keyword and keyword.strip(): + await neo4j_client.create_keyword_relationship(str(template['id']), keyword) + else: + print(f" ⚠️ Keywords not in expected list format: {type(keywords)}") + + # Create TemplateRecommendation node with rich data + recommendation_data = { + 'stack_name': rec['stack_name'], + 'description': template.get('description', ''), + 'project_scale': 'medium', + 'team_size': 3, + 'experience_level': 'intermediate', + 'confidence_score': int(rec['recommendation_score']), + 'recommendation_reasons': [ + f"Tech stack: {rec['stack_name']}", + f"Score: {rec['recommendation_score']}/100", + "AI-generated recommendation" + ], + 'key_features': [ + f"Frontend: {rec.get('frontend', 'N/A')}", + f"Backend: {rec.get('backend', 'N/A')}", + f"Database: {rec.get('database', 'N/A')}", + f"Cloud: {rec.get('cloud', 'N/A')}" + ], + 'estimated_development_time_months': rec.get('development_time', 3), + 'complexity_level': 'medium', + 'budget_range_usd': f"${rec.get('monthly_cost', 0):.0f} - ${rec.get('setup_cost', 0):.0f}", + 'time_to_market_weeks': rec.get('development_time', 3) * 4, + 'scalability_requirements': 'moderate', + 'security_requirements': 'standard', + 'success_rate_percentage': rec.get('success_rate', 85), + 'user_satisfaction_score': rec.get('satisfaction', 85) + } + await neo4j_client.create_template_recommendation_node(str(template['id']), recommendation_data) + print(f" 📋 Created TemplateRecommendation node") + + print(f" ✅ Successfully migrated to Neo4j") + + await conn.close() + await neo4j_client.close() + + print("\n🎉 MIGRATION COMPLETED!") + print(f"📊 Successfully migrated: {len(templates)} templates") + print("🔗 Neo4j knowledge graph created with tech stack relationships") + + except Exception as e: + print(f"❌ Migration failed: {e}") + +# ============================================================================ +# MAIN EXECUTION +# ============================================================================ + +if __name__ == "__main__": + import sys + + if len(sys.argv) > 1 and sys.argv[1] == "migrate": + # Run migration + asyncio.run(migrate_to_neo4j()) + elif len(sys.argv) > 2 and sys.argv[1] == "--template-id": + # Generate recommendations for specific template + template_id = sys.argv[2] + + # Configure logger to output to stderr for command line usage + import logging + logging.basicConfig(level=logging.ERROR, stream=sys.stderr) + + async def get_recommendation(): + try: + claude_client = ClaudeClient() + result = await claude_client.get_recommendation(template_id) + # Only output JSON to stdout + print(json.dumps(result, default=str)) + except Exception as e: + error_result = { + "error": str(e), + "template_id": template_id + } + print(json.dumps(error_result)) + + asyncio.run(get_recommendation()) + else: + # Start FastAPI server + uvicorn.run( + app, + host="0.0.0.0", + port=8013, + log_level="info" + ) diff --git a/services/template-manager/package-lock.json b/services/template-manager/package-lock.json index 68fefb7..fc46bc8 100644 --- a/services/template-manager/package-lock.json +++ b/services/template-manager/package-lock.json @@ -8,6 +8,7 @@ "name": "template-manager", "version": "1.0.0", "dependencies": { + "@anthropic-ai/sdk": "^0.24.3", "axios": "^1.12.2", "cors": "^2.8.5", "dotenv": "^16.0.3", @@ -16,10 +17,12 @@ "joi": "^17.7.0", "jsonwebtoken": "^9.0.2", "morgan": "^1.10.0", + "neo4j-driver": "^5.15.0", "pg": "^8.8.0", "redis": "^4.6.0", "socket.io": "^4.8.1", - "uuid": "^9.0.0" + "uuid": "^9.0.0", + "winston": "^3.11.0" }, "devDependencies": { "nodemon": "^2.0.22" @@ -28,6 +31,57 @@ "node": ">=18.0.0" } }, + "node_modules/@anthropic-ai/sdk": { + "version": "0.24.3", + "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.24.3.tgz", + "integrity": "sha512-916wJXO6T6k8R6BAAcLhLPv/pnLGy7YSEBZXZ1XTFbLcTZE8oTy3oDW9WJf9KKZwMvVcePIfoTSvzXHRcGxkQQ==", + "license": "MIT", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7", + "web-streams-polyfill": "^3.2.1" + } + }, + "node_modules/@anthropic-ai/sdk/node_modules/@types/node": { + "version": "18.19.127", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.127.tgz", + "integrity": "sha512-gSjxjrnKXML/yo0BO099uPixMqfpJU0TKYjpfLU7TrtA2WWDki412Np/RSTPRil1saKBhvVVKzVx/p/6p94nVA==", + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@anthropic-ai/sdk/node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "license": "MIT" + }, + "node_modules/@colors/colors": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz", + "integrity": "sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==", + "license": "MIT", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@dabh/diagnostics": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.3.tgz", + "integrity": "sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA==", + "license": "MIT", + "dependencies": { + "colorspace": "1.1.x", + "enabled": "2.0.x", + "kuler": "^2.0.0" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -147,6 +201,34 @@ "undici-types": "~7.10.0" } }, + "node_modules/@types/node-fetch": { + "version": "2.6.13", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz", + "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.4" + } + }, + "node_modules/@types/triple-beam": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/triple-beam/-/triple-beam-1.3.5.tgz", + "integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==", + "license": "MIT" + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, "node_modules/accepts": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", @@ -160,6 +242,18 @@ "node": ">= 0.6" } }, + "node_modules/agentkeepalive": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", + "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, "node_modules/anymatch": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", @@ -180,6 +274,12 @@ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", "license": "MIT" }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "license": "MIT" + }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", @@ -204,6 +304,26 @@ "dev": true, "license": "MIT" }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/base64id": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz", @@ -292,6 +412,30 @@ "node": ">=8" } }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, "node_modules/buffer-equal-constant-time": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", @@ -370,6 +514,51 @@ "node": ">=0.10.0" } }, + "node_modules/color": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", + "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.3", + "color-string": "^1.6.0" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "license": "MIT" + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "node_modules/colorspace": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.4.tgz", + "integrity": "sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w==", + "license": "MIT", + "dependencies": { + "color": "^3.1.3", + "text-hex": "1.0.x" + } + }, "node_modules/combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -516,6 +705,12 @@ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", "license": "MIT" }, + "node_modules/enabled": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", + "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==", + "license": "MIT" + }, "node_modules/encodeurl": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", @@ -646,6 +841,15 @@ "node": ">= 0.6" } }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/express": { "version": "4.21.2", "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", @@ -692,6 +896,12 @@ "url": "https://opencollective.com/express" } }, + "node_modules/fecha": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.3.tgz", + "integrity": "sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==", + "license": "MIT" + }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", @@ -723,6 +933,12 @@ "node": ">= 0.8" } }, + "node_modules/fn.name": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", + "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==", + "license": "MIT" + }, "node_modules/follow-redirects": { "version": "1.15.11", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", @@ -759,6 +975,34 @@ "node": ">= 6" } }, + "node_modules/form-data-encoder": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", + "license": "MIT" + }, + "node_modules/formdata-node": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", + "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "license": "MIT", + "dependencies": { + "node-domexception": "1.0.0", + "web-streams-polyfill": "4.0.0-beta.3" + }, + "engines": { + "node": ">= 12.20" + } + }, + "node_modules/formdata-node/node_modules/web-streams-polyfill": { + "version": "4.0.0-beta.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", + "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -946,6 +1190,15 @@ "node": ">= 0.8" } }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -958,6 +1211,26 @@ "node": ">=0.10.0" } }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/ignore-by-default": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz", @@ -980,6 +1253,12 @@ "node": ">= 0.10" } }, + "node_modules/is-arrayish": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.4.tgz", + "integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==", + "license": "MIT" + }, "node_modules/is-binary-path": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", @@ -1026,6 +1305,18 @@ "node": ">=0.12.0" } }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/joi": { "version": "17.13.3", "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", @@ -1100,6 +1391,12 @@ "safe-buffer": "^5.0.1" } }, + "node_modules/kuler": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", + "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==", + "license": "MIT" + }, "node_modules/lodash.includes": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", @@ -1142,6 +1439,29 @@ "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==", "license": "MIT" }, + "node_modules/logform": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.7.0.tgz", + "integrity": "sha512-TFYA4jnP7PVbmlBIfhlSe+WKxs9dklXMTEGcBCIvLhE/Tn3H6Gk1norupVW7m5Cnd4bLcr08AytbyV/xj7f/kQ==", + "license": "MIT", + "dependencies": { + "@colors/colors": "1.6.0", + "@types/triple-beam": "^1.3.2", + "fecha": "^4.2.0", + "ms": "^2.1.1", + "safe-stable-stringify": "^2.3.1", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/logform/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -1267,6 +1587,74 @@ "node": ">= 0.6" } }, + "node_modules/neo4j-driver": { + "version": "5.28.2", + "resolved": "https://registry.npmjs.org/neo4j-driver/-/neo4j-driver-5.28.2.tgz", + "integrity": "sha512-nix4Canllf7Tl4FZL9sskhkKYoCp40fg7VsknSRTRgbm1JaE2F1Ej/c2nqlM06nqh3WrkI0ww3taVB+lem7w7w==", + "license": "Apache-2.0", + "dependencies": { + "neo4j-driver-bolt-connection": "5.28.2", + "neo4j-driver-core": "5.28.2", + "rxjs": "^7.8.2" + } + }, + "node_modules/neo4j-driver-bolt-connection": { + "version": "5.28.2", + "resolved": "https://registry.npmjs.org/neo4j-driver-bolt-connection/-/neo4j-driver-bolt-connection-5.28.2.tgz", + "integrity": "sha512-dEX06iNPEo9iyCb0NssxJeA3REN+H+U/Y0MdAjJBEoil4tGz5PxBNZL6/+noQnu2pBJT5wICepakXCrN3etboA==", + "license": "Apache-2.0", + "dependencies": { + "buffer": "^6.0.3", + "neo4j-driver-core": "5.28.2", + "string_decoder": "^1.3.0" + } + }, + "node_modules/neo4j-driver-core": { + "version": "5.28.2", + "resolved": "https://registry.npmjs.org/neo4j-driver-core/-/neo4j-driver-core-5.28.2.tgz", + "integrity": "sha512-fBMk4Ox379oOz4FcfdS6ZOxsTEypjkcAelNm9LcWQZ981xCdOnGMzlWL+qXECvL0qUwRfmZxoqbDlJzuzFrdvw==", + "license": "Apache-2.0" + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "deprecated": "Use your platform's native DOMException instead", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, "node_modules/nodemon": { "version": "2.0.22", "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-2.0.22.tgz", @@ -1365,6 +1753,15 @@ "node": ">= 0.8" } }, + "node_modules/one-time": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", + "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", + "license": "MIT", + "dependencies": { + "fn.name": "1.x.x" + } + }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", @@ -1586,6 +1983,20 @@ "node": ">= 0.8" } }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -1616,6 +2027,15 @@ "@redis/time-series": "1.1.0" } }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -1636,6 +2056,15 @@ ], "license": "MIT" }, + "node_modules/safe-stable-stringify": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -1784,6 +2213,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/simple-swizzle": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.4.tgz", + "integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, "node_modules/simple-update-notifier": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-1.1.0.tgz", @@ -1926,6 +2364,15 @@ "node": ">= 10.x" } }, + "node_modules/stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==", + "license": "MIT", + "engines": { + "node": "*" + } + }, "node_modules/statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", @@ -1935,6 +2382,15 @@ "node": ">= 0.8" } }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, "node_modules/supports-color": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", @@ -1948,6 +2404,12 @@ "node": ">=4" } }, + "node_modules/text-hex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", + "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==", + "license": "MIT" + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -1980,6 +2442,27 @@ "nodetouch": "bin/nodetouch.js" } }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/triple-beam": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.4.1.tgz", + "integrity": "sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==", + "license": "MIT", + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -2015,6 +2498,12 @@ "node": ">= 0.8" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, "node_modules/utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", @@ -2046,6 +2535,67 @@ "node": ">= 0.8" } }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/winston": { + "version": "3.17.0", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.17.0.tgz", + "integrity": "sha512-DLiFIXYC5fMPxaRg832S6F5mJYvePtmO5G9v9IgUFPhXm9/GkXarH/TUrBAVzhTCzAj9anE/+GjrgXp/54nOgw==", + "license": "MIT", + "dependencies": { + "@colors/colors": "^1.6.0", + "@dabh/diagnostics": "^2.0.2", + "async": "^3.2.3", + "is-stream": "^2.0.0", + "logform": "^2.7.0", + "one-time": "^1.0.0", + "readable-stream": "^3.4.0", + "safe-stable-stringify": "^2.3.1", + "stack-trace": "0.0.x", + "triple-beam": "^1.3.0", + "winston-transport": "^4.9.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/winston-transport": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.9.0.tgz", + "integrity": "sha512-8drMJ4rkgaPo1Me4zD/3WLfI/zPdA9o2IipKODunnGDcuqbHwjsbB79ylv04LCGGzU0xQ6vTznOMpQGaLhhm6A==", + "license": "MIT", + "dependencies": { + "logform": "^2.7.0", + "readable-stream": "^3.6.2", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, "node_modules/ws": { "version": "8.17.1", "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", diff --git a/services/template-manager/src/ai-service.js b/services/template-manager/src/ai-service.js index 125a9f3..940c8a6 100644 --- a/services/template-manager/src/ai-service.js +++ b/services/template-manager/src/ai-service.js @@ -5,6 +5,8 @@ const axios = require('axios'); const app = express(); const PORT = process.env.PORT || 8009; +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 + // Claude API configuration const CLAUDE_API_KEY = process.env.CLAUDE_API_KEY || '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'; const CLAUDE_AVAILABLE = !!CLAUDE_API_KEY; diff --git a/services/template-manager/src/migrations/001_initial_schema.sql b/services/template-manager/src/migrations/001_initial_schema.sql index 202295a..85bca61 100644 --- a/services/template-manager/src/migrations/001_initial_schema.sql +++ b/services/template-manager/src/migrations/001_initial_schema.sql @@ -1,11 +1,7 @@ -- Template Manager Database Schema -- Self-learning template and feature management system --- Drop tables if they exist (for development) -DROP TABLE IF EXISTS feature_usage CASCADE; -DROP TABLE IF EXISTS custom_features CASCADE; -DROP TABLE IF EXISTS template_features CASCADE; -DROP TABLE IF EXISTS templates CASCADE; +-- Create tables only if they don't exist (production-safe) -- Enable UUID extension (only if we have permission) DO $$ @@ -20,7 +16,7 @@ BEGIN END $$; -- Templates table -CREATE TABLE templates ( +CREATE TABLE IF NOT EXISTS templates ( id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), type VARCHAR(100) NOT NULL UNIQUE, title VARCHAR(200) NOT NULL, @@ -37,7 +33,7 @@ CREATE TABLE templates ( ); -- Template features table -CREATE TABLE template_features ( +CREATE TABLE IF NOT EXISTS template_features ( id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), template_id UUID REFERENCES templates(id) ON DELETE CASCADE, feature_id VARCHAR(100) NOT NULL, @@ -56,7 +52,7 @@ CREATE TABLE template_features ( ); -- Feature usage tracking -CREATE TABLE feature_usage ( +CREATE TABLE IF NOT EXISTS feature_usage ( id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), template_id UUID REFERENCES templates(id) ON DELETE CASCADE, feature_id UUID REFERENCES template_features(id) ON DELETE CASCADE, @@ -66,7 +62,7 @@ CREATE TABLE feature_usage ( ); -- User-added custom features -CREATE TABLE custom_features ( +CREATE TABLE IF NOT EXISTS custom_features ( id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), template_id UUID REFERENCES templates(id) ON DELETE CASCADE, name VARCHAR(200) NOT NULL, diff --git a/services/template-manager/src/migrations/009_ai_features.sql b/services/template-manager/src/migrations/009_ai_features.sql new file mode 100644 index 0000000..9903bdc --- /dev/null +++ b/services/template-manager/src/migrations/009_ai_features.sql @@ -0,0 +1,479 @@ +-- ===================================================== +-- 009_ai_features.sql +-- AI-related schema for Template Manager: keywords, recommendations, queue, triggers +-- Safe for existing monorepo by using IF EXISTS/OR REPLACE and drop-if-exists for triggers +-- ===================================================== + +-- ===================================================== +-- 1. CORE TABLES +-- NOTE: templates and custom_templates are already managed by existing migrations. +-- This migration intentionally does NOT create or modify those core tables. + +-- ===================================================== +-- 2. AI FEATURES TABLES +-- ===================================================== + +CREATE TABLE IF NOT EXISTS tech_stack_recommendations ( + id SERIAL PRIMARY KEY, + template_id UUID NOT NULL, + stack_name VARCHAR(255) NOT NULL, + monthly_cost DECIMAL(10,2) NOT NULL, + setup_cost DECIMAL(10,2) NOT NULL, + team_size VARCHAR(50) NOT NULL, + development_time INTEGER NOT NULL, + satisfaction INTEGER NOT NULL CHECK (satisfaction >= 0 AND satisfaction <= 100), + success_rate INTEGER NOT NULL CHECK (success_rate >= 0 AND success_rate <= 100), + frontend VARCHAR(255) NOT NULL, + backend VARCHAR(255) NOT NULL, + database VARCHAR(255) NOT NULL, + cloud VARCHAR(255) NOT NULL, + testing VARCHAR(255) NOT NULL, + mobile VARCHAR(255) NOT NULL, + devops VARCHAR(255) NOT NULL, + ai_ml VARCHAR(255) NOT NULL, + recommended_tool VARCHAR(255) NOT NULL, + recommendation_score DECIMAL(5,2) NOT NULL CHECK (recommendation_score >= 0 AND recommendation_score <= 100), + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS extracted_keywords ( + id SERIAL PRIMARY KEY, + template_id UUID NOT NULL, + template_source VARCHAR(20) NOT NULL CHECK (template_source IN ('templates', 'custom_templates')), + keywords_json JSONB NOT NULL, + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW(), + UNIQUE(template_id, template_source) +); + +CREATE TABLE IF NOT EXISTS migration_queue ( + id SERIAL PRIMARY KEY, + template_id UUID NOT NULL, + migration_type VARCHAR(50) NOT NULL, + status VARCHAR(20) DEFAULT 'pending' CHECK (status IN ('pending', 'processing', 'completed', 'failed')), + created_at TIMESTAMP DEFAULT NOW(), + processed_at TIMESTAMP, + error_message TEXT, + UNIQUE(template_id, migration_type) +); + +-- ===================================================== +-- 3. INDEXES (idempotent) +-- ===================================================== + +-- (No new indexes on templates/custom_templates here) + +CREATE INDEX IF NOT EXISTS idx_tech_stack_recommendations_template_id ON tech_stack_recommendations(template_id); +CREATE INDEX IF NOT EXISTS idx_tech_stack_recommendations_score ON tech_stack_recommendations(recommendation_score); + +CREATE INDEX IF NOT EXISTS idx_extracted_keywords_template_id ON extracted_keywords(template_id); +CREATE INDEX IF NOT EXISTS idx_extracted_keywords_template_source ON extracted_keywords(template_source); + +CREATE INDEX IF NOT EXISTS idx_migration_queue_status ON migration_queue(status); +CREATE INDEX IF NOT EXISTS idx_migration_queue_template_id ON migration_queue(template_id); + +-- ===================================================== +-- 4. FUNCTIONS (OR REPLACE) +-- ===================================================== + +CREATE OR REPLACE FUNCTION update_updated_at_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION extract_keywords_for_template() +RETURNS TRIGGER AS $$ +DECLARE + keywords_list TEXT[]; + title_keywords TEXT[]; + desc_keywords TEXT[]; + final_keywords TEXT[]; + word TEXT; + clean_word TEXT; +BEGIN + IF NEW.type IN ('_system', '_migration', '_test', '_auto_tech_stack_migration', '_extracted_keywords_fix', '_migration_test', '_automation_fix', '_migration_queue_fix', '_workflow_fix', '_sql_ambiguity_fix', '_consolidated_schema') THEN + RETURN NEW; + END IF; + + IF EXISTS (SELECT 1 FROM extracted_keywords WHERE template_id = NEW.id AND template_source = 'templates') THEN + RETURN NEW; + END IF; + + keywords_list := ARRAY[]::TEXT[]; + + IF NEW.title IS NOT NULL AND LENGTH(TRIM(NEW.title)) > 0 THEN + title_keywords := string_to_array(LOWER(REGEXP_REPLACE(NEW.title, '[^a-zA-Z0-9\s]', ' ', 'g')), ' '); + FOREACH word IN ARRAY title_keywords LOOP + clean_word := TRIM(word); + IF LENGTH(clean_word) > 2 AND clean_word NOT IN ('the','and','for','are','but','not','you','all','can','had','her','was','one','our','out','day','get','has','him','his','how','its','may','new','now','old','see','two','way','who','boy','did','man','men','put','say','she','too','use') THEN + keywords_list := array_append(keywords_list, clean_word); + END IF; + END LOOP; + END IF; + + IF NEW.description IS NOT NULL AND LENGTH(TRIM(NEW.description)) > 0 THEN + desc_keywords := string_to_array(LOWER(REGEXP_REPLACE(NEW.description, '[^a-zA-Z0-9\s]', ' ', 'g')), ' '); + FOREACH word IN ARRAY desc_keywords LOOP + clean_word := TRIM(word); + IF LENGTH(clean_word) > 2 AND clean_word NOT IN ('the','and','for','are','but','not','you','all','can','had','her','was','one','our','out','day','get','has','him','his','how','its','may','new','now','old','see','two','way','who','boy','did','man','men','put','say','she','too','use') THEN + keywords_list := array_append(keywords_list, clean_word); + END IF; + END LOOP; + END IF; + + IF NEW.category IS NOT NULL THEN + keywords_list := array_append(keywords_list, LOWER(REGEXP_REPLACE(NEW.category, '[^a-zA-Z0-9]', '_', 'g'))); + END IF; + + IF NEW.type IS NOT NULL THEN + keywords_list := array_append(keywords_list, LOWER(REGEXP_REPLACE(NEW.type, '[^a-zA-Z0-9]', '_', 'g'))); + END IF; + + SELECT ARRAY( + SELECT DISTINCT unnest(keywords_list) + ORDER BY 1 + LIMIT 15 + ) INTO final_keywords; + + WHILE array_length(final_keywords, 1) < 8 LOOP + final_keywords := array_append(final_keywords, 'business_enterprise'); + END LOOP; + + INSERT INTO extracted_keywords (template_id, template_source, keywords_json) + VALUES (NEW.id, 'templates', to_jsonb(final_keywords)); + + RETURN NEW; +EXCEPTION WHEN OTHERS THEN + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION extract_keywords_for_custom_template() +RETURNS TRIGGER AS $$ +DECLARE + keywords_list TEXT[]; + title_keywords TEXT[]; + desc_keywords TEXT[]; + final_keywords TEXT[]; + word TEXT; + clean_word TEXT; +BEGIN + IF EXISTS (SELECT 1 FROM extracted_keywords WHERE template_id = NEW.id AND template_source = 'custom_templates') THEN + RETURN NEW; + END IF; + + keywords_list := ARRAY[]::TEXT[]; + + IF NEW.title IS NOT NULL AND LENGTH(TRIM(NEW.title)) > 0 THEN + title_keywords := string_to_array(LOWER(REGEXP_REPLACE(NEW.title, '[^a-zA-Z0-9\s]', ' ', 'g')), ' '); + FOREACH word IN ARRAY title_keywords LOOP + clean_word := TRIM(word); + IF LENGTH(clean_word) > 2 AND clean_word NOT IN ('the','and','for','are','but','not','you','all','can','had','her','was','one','our','out','day','get','has','him','his','how','its','may','new','now','old','see','two','way','who','boy','did','man','men','put','say','she','too','use') THEN + keywords_list := array_append(keywords_list, clean_word); + END IF; + END LOOP; + END IF; + + IF NEW.description IS NOT NULL AND LENGTH(TRIM(NEW.description)) > 0 THEN + desc_keywords := string_to_array(LOWER(REGEXP_REPLACE(NEW.description, '[^a-zA-Z0-9\s]', ' ', 'g')), ' '); + FOREACH word IN ARRAY desc_keywords LOOP + clean_word := TRIM(word); + IF LENGTH(clean_word) > 2 AND clean_word NOT IN ('the','and','for','are','but','not','you','all','can','had','her','was','one','our','out','day','get','has','him','his','how','its','may','new','now','old','see','two','way','who','boy','did','man','men','put','say','she','too','use') THEN + keywords_list := array_append(keywords_list, clean_word); + END IF; + END LOOP; + END IF; + + IF NEW.category IS NOT NULL THEN + keywords_list := array_append(keywords_list, LOWER(REGEXP_REPLACE(NEW.category, '[^a-zA-Z0-9]', '_', 'g'))); + END IF; + + IF NEW.type IS NOT NULL THEN + keywords_list := array_append(keywords_list, LOWER(REGEXP_REPLACE(NEW.type, '[^a-zA-Z0-9]', '_', 'g'))); + END IF; + + SELECT ARRAY( + SELECT DISTINCT unnest(keywords_list) + ORDER BY 1 + LIMIT 15 + ) INTO final_keywords; + + WHILE array_length(final_keywords, 1) < 8 LOOP + final_keywords := array_append(final_keywords, 'business_enterprise'); + END LOOP; + + INSERT INTO extracted_keywords (template_id, template_source, keywords_json) + VALUES (NEW.id, 'custom_templates', to_jsonb(final_keywords)); + + RETURN NEW; +EXCEPTION WHEN OTHERS THEN + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION generate_tech_stack_recommendation() +RETURNS TRIGGER AS $$ +DECLARE + keywords_json_data JSONB; + keywords_list TEXT[]; + stack_name TEXT; + monthly_cost DECIMAL(10,2); + setup_cost DECIMAL(10,2); + team_size TEXT; + development_time INTEGER; + satisfaction INTEGER; + success_rate INTEGER; + frontend TEXT; + backend TEXT; + database_tech TEXT; + cloud TEXT; + testing TEXT; + mobile TEXT; + devops TEXT; + ai_ml TEXT; + recommended_tool TEXT; + recommendation_score DECIMAL(5,2); +BEGIN + IF NEW.type IN ('_system', '_migration', '_test', '_auto_tech_stack_migration', '_extracted_keywords_fix', '_migration_test', '_automation_fix', '_migration_queue_fix', '_workflow_fix', '_sql_ambiguity_fix', '_consolidated_schema') THEN + RETURN NEW; + END IF; + + IF EXISTS (SELECT 1 FROM tech_stack_recommendations WHERE template_id = NEW.id) THEN + RETURN NEW; + END IF; + + SELECT ek.keywords_json INTO keywords_json_data + FROM extracted_keywords ek + WHERE ek.template_id = NEW.id AND ek.template_source = 'templates' + ORDER BY ek.created_at DESC LIMIT 1; + + IF keywords_json_data IS NULL THEN + INSERT INTO tech_stack_recommendations ( + template_id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score + ) VALUES ( + NEW.id, NEW.title || ' Tech Stack', 100.0, 2000.0, '3-5', + 6, 85, 90, 'React.js', 'Node.js', + 'PostgreSQL', 'AWS', 'Jest', 'React Native', 'Docker', 'TensorFlow', 'Custom Tool', + 85.0 + ); + + INSERT INTO migration_queue (template_id, migration_type, status, created_at) + VALUES (NEW.id, 'tech_stack_recommendation', 'pending', NOW()) + ON CONFLICT (template_id, migration_type) DO UPDATE SET + status = 'pending', created_at = NOW(), processed_at = NULL, error_message = NULL; + + RETURN NEW; + END IF; + + SELECT ARRAY(SELECT jsonb_array_elements_text(keywords_json_data)) INTO keywords_list; + + stack_name := NEW.title || ' AI-Recommended Tech Stack'; + + CASE NEW.category + WHEN 'Healthcare' THEN + monthly_cost := 200.0; setup_cost := 5000.0; team_size := '6-8'; development_time := 10; + satisfaction := 92; success_rate := 90; frontend := 'React.js'; backend := 'Java Spring Boot'; + database_tech := 'MongoDB'; cloud := 'AWS'; testing := 'JUnit'; mobile := 'Flutter'; devops := 'Jenkins'; + ai_ml := 'TensorFlow'; recommended_tool := 'Salesforce Health Cloud'; recommendation_score := 94.0; + WHEN 'E-commerce' THEN + monthly_cost := 150.0; setup_cost := 3000.0; team_size := '4-6'; development_time := 8; + satisfaction := 88; success_rate := 92; frontend := 'Next.js'; backend := 'Node.js'; + database_tech := 'MongoDB'; cloud := 'AWS'; testing := 'Jest'; mobile := 'React Native'; devops := 'Docker'; + ai_ml := 'TensorFlow'; recommended_tool := 'Shopify'; recommendation_score := 90.0; + ELSE + monthly_cost := 100.0; setup_cost := 2000.0; team_size := '3-5'; development_time := 6; + satisfaction := 85; success_rate := 90; frontend := 'React.js'; backend := 'Node.js'; + database_tech := 'PostgreSQL'; cloud := 'AWS'; testing := 'Jest'; mobile := 'React Native'; devops := 'Docker'; + ai_ml := 'TensorFlow'; recommended_tool := 'Custom Tool'; recommendation_score := 85.0; + END CASE; + + INSERT INTO tech_stack_recommendations ( + template_id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score + ) VALUES ( + NEW.id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database_tech, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score + ); + + INSERT INTO migration_queue (template_id, migration_type, status, created_at) + VALUES (NEW.id, 'tech_stack_recommendation', 'pending', NOW()) + ON CONFLICT (template_id, migration_type) DO UPDATE SET + status = 'pending', created_at = NOW(), processed_at = NULL, error_message = NULL; + + RETURN NEW; +EXCEPTION WHEN OTHERS THEN + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION generate_tech_stack_recommendation_custom() +RETURNS TRIGGER AS $$ +DECLARE + keywords_json_data JSONB; + keywords_list TEXT[]; + stack_name TEXT; + monthly_cost DECIMAL(10,2); + setup_cost DECIMAL(10,2); + team_size TEXT; + development_time INTEGER; + satisfaction INTEGER; + success_rate INTEGER; + frontend TEXT; + backend TEXT; + database_tech TEXT; + cloud TEXT; + testing TEXT; + mobile TEXT; + devops TEXT; + ai_ml TEXT; + recommended_tool TEXT; + recommendation_score DECIMAL(5,2); +BEGIN + IF EXISTS (SELECT 1 FROM tech_stack_recommendations WHERE template_id = NEW.id) THEN + RETURN NEW; + END IF; + + SELECT ek.keywords_json INTO keywords_json_data + FROM extracted_keywords ek + WHERE ek.template_id = NEW.id AND ek.template_source = 'custom_templates' + ORDER BY ek.created_at DESC LIMIT 1; + + IF keywords_json_data IS NULL THEN + INSERT INTO tech_stack_recommendations ( + template_id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score + ) VALUES ( + NEW.id, NEW.title || ' Custom Tech Stack', 180.0, 3500.0, '5-7', + 9, 88, 92, 'Vue.js', 'Python Django', + 'MongoDB', 'Google Cloud', 'Cypress', 'Flutter', 'Kubernetes', 'PyTorch', 'Custom Business Tool', + 90.0 + ); + + INSERT INTO migration_queue (template_id, migration_type, status, created_at) + VALUES (NEW.id, 'tech_stack_recommendation', 'pending', NOW()) + ON CONFLICT (template_id, migration_type) DO UPDATE SET + status = 'pending', created_at = NOW(), processed_at = NULL, error_message = NULL; + + RETURN NEW; + END IF; + + SELECT ARRAY(SELECT jsonb_array_elements_text(keywords_json_data)) INTO keywords_list; + + stack_name := NEW.title || ' Custom AI-Recommended Tech Stack'; + + CASE NEW.category + WHEN 'Healthcare' THEN + monthly_cost := 250.0; setup_cost := 6000.0; team_size := '7-9'; development_time := 12; + satisfaction := 94; success_rate := 92; frontend := 'React.js'; backend := 'Java Spring Boot'; + database_tech := 'MongoDB'; cloud := 'AWS'; testing := 'JUnit'; mobile := 'Flutter'; devops := 'Jenkins'; + ai_ml := 'TensorFlow'; recommended_tool := 'Custom Healthcare Tool'; recommendation_score := 95.0; + WHEN 'E-commerce' THEN + monthly_cost := 200.0; setup_cost := 4000.0; team_size := '5-7'; development_time := 10; + satisfaction := 90; success_rate := 94; frontend := 'Next.js'; backend := 'Node.js'; + database_tech := 'MongoDB'; cloud := 'AWS'; testing := 'Jest'; mobile := 'React Native'; devops := 'Docker'; + ai_ml := 'TensorFlow'; recommended_tool := 'Custom E-commerce Tool'; recommendation_score := 92.0; + ELSE + monthly_cost := 180.0; setup_cost := 3500.0; team_size := '5-7'; development_time := 9; + satisfaction := 88; success_rate := 92; frontend := 'Vue.js'; backend := 'Python Django'; + database_tech := 'MongoDB'; cloud := 'Google Cloud'; testing := 'Cypress'; mobile := 'Flutter'; devops := 'Kubernetes'; + ai_ml := 'PyTorch'; recommended_tool := 'Custom Business Tool'; recommendation_score := 90.0; + END CASE; + + INSERT INTO tech_stack_recommendations ( + template_id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score + ) VALUES ( + NEW.id, stack_name, monthly_cost, setup_cost, team_size, + development_time, satisfaction, success_rate, frontend, backend, + database_tech, cloud, testing, mobile, devops, ai_ml, recommended_tool, + recommendation_score + ); + + INSERT INTO migration_queue (template_id, migration_type, status, created_at) + VALUES (NEW.id, 'tech_stack_recommendation', 'pending', NOW()) + ON CONFLICT (template_id, migration_type) DO UPDATE SET + status = 'pending', created_at = NOW(), processed_at = NULL, error_message = NULL; + + RETURN NEW; +EXCEPTION WHEN OTHERS THEN + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- ===================================================== +-- 5. TRIGGERS (conditionally create AI-related triggers only) +-- ===================================================== + +-- Keyword extraction triggers (create if not exists) +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_trigger WHERE tgname = 'auto_extract_keywords' + ) THEN + CREATE TRIGGER auto_extract_keywords + AFTER INSERT ON templates + FOR EACH ROW + EXECUTE FUNCTION extract_keywords_for_template(); + END IF; +END $$; + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_trigger WHERE tgname = 'auto_extract_keywords_custom' + ) THEN + CREATE TRIGGER auto_extract_keywords_custom + AFTER INSERT ON custom_templates + FOR EACH ROW + EXECUTE FUNCTION extract_keywords_for_custom_template(); + END IF; +END $$; + +-- AI recommendation triggers (create if not exists) +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_trigger WHERE tgname = 'auto_generate_tech_stack_recommendation' + ) THEN + CREATE TRIGGER auto_generate_tech_stack_recommendation + AFTER INSERT ON templates + FOR EACH ROW + EXECUTE FUNCTION generate_tech_stack_recommendation(); + END IF; +END $$; + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_trigger WHERE tgname = 'auto_generate_tech_stack_recommendation_custom' + ) THEN + CREATE TRIGGER auto_generate_tech_stack_recommendation_custom + AFTER INSERT ON custom_templates + FOR EACH ROW + EXECUTE FUNCTION generate_tech_stack_recommendation_custom(); + END IF; +END $$; + +-- Success marker (idempotent) +DO $$ BEGIN + INSERT INTO templates (type, title, description, category) + VALUES ('_consolidated_schema', 'Consolidated Schema', 'AI features added via 009_ai_features', 'System') + ON CONFLICT (type) DO NOTHING; +END $$; + + diff --git a/services/template-manager/src/migrations/migrate.js b/services/template-manager/src/migrations/migrate.js index 430edca..0cb51e8 100644 --- a/services/template-manager/src/migrations/migrate.js +++ b/services/template-manager/src/migrations/migrate.js @@ -32,35 +32,8 @@ async function runMigrations() { console.log('🚀 Starting template-manager database migrations...'); try { - // Optionally bootstrap shared pipeline schema if requested and missing - const applySchemas = String(process.env.APPLY_SCHEMAS_SQL || '').toLowerCase() === 'true'; - if (applySchemas) { - try { - const probe = await database.query("SELECT to_regclass('public.projects') AS tbl"); - const hasProjects = !!(probe.rows && probe.rows[0] && probe.rows[0].tbl); - if (!hasProjects) { - const schemasPath = path.join(__dirname, '../../../../databases/scripts/schemas.sql'); - if (fs.existsSync(schemasPath)) { - console.log('📦 Applying shared pipeline schemas.sql (projects, tech_stack_decisions, etc.)...'); - let schemasSQL = fs.readFileSync(schemasPath, 'utf8'); - // Remove psql meta-commands like \c dev_pipeline that the driver cannot execute - schemasSQL = schemasSQL - .split('\n') - .filter(line => !/^\s*\\/.test(line)) - .join('\n'); - await database.query(schemasSQL); - console.log('✅ schemas.sql applied'); - } else { - console.log('⚠️ schemas.sql not found at expected path, skipping'); - } - } else { - console.log('⏭️ Shared pipeline schema already present (projects exists), skipping schemas.sql'); - } - } catch (e) { - console.error('❌ Failed applying schemas.sql:', e.message); - throw e; - } - } + // Skip shared pipeline schema - it should be handled by the main migration service + console.log('⏭️ Skipping shared pipeline schema - handled by main migration service'); // Create migrations tracking table first await createMigrationsTable(); diff --git a/services/template-manager/start.sh b/services/template-manager/start.sh new file mode 100644 index 0000000..3a07c17 --- /dev/null +++ b/services/template-manager/start.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env sh +set -e + +# Start Python AI service in background on 8013 +if [ -f "/app/ai/tech_stack_service.py" ]; then + echo "Starting Template Manager AI (FastAPI) on 8013..." + python3 /app/ai/tech_stack_service.py & +else + echo "AI service not found at /app/ai/tech_stack_service.py; skipping AI startup" +fi + +# Start Node Template Manager on 8009 (foreground) +echo "Starting Template Manager (Node) on 8009..." +npm start + + diff --git a/services/unison/.gitignore b/services/unison/.gitignore new file mode 100644 index 0000000..e1d1047 --- /dev/null +++ b/services/unison/.gitignore @@ -0,0 +1,126 @@ +# Dependencies +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Environment variables +.env +.env.local +.env.development.local +.env.test.local +.env.production.local + +# Logs +logs/ +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Coverage directory used by tools like istanbul +coverage/ +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage +.grunt + +# Bower dependency directory +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# TypeScript v1 declaration files +typings/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test + +# parcel-bundler cache +.cache +.parcel-cache + +# Next.js build output +.next + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +public + +# Storybook build outputs +.out +.storybook-out + +# Temporary folders +tmp/ +temp/ + +# Editor directories and files +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS generated files +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# Docker +.dockerignore + +# Test files +test-results/ +coverage/ diff --git a/services/unison/Dockerfile b/services/unison/Dockerfile new file mode 100644 index 0000000..a89c84f --- /dev/null +++ b/services/unison/Dockerfile @@ -0,0 +1,52 @@ +FROM node:18-alpine + +# Set working directory +WORKDIR /app + +# Install system dependencies +RUN apk add --no-cache \ + curl \ + bash \ + && rm -rf /var/cache/apk/* + +# Create non-root user +RUN addgroup -g 1001 -S nodejs && \ + adduser -S unison -u 1001 -G nodejs + +# Copy package files +COPY package*.json ./ + +# Install dependencies +RUN npm ci --only=production && \ + npm cache clean --force + +# Copy source code +COPY src/ ./src/ + +# Copy environment configuration +COPY config.env ./ + +# Create logs directory +RUN mkdir -p logs && \ + chown -R unison:nodejs logs + +# Change ownership of app directory +RUN chown -R unison:nodejs /app + +# Switch to non-root user +USER unison + +# Expose port +EXPOSE 8010 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ + CMD curl -f http://localhost:8010/health || exit 1 + +# Set environment variables +ENV NODE_ENV=production +ENV PORT=8010 +ENV HOST=0.0.0.0 + +# Start the application +CMD ["node", "src/app.js"] diff --git a/services/unison/ENDPOINT_ANALYSIS.md b/services/unison/ENDPOINT_ANALYSIS.md new file mode 100644 index 0000000..8ce2e54 --- /dev/null +++ b/services/unison/ENDPOINT_ANALYSIS.md @@ -0,0 +1,199 @@ +# Unison Service - Endpoint Analysis Report + +## 📊 Service Overview +- **Service Name**: Unison - Unified Tech Stack Recommendation Service +- **Version**: 1.0.0 +- **Port**: 8014 (external) → 8010 (internal) +- **Status**: ✅ OPERATIONAL +- **Base URL**: `http://localhost:8014` + +## 🔗 Complete Endpoint Inventory + +### 1. **Root Endpoint** +- **URL**: `GET /` +- **Purpose**: Service information and available endpoints +- **Status**: ✅ WORKING +- **Response**: Service metadata, version, available endpoints, external service URLs + +### 2. **Health Endpoints** + +#### 2.1 Basic Health Check +- **URL**: `GET /health` +- **Purpose**: Service health status with external service checks +- **Status**: ✅ WORKING +- **Features**: + - Service uptime and memory usage + - External service health checks (tech-stack-selector, template-manager) + - Response time monitoring + - Feature availability status + +#### 2.2 Detailed Health Check +- **URL**: `GET /health/detailed` +- **Purpose**: Comprehensive system information +- **Status**: ✅ WORKING +- **Features**: + - Node.js version and platform info + - Detailed memory and CPU usage + - Process information (PID) + - Configuration details + +### 3. **Recommendation Endpoints** + +#### 3.1 Unified Recommendations (Main Endpoint) +- **URL**: `POST /api/recommendations/unified` +- **Purpose**: Get unified tech stack recommendations combining both services +- **Status**: ✅ WORKING +- **Request Body**: + ```json + { + "domain": "string", + "budget": "number", + "preferredTechnologies": ["string"], + "templateId": "string (optional)", + "includeSimilar": "boolean (optional)", + "includeKeywords": "boolean (optional)", + "forceRefresh": "boolean (optional)" + } + ``` +- **Features**: + - Combines recommendations from tech-stack-selector and template-manager + - Uses Claude AI for unified recommendations + - Fallback to single service if others unavailable + - Comprehensive error handling + +#### 3.2 Tech Stack Only +- **URL**: `GET /api/recommendations/tech-stack` +- **Purpose**: Get recommendations from tech-stack-selector only +- **Status**: ✅ WORKING +- **Query Parameters**: + - `domain` (optional): Domain for recommendations + - `budget` (optional): Budget constraint + - `preferredTechnologies` (optional): Comma-separated list + +#### 3.3 Template Only +- **URL**: `GET /api/recommendations/template/:templateId` +- **Purpose**: Get recommendations from template-manager only +- **Status**: ✅ WORKING +- **Path Parameters**: + - `templateId`: UUID of the template +- **Query Parameters**: + - `force_refresh` (optional): Force refresh recommendations + +#### 3.4 Schema Information +- **URL**: `GET /api/recommendations/schemas` +- **Purpose**: Get available validation schemas +- **Status**: ✅ WORKING +- **Response**: Available schemas and their definitions + +### 4. **Error Handling** + +#### 4.1 404 Handler +- **URL**: `*` (catch-all) +- **Purpose**: Handle non-existent routes +- **Status**: ✅ WORKING +- **Response**: Error message with available endpoints list + +## 🧪 Endpoint Testing Results + +| Endpoint | Method | Status | Response Time | Notes | +|----------|--------|--------|---------------|-------| +| `/` | GET | ✅ | ~5ms | Service info returned correctly | +| `/health` | GET | ✅ | ~12ms | All external services healthy | +| `/health/detailed` | GET | ✅ | ~5ms | Detailed system info available | +| `/api/recommendations/tech-stack` | GET | ✅ | ~50ms | 10 recommendations returned | +| `/api/recommendations/schemas` | GET | ✅ | ~10ms | 3 schemas available | +| `/api/recommendations/unified` | POST | ✅ | ~11ms | Working with fallback | +| `/api/recommendations/template/:id` | GET | ✅ | ~15ms | Template service responding | +| `/nonexistent` | GET | ✅ | ~5ms | 404 handler working | + +## 🔧 Service Dependencies + +### External Services Status +- **Tech Stack Selector**: ✅ HEALTHY (http://pipeline_tech_stack_selector:8002) +- **Template Manager**: ✅ HEALTHY (http://pipeline_template_manager:8009) +- **Claude AI**: ✅ CONFIGURED (API key present) + +### Internal Services +- **Schema Validator**: ✅ WORKING (3 schemas available) +- **Logger**: ✅ WORKING (Winston-based logging) +- **Error Handler**: ✅ WORKING (Comprehensive error handling) + +## 📈 Performance Metrics + +### Response Times +- **Average Response Time**: ~15ms +- **Health Check**: ~12ms +- **Tech Stack Recommendations**: ~50ms +- **Unified Recommendations**: ~11ms + +### Memory Usage +- **Used Memory**: 16 MB +- **Total Memory**: 18 MB +- **External Memory**: 3 MB + +### Uptime +- **Current Uptime**: 222+ seconds +- **Service Status**: Stable + +## 🛡️ Security Features + +### Middleware Stack +1. **Helmet**: Security headers +2. **CORS**: Cross-origin resource sharing +3. **Rate Limiting**: 100 requests per 15 minutes +4. **Request Validation**: Input validation +5. **Compression**: Response compression + +### Rate Limiting +- **Window**: 15 minutes (900,000ms) +- **Max Requests**: 100 per IP +- **Headers**: Standard rate limit headers included + +## 📝 Request/Response Examples + +### Unified Recommendation Request +```bash +curl -X POST http://localhost:8014/api/recommendations/unified \ + -H "Content-Type: application/json" \ + -d '{ + "domain": "e-commerce", + "budget": 1000.0, + "preferredTechnologies": ["React", "Node.js", "PostgreSQL"] + }' +``` + +### Health Check Request +```bash +curl http://localhost:8014/health +``` + +### Tech Stack Only Request +```bash +curl "http://localhost:8014/api/recommendations/tech-stack?domain=web%20development&budget=500" +``` + +## ✅ Summary + +**All endpoints are working properly!** The Unison service is fully operational with: + +- ✅ 8 endpoints tested and working +- ✅ All external dependencies healthy +- ✅ Comprehensive error handling +- ✅ Proper validation and security +- ✅ Fast response times +- ✅ Detailed logging and monitoring + +The service successfully provides unified tech stack recommendations by combining data from multiple sources and using Claude AI for intelligent unification. + +## 🚀 Next Steps + +1. **Monitor Performance**: Track response times and memory usage +2. **Add Metrics**: Consider adding Prometheus metrics +3. **Load Testing**: Test under high load conditions +4. **Documentation**: Update API documentation with examples +5. **Monitoring**: Set up alerts for service health + +--- +*Generated on: 2025-09-22T05:01:45.120Z* +*Service Version: 1.0.0* +*Status: OPERATIONAL* diff --git a/services/unison/README.md b/services/unison/README.md new file mode 100644 index 0000000..5da923d --- /dev/null +++ b/services/unison/README.md @@ -0,0 +1,408 @@ +# Unison - Unified Tech Stack Recommendation Service + +Unison is a production-ready Node.js service that combines recommendations from both the `tech-stack-selector` and `template-manager` services, then uses Claude AI to generate a single, optimized tech stack recommendation that balances cost, domain requirements, and template-feature compatibility. + +## 🚀 Features + +- **Unified Recommendations**: Combines recommendations from both tech-stack-selector and template-manager services +- **Claude AI Integration**: Uses Claude AI to analyze and optimize recommendations +- **Robust Error Handling**: Graceful fallbacks when services are unavailable +- **Schema Validation**: Strict JSON schema validation using Ajv +- **Production Ready**: Comprehensive logging, health checks, and monitoring +- **Rate Limiting**: Built-in rate limiting to prevent abuse +- **Docker Support**: Fully containerized with Docker and Docker Compose + +## 📋 Prerequisites + +- Node.js 18+ +- Docker and Docker Compose +- Access to tech-stack-selector service (port 8002) +- Access to template-manager service (ports 8009, 8013) +- Claude API key (optional, service works with fallbacks) + +## 🏗️ Architecture + +``` +┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ +│ Client App │───▶│ Unison Service │───▶│ Claude AI API │ +└─────────────────┘ │ (Port 8010) │ └─────────────────┘ + └─────────┬────────┘ + │ + ┌────────────┼────────────┐ + │ │ │ + ┌───────▼──────┐ ┌───▼────┐ ┌────▼──────┐ + │ Tech Stack │ │Template│ │Template │ + │ Selector │ │Manager │ │Manager AI │ + │ (Port 8002) │ │(8009) │ │(Port 8013)│ + └──────────────┘ └────────┘ └───────────┘ +``` + +## 🛠️ Installation + +### Using Docker Compose (Recommended) + +The Unison service is already integrated into the main `docker-compose.yml` file. To start it: + +```bash +# Start all services including Unison +docker-compose up -d unison + +# Or start the entire stack +docker-compose up -d +``` + +### Manual Installation + +1. **Clone and navigate to the service directory:** + ```bash + cd services/unison + ``` + +2. **Install dependencies:** + ```bash + npm install + ``` + +3. **Set up environment variables:** + ```bash + # The config.env file is already configured with all necessary variables + # You can modify it if needed for your specific setup + cp config.env .env # Optional: create a .env file from config.env + ``` + +4. **Start the service:** + ```bash + npm start + # Or for development + npm run dev + ``` + +## ⚙️ Configuration + +### Environment Variables + +The service uses a `config.env` file for environment variables. This file is already configured with all necessary variables for the Unison service and integrates with your existing infrastructure. + +**Key Configuration Sections:** +- **Service Configuration**: Port, host, environment settings +- **External Service URLs**: Tech stack selector and template manager endpoints +- **Claude AI Configuration**: API key (model and token settings use defaults) +- **Database Configuration**: PostgreSQL, Neo4j, Redis, MongoDB settings +- **Security & Authentication**: JWT secrets and API keys +- **Email Configuration**: SMTP settings for notifications +- **CORS Configuration**: Cross-origin resource sharing settings + +| Variable | Default | Description | +|----------|---------|-------------| +| `NODE_ENV` | `production` | Environment mode | +| `PORT` | `8010` | Service port | +| `HOST` | `0.0.0.0` | Service host | +| `TECH_STACK_SELECTOR_URL` | `http://pipeline_tech_stack_selector:8002` | Tech stack selector service URL | +| `TEMPLATE_MANAGER_URL` | `http://pipeline_template_manager:8009` | Template manager service URL | +| `TEMPLATE_MANAGER_AI_URL` | `http://pipeline_template_manager:8013` | Template manager AI service URL | +| `CLAUDE_API_KEY` | `${CLAUDE_API_KEY}` | Claude API key (from environment) | +| `CLAUDE_MODEL` | `claude-3-sonnet-20240229` | Claude model to use | +| `CLAUDE_MAX_TOKENS` | `4000` | Maximum tokens for Claude | +| `RATE_LIMIT_WINDOW_MS` | `900000` | Rate limit window (15 minutes) | +| `RATE_LIMIT_MAX_REQUESTS` | `100` | Max requests per window | +| `LOG_LEVEL` | `info` | Logging level | +| `REQUEST_TIMEOUT` | `30000` | Request timeout in ms | +| `HEALTH_CHECK_TIMEOUT` | `5000` | Health check timeout in ms | + +## 📡 API Endpoints + +### Base URL +``` +http://localhost:8010 +``` + +### Endpoints + +#### 1. **POST** `/api/recommendations/unified` +Get unified tech stack recommendation combining both services. + +**Request Body:** +```json +{ + "domain": "web development", + "budget": 500.0, + "preferredTechnologies": ["React", "Node.js", "PostgreSQL"], + "templateId": "uuid-string", + "includeSimilar": true, + "includeKeywords": true, + "forceRefresh": false +} +``` + +**Response:** +```json +{ + "success": true, + "data": { + "stack_name": "Game Development Stack", + "monthly_cost": 199, + "setup_cost": 1200, + "team_size": "3-5", + "development_time": 5, + "satisfaction": 92, + "success_rate": 85, + "frontend": "Unity", + "backend": "Node.js", + "database": "MongoDB", + "cloud": "AWS GameLift", + "testing": "Unity Test Framework", + "mobile": "Unity Mobile", + "devops": "Jenkins", + "ai_ml": "ML.NET", + "recommended_tool": "Discord", + "recommendation_score": 94.5, + "message": "AI recommendations retrieved successfully" + }, + "source": "unified", + "message": "Unified recommendation generated successfully", + "processingTime": 1250, + "services": { + "techStackSelector": "available", + "templateManager": "available", + "claudeAI": "available" + }, + "claudeModel": "claude-3-sonnet-20240229" +} +``` + +#### 2. **GET** `/api/recommendations/tech-stack` +Get recommendations from tech-stack-selector only. + +**Query Parameters:** +- `domain` (optional): Domain for recommendations +- `budget` (optional): Budget constraint +- `preferredTechnologies` (optional): Comma-separated list of preferred technologies + +#### 3. **GET** `/api/recommendations/template/:templateId` +Get recommendations from template-manager only. + +**Query Parameters:** +- `force_refresh` (optional): Force refresh recommendations + +#### 4. **GET** `/api/recommendations/schemas` +Get available validation schemas. + +#### 5. **GET** `/health` +Health check endpoint. + +#### 6. **GET** `/` +Service information and available endpoints. + +## 🔧 Usage Examples + +### Basic Unified Recommendation + +```bash +curl -X POST http://localhost:8010/api/recommendations/unified \ + -H "Content-Type: application/json" \ + -d '{ + "domain": "e-commerce", + "budget": 1000.0, + "preferredTechnologies": ["Vue.js", "Django", "Redis"] + }' +``` + +### With Template ID + +```bash +curl -X POST http://localhost:8010/api/recommendations/unified \ + -H "Content-Type: application/json" \ + -d '{ + "domain": "startup", + "budget": 100.0, + "templateId": "123e4567-e89b-12d3-a456-426614174000", + "includeSimilar": true, + "forceRefresh": true + }' +``` + +### Tech Stack Only + +```bash +curl "http://localhost:8010/api/recommendations/tech-stack?domain=web%20development&budget=500" +``` + +### Template Only + +```bash +curl "http://localhost:8010/api/recommendations/template/123e4567-e89b-12d3-a456-426614174000?force_refresh=true" +``` + +## 🏥 Health Monitoring + +### Health Check +```bash +curl http://localhost:8010/health +``` + +### Detailed Health Check +```bash +curl http://localhost:8010/health/detailed +``` + +## 📊 Response Schema + +The unified recommendation follows a strict JSON schema: + +```json +{ + "stack_name": "string (descriptive name)", + "monthly_cost": "number (0-10000)", + "setup_cost": "number (0-50000)", + "team_size": "string (e.g., '1-2', '3-5')", + "development_time": "number (1-52 weeks)", + "satisfaction": "number (0-100)", + "success_rate": "number (0-100)", + "frontend": "string (frontend technology)", + "backend": "string (backend technology)", + "database": "string (database technology)", + "cloud": "string (cloud platform)", + "testing": "string (testing framework)", + "mobile": "string (mobile technology)", + "devops": "string (devops tool)", + "ai_ml": "string (AI/ML technology)", + "recommended_tool": "string (primary tool)", + "recommendation_score": "number (0-100)", + "message": "string (explanation)" +} +``` + +## 🔄 Service Dependencies + +Unison depends on the following services: + +1. **tech-stack-selector** (port 8002) + - Provides budget and domain-based recommendations + - Must be healthy for full functionality + +2. **template-manager** (ports 8009, 8013) + - Provides template-based recommendations + - AI service on port 8013 for Claude integration + - Must be healthy for full functionality + +3. **Claude AI** (external) + - Optional but recommended for unified recommendations + - Falls back to tech-stack-selector if unavailable + +## 🚨 Error Handling + +The service includes comprehensive error handling: + +- **Service Unavailable**: Falls back to available services +- **Invalid Requests**: Returns detailed validation errors +- **Claude AI Errors**: Falls back to tech-stack-selector +- **Schema Validation**: Ensures response format compliance +- **Rate Limiting**: Prevents abuse with configurable limits + +## 📝 Logging + +Logs are written to: +- Console (development) +- `logs/combined.log` (all logs) +- `logs/error.log` (error logs only) + +Log levels: `error`, `warn`, `info`, `debug` + +## 🧪 Testing + +```bash +# Run tests +npm test + +# Run with coverage +npm run test:coverage + +# Lint code +npm run lint +``` + +## 🐳 Docker + +### Build Image +```bash +docker build -t unison . +``` + +### Run Container +```bash +docker run -p 8010:8010 \ + -e CLAUDE_API_KEY=your_key_here \ + -e TECH_STACK_SELECTOR_URL=http://tech-stack-selector:8002 \ + -e TEMPLATE_MANAGER_URL=http://template-manager:8009 \ + unison +``` + +## 🔧 Development + +### Project Structure +``` +services/unison/ +├── src/ +│ ├── app.js # Main application +│ ├── middleware/ # Express middleware +│ ├── routes/ # API routes +│ ├── services/ # External service integrations +│ └── utils/ # Utility functions +├── logs/ # Log files +├── Dockerfile # Docker configuration +├── package.json # Dependencies +├── start.sh # Startup script +└── README.md # This file +``` + +### Adding New Features + +1. **New API Endpoints**: Add to `src/routes/` +2. **External Services**: Add to `src/services/` +3. **Middleware**: Add to `src/middleware/` +4. **Validation**: Update schemas in `src/utils/schemaValidator.js` + +## 📈 Monitoring + +### Metrics to Monitor +- Response times +- Error rates +- Service availability +- Claude AI usage +- Rate limit hits + +### Health Indicators +- All external services healthy +- Claude AI available +- Response time < 5 seconds +- Error rate < 1% + +## 🤝 Contributing + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Add tests +5. Submit a pull request + +## 📄 License + +MIT License - see LICENSE file for details. + +## 🆘 Support + +For issues and questions: +1. Check the logs in `logs/` directory +2. Verify external services are running +3. Check environment variables +4. Review the health endpoint + +## 🔄 Changelog + +### v1.0.0 +- Initial release +- Unified recommendation service +- Claude AI integration +- Comprehensive error handling +- Docker support +- Production-ready logging and monitoring diff --git a/services/unison/UNISON_WORKFLOW.md b/services/unison/UNISON_WORKFLOW.md new file mode 100644 index 0000000..f87509e --- /dev/null +++ b/services/unison/UNISON_WORKFLOW.md @@ -0,0 +1,376 @@ +# Unison Service - Complete Workflow Analysis + +## 🏗️ Architecture Overview + +The Unison service acts as a **unified orchestration layer** that combines recommendations from multiple sources and uses Claude AI to generate optimized tech stack recommendations. + +## 🔄 Complete Workflow Diagram + +``` +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ UNISON SERVICE WORKFLOW │ +└─────────────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ +│ Client App │───▶│ Unison Service │───▶│ Claude AI API │ +│ │ │ (Port 8014) │ │ │ +└─────────────────┘ └─────────┬────────┘ └─────────────────┘ + │ + ┌────────────┼────────────┐ + │ │ │ + ┌───────▼──────┐ ┌───▼────┐ ┌────▼──────┐ + │ Tech Stack │ │Template│ │Template │ + │ Selector │ │Manager │ │Manager AI │ + │ (Port 8002) │ │(8009) │ │(Port 8013)│ + └──────────────┘ └────────┘ └───────────┘ +``` + +## 📋 Detailed Workflow Steps + +### 1. **Request Reception & Validation** +``` +Client Request → Unison Service → Middleware Stack +``` + +**Components:** +- **Express Server** (Port 8014) +- **Security Middleware** (Helmet, CORS) +- **Rate Limiting** (100 req/15min per IP) +- **Request Validation** (Joi schema validation) +- **Body Parsing** (JSON, URL-encoded) + +**Validation Rules:** +- Domain: 1-100 characters +- Budget: Positive number +- Preferred Technologies: Array of strings (1-50 chars each) +- Template ID: Valid UUID format +- Boolean flags: includeSimilar, includeKeywords, forceRefresh + +### 2. **Route Processing** +``` +POST /api/recommendations/unified → Unified Recommendation Handler +GET /api/recommendations/tech-stack → Tech Stack Only Handler +GET /api/recommendations/template/:id → Template Only Handler +GET /health → Health Check Handler +``` + +### 3. **Unified Recommendation Workflow** (Main Flow) + +#### 3.1 **Input Validation** +```javascript +// Validate tech stack request parameters +const techStackRequest = { domain, budget, preferredTechnologies }; +const techStackValidation = schemaValidator.validateTechStackRequest(techStackRequest); + +// Validate template request if templateId provided +if (templateId) { + const templateRequest = { templateId, includeSimilar, includeKeywords, forceRefresh }; + const templateValidation = schemaValidator.validateTemplateRequest(templateRequest); +} +``` + +#### 3.2 **Parallel Service Calls** +```javascript +// Always fetch from tech-stack-selector +const techStackPromise = techStackService.getRecommendations({ + domain, budget, preferredTechnologies +}).catch(error => ({ success: false, error: error.message, source: 'tech-stack-selector' })); + +// Fetch from template-manager if templateId provided +const templatePromise = templateId ? + templateService.getAIRecommendations(templateId, { forceRefresh }) + .catch(error => ({ success: false, error: error.message, source: 'template-manager' })) : + Promise.resolve({ success: false, error: 'No template ID provided', source: 'template-manager' }); + +// Execute both calls in parallel +const [techStackResult, templateResult] = await Promise.all([techStackPromise, templatePromise]); +``` + +#### 3.3 **Service Integration Details** + +**Tech Stack Selector Integration:** +- **Endpoint**: `POST /recommend/best` +- **Data Source**: PostgreSQL + Neo4j (migrated data) +- **Features**: Price-based relationships, Claude AI recommendations +- **Response**: Array of tech stack recommendations with costs, team sizes, etc. + +**Template Manager Integration:** +- **Endpoint**: `GET /api/templates/{id}/ai-recommendations` +- **Data Source**: Template database with AI analysis +- **Features**: Template-based recommendations, feature learning +- **Response**: Template-specific tech stack recommendations + +#### 3.4 **Decision Logic & Fallback Strategy** + +```javascript +// Check if we have at least one successful recommendation +if (!techStackResult.success && !templateResult.success) { + return res.status(500).json({ + success: false, + error: 'Failed to fetch recommendations from both services' + }); +} + +// If only one service succeeded, return its result +if (!techStackResult.success || !templateResult.success) { + const successfulResult = techStackResult.success ? techStackResult : templateResult; + return res.json({ + success: true, + data: successfulResult.data, + source: successfulResult.source, + message: 'Single service recommendation (other service unavailable)' + }); +} +``` + +#### 3.5 **Claude AI Unification** (When Both Services Succeed) + +**Claude AI Integration:** +- **Model**: claude-3-sonnet-20240229 +- **Max Tokens**: 4000 +- **Timeout**: 30 seconds +- **API**: Anthropic Claude API + +**Prompt Engineering:** +```javascript +const prompt = `You are an expert tech stack architect. I need you to analyze two different tech stack recommendations and create a single, optimized recommendation that balances cost, domain requirements, and template-feature compatibility. + +## Original Request Parameters: +- Domain: ${requestParams.domain} +- Budget: $${requestParams.budget} +- Preferred Technologies: ${requestParams.preferredTechnologies?.join(', ')} +- Template ID: ${requestParams.templateId} + +## Tech Stack Selector Recommendation: +${JSON.stringify(techStackRecommendation.data, null, 2)} + +## Template Manager Recommendation: +${JSON.stringify(templateRecommendation.data, null, 2)} + +## Your Task: +Analyze both recommendations and create a single, optimized tech stack recommendation that: +1. Balances cost-effectiveness with the budget constraint +2. Matches the domain requirements +3. Incorporates the best features from the template recommendation +4. Considers the preferred technologies when possible +5. Provides realistic team size, development time, and success metrics + +## Required Output Format: +[Detailed JSON schema requirements...]`; +``` + +**Response Processing:** +```javascript +// Parse Claude's response +const claudeResponse = response.data.content[0].text; +const unifiedRecommendation = this.parseClaudeResponse(claudeResponse); + +// Validate the unified recommendation +const validation = schemaValidator.validateUnifiedRecommendation(unifiedRecommendation); +if (!validation.valid) { + // Fallback to tech-stack-selector recommendation + return res.json({ + success: true, + data: techStackResult.data, + source: 'tech-stack-selector (fallback)', + message: 'Claude generated invalid recommendation, using tech-stack-selector as fallback' + }); +} +``` + +### 4. **Response Generation & Validation** + +**Schema Validation:** +- **Unified Recommendation Schema**: 18 required fields with strict validation +- **Numeric Ranges**: Monthly cost (0-10000), Setup cost (0-50000), etc. +- **String Constraints**: Team size pattern, length limits +- **Required Fields**: stack_name, monthly_cost, setup_cost, team_size, development_time, satisfaction, success_rate, frontend, backend, database, cloud, testing, mobile, devops, ai_ml, recommended_tool, recommendation_score, message + +**Response Format:** +```json +{ + "success": true, + "data": { + "stack_name": "Optimized E-commerce Stack", + "monthly_cost": 150, + "setup_cost": 2000, + "team_size": "3-5", + "development_time": 8, + "satisfaction": 92, + "success_rate": 88, + "frontend": "React", + "backend": "Node.js", + "database": "PostgreSQL", + "cloud": "AWS", + "testing": "Jest", + "mobile": "React Native", + "devops": "Docker", + "ai_ml": "TensorFlow", + "recommended_tool": "Vercel", + "recommendation_score": 94, + "message": "Balanced solution combining cost-effectiveness with modern tech stack" + }, + "source": "unified", + "message": "Unified recommendation generated successfully", + "processingTime": 1250, + "services": { + "techStackSelector": "available", + "templateManager": "available", + "claudeAI": "available" + }, + "claudeModel": "claude-3-sonnet-20240229" +} +``` + +### 5. **Error Handling & Logging** + +**Error Types:** +- **Validation Errors**: Invalid input parameters +- **Service Errors**: External service failures +- **Claude AI Errors**: API failures or invalid responses +- **Schema Validation Errors**: Invalid output format +- **Network Errors**: Timeout or connection issues + +**Logging Strategy:** +- **Winston Logger**: Structured JSON logging +- **Log Levels**: error, warn, info, debug +- **Log Files**: error.log, combined.log +- **Console Logging**: Development mode +- **Request Tracking**: Unique request IDs + +**Fallback Mechanisms:** +1. **Single Service Fallback**: If one service fails, use the other +2. **Claude AI Fallback**: If Claude fails, use tech-stack-selector +3. **Schema Validation Fallback**: If Claude output is invalid, use tech-stack-selector +4. **Graceful Degradation**: Always return some recommendation + +### 6. **Health Monitoring** + +**Health Check Endpoints:** +- **Basic Health**: `/health` - Service status with external service checks +- **Detailed Health**: `/health/detailed` - Comprehensive system information + +**External Service Monitoring:** +- **Tech Stack Selector**: `http://pipeline_tech_stack_selector:8002/health` +- **Template Manager**: `http://pipeline_template_manager:8009/health` +- **Response Time Tracking**: Individual service response times +- **Status Aggregation**: Overall service health status + +## 🔧 Service Dependencies + +### External Services +1. **Tech Stack Selector** (Port 8002) + - **Purpose**: Budget and domain-based recommendations + - **Data Source**: PostgreSQL + Neo4j + - **Features**: Price analysis, Claude AI integration + - **Health Check**: `/health` + +2. **Template Manager** (Port 8009) + - **Purpose**: Template-based recommendations + - **Data Source**: Template database + - **Features**: Feature learning, usage tracking + - **Health Check**: `/health` + +3. **Template Manager AI** (Port 8013) + - **Purpose**: AI-powered template analysis + - **Features**: Claude AI integration for templates + - **Health Check**: `/health` + +4. **Claude AI** (External API) + - **Purpose**: Intelligent recommendation unification + - **Model**: claude-3-sonnet-20240229 + - **Features**: Natural language processing, optimization + +### Internal Components +1. **Schema Validator**: JSON schema validation using Ajv +2. **Logger**: Winston-based structured logging +3. **Error Handler**: Comprehensive error handling +4. **Request Validator**: Joi-based input validation +5. **Health Check Middleware**: External service monitoring + +## 📊 Performance Characteristics + +### Response Times +- **Health Check**: ~12ms +- **Tech Stack Only**: ~50ms +- **Template Only**: ~15ms +- **Unified Recommendation**: ~11ms (with fallback) +- **Claude AI Unification**: ~2-5 seconds + +### Memory Usage +- **Base Memory**: ~16MB +- **Peak Memory**: ~18MB +- **External Memory**: ~3MB + +### Throughput +- **Rate Limit**: 100 requests per 15 minutes per IP +- **Concurrent Requests**: Handled by Express.js +- **Timeout**: 30 seconds per external service call + +## 🛡️ Security & Reliability + +### Security Features +- **Helmet**: Security headers +- **CORS**: Cross-origin resource sharing +- **Rate Limiting**: Abuse prevention +- **Input Validation**: XSS and injection prevention +- **Error Sanitization**: No sensitive data in error messages + +### Reliability Features +- **Graceful Fallbacks**: Multiple fallback strategies +- **Circuit Breaker Pattern**: Service failure handling +- **Timeout Management**: Prevents hanging requests +- **Health Monitoring**: Proactive service monitoring +- **Structured Logging**: Comprehensive debugging + +## 🚀 Deployment & Scaling + +### Docker Configuration +- **Base Image**: Node.js 18 Alpine +- **Port Mapping**: 8014:8010 +- **Health Check**: Built-in health check endpoint +- **Logging**: JSON file logging with rotation + +### Environment Variables +- **Service URLs**: External service endpoints +- **Claude API Key**: AI integration +- **Database URLs**: Connection strings +- **Security Keys**: JWT secrets, API keys +- **Performance Tuning**: Timeouts, limits + +## 📈 Monitoring & Observability + +### Metrics Tracked +- **Response Times**: Per endpoint and service +- **Error Rates**: By error type and service +- **Service Availability**: External service health +- **Memory Usage**: Heap and external memory +- **Request Volume**: Rate limiting metrics + +### Logging Strategy +- **Structured Logs**: JSON format for easy parsing +- **Log Levels**: Appropriate level for each event +- **Request Tracing**: Unique identifiers for requests +- **Error Context**: Detailed error information +- **Performance Metrics**: Response time tracking + +--- + +## 🎯 Summary + +The Unison service implements a **sophisticated orchestration workflow** that: + +1. **Validates** incoming requests with strict schema validation +2. **Orchestrates** parallel calls to multiple recommendation services +3. **Unifies** recommendations using Claude AI for intelligent optimization +4. **Validates** outputs with comprehensive schema validation +5. **Provides** multiple fallback strategies for reliability +6. **Monitors** health and performance continuously +7. **Logs** everything for debugging and analysis + +This creates a **robust, intelligent, and reliable** system that can provide high-quality tech stack recommendations even when individual services fail, while maintaining excellent performance and security standards. + +--- +*Generated on: 2025-09-22T05:01:45.120Z* +*Service Version: 1.0.0* +*Status: OPERATIONAL* diff --git a/services/unison/WORKFLOW_DIAGRAM.md b/services/unison/WORKFLOW_DIAGRAM.md new file mode 100644 index 0000000..d35b2d0 --- /dev/null +++ b/services/unison/WORKFLOW_DIAGRAM.md @@ -0,0 +1,499 @@ +# Unison Service - Visual Workflow Diagram + +## 🏗️ Complete System Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ UNISON SERVICE ARCHITECTURE │ +└─────────────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ +│ Client App │───▶│ Unison Service │───▶│ Claude AI API │ +│ │ │ (Port 8014) │ │ │ +└─────────────────┘ └─────────┬────────┘ └─────────────────┘ + │ + ┌────────────┼────────────┐ + │ │ │ + ┌───────▼──────┐ ┌───▼────┐ ┌────▼──────┐ + │ Tech Stack │ │Template│ │Template │ + │ Selector │ │Manager │ │Manager AI │ + │ (Port 8002) │ │(8009) │ │(Port 8013)│ + └──────────────┘ └────────┘ └───────────┘ +``` + +## 🔄 Detailed Workflow Flow + +### 1. Request Processing Pipeline + +``` +┌─────────────────┐ +│ Client Request │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Express Server │ +│ (Port 8014) │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Security Stack │ +│ • Helmet │ +│ • CORS │ +│ • Rate Limiting │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Request Parser │ +│ • JSON Parser │ +│ • URL Encoded │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Validation │ +│ • Joi Schema │ +│ • Input Check │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Route Handler │ +│ • Unified │ +│ • Tech Stack │ +│ • Template │ +└─────────┬───────┘ +``` + +### 2. Unified Recommendation Workflow + +``` +┌─────────────────┐ +│ POST /unified │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Input Validation│ +│ • Domain │ +│ • Budget │ +│ • Technologies │ +│ • Template ID │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Parallel Calls │ +│ ┌─────────────┐ │ +│ │Tech Stack │ │ +│ │Selector │ │ +│ └─────────────┘ │ +│ ┌─────────────┐ │ +│ │Template │ │ +│ │Manager │ │ +│ └─────────────┘ │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Decision Logic │ +│ • Both Success │ +│ • One Success │ +│ • Both Failed │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Claude AI │ +│ Unification │ +│ (if both OK) │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Schema │ +│ Validation │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Response │ +│ Generation │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Client Response │ +└─────────────────┘ +``` + +### 3. Service Integration Details + +#### Tech Stack Selector Integration +``` +┌─────────────────┐ +│ Unison Service │ +└─────────┬───────┘ + │ POST /recommend/best + ▼ +┌─────────────────┐ +│ Tech Stack │ +│ Selector │ +│ (Port 8002) │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Data Sources │ +│ • PostgreSQL │ +│ • Neo4j │ +│ • Price Data │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Claude AI │ +│ Analysis │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Recommendations │ +│ • Cost Analysis │ +│ • Team Sizes │ +│ • Tech Stacks │ +└─────────────────┘ +``` + +#### Template Manager Integration +``` +┌─────────────────┐ +│ Unison Service │ +└─────────┬───────┘ + │ GET /api/templates/{id}/ai-recommendations + ▼ +┌─────────────────┐ +│ Template │ +│ Manager │ +│ (Port 8009) │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Template │ +│ Database │ +│ • Features │ +│ • Usage Data │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Template AI │ +│ Service │ +│ (Port 8013) │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ AI Analysis │ +│ • Feature Match │ +│ • Optimization │ +└─────────────────┘ +``` + +### 4. Claude AI Unification Process + +``` +┌─────────────────┐ +│ Tech Stack │ +│ Recommendation │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Claude AI │ +│ Analysis │ +│ • Cost Balance │ +│ • Domain Match │ +│ • Tech Merge │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Template │ +│ Recommendation │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Unified │ +│ Recommendation │ +│ • Optimized │ +│ • Balanced │ +│ • Validated │ +└─────────────────┘ +``` + +### 5. Error Handling & Fallback Strategy + +``` +┌─────────────────┐ +│ Service Call │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Success? │ +└─────────┬───────┘ + │ + ┌─────┴─────┐ + │ │ + ▼ ▼ +┌─────────┐ ┌─────────┐ +│ Success │ │ Failure │ +└────┬────┘ └────┬────┘ + │ │ + ▼ ▼ +┌─────────┐ ┌─────────┐ +│ Process │ │ Log │ +│ Result │ │ Error │ +└────┬────┘ └────┬────┘ + │ │ + ▼ ▼ +┌─────────┐ ┌─────────┐ +│ Return │ │ Fallback│ +│ Data │ │ Strategy│ +└─────────┘ └─────────┘ +``` + +### 6. Health Monitoring Flow + +``` +┌─────────────────┐ +│ Health Check │ +│ Request │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Check Internal │ +│ • Memory │ +│ • CPU │ +│ • Uptime │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Check External │ +│ Services │ +│ • Tech Stack │ +│ • Template │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Aggregate │ +│ Health Status │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Return Health │ +│ Response │ +└─────────────────┘ +``` + +## 🔧 Data Flow Architecture + +### Request Data Flow +``` +Client Request + │ + ▼ +┌─────────────────┐ +│ Input Validation│ +│ • Joi Schema │ +│ • Type Check │ +│ • Range Check │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Service Calls │ +│ • Parallel │ +│ • Async │ +│ • Timeout │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Data Processing │ +│ • Merge │ +│ • Optimize │ +│ • Validate │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Response │ +│ • JSON Format │ +│ • Error Handling│ +│ • Logging │ +└─────────────────┘ +``` + +### Response Data Flow +``` +Service Response + │ + ▼ +┌─────────────────┐ +│ Schema │ +│ Validation │ +│ • Ajv Validator │ +│ • Field Check │ +│ • Type Check │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Error Handling │ +│ • Validation │ +│ • Service │ +│ • Network │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Response │ +│ Formatting │ +│ • JSON │ +│ • Metadata │ +│ • Status Codes │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Client Response │ +└─────────────────┘ +``` + +## 📊 Performance Flow + +### Response Time Breakdown +``` +Total Request Time: ~50ms + │ + ├── Input Validation: ~2ms + ├── Service Calls: ~30ms + │ ├── Tech Stack: ~15ms + │ └── Template: ~15ms + ├── Claude AI: ~2-5s (if used) + ├── Schema Validation: ~3ms + └── Response Formatting: ~1ms +``` + +### Memory Usage Flow +``` +Memory Allocation + │ + ├── Base Service: ~16MB + ├── Request Processing: ~2MB + ├── External Calls: ~1MB + └── Response Generation: ~1MB +``` + +## 🛡️ Security Flow + +### Security Pipeline +``` +Incoming Request + │ + ▼ +┌─────────────────┐ +│ Helmet │ +│ • Security │ +│ Headers │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ CORS │ +│ • Origin Check │ +│ • Method Check │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Rate Limiting │ +│ • IP Tracking │ +│ • Request Count │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Input │ +│ Validation │ +│ • XSS Prevent │ +│ • Injection │ +└─────────┬───────┘ + │ + ▼ +┌─────────────────┐ +│ Processed │ +│ Request │ +└─────────────────┘ +``` + +## 🚀 Deployment Flow + +### Docker Deployment +``` +Docker Build + │ + ├── Node.js 18 Alpine + ├── Dependencies Install + ├── Source Code Copy + ├── Permissions Set + └── Health Check Config + │ + ▼ +Docker Run + │ + ├── Port Mapping: 8014:8010 + ├── Environment Variables + ├── Volume Mounts + └── Network Configuration + │ + ▼ +Service Running + │ + ├── Health Checks + ├── Log Monitoring + ├── Error Tracking + └── Performance Metrics +``` + +--- + +## 🎯 Key Workflow Characteristics + +1. **Asynchronous Processing**: Parallel service calls for performance +2. **Fault Tolerance**: Multiple fallback strategies +3. **Data Validation**: Strict input/output validation +4. **AI Integration**: Intelligent recommendation unification +5. **Comprehensive Logging**: Full request/response tracking +6. **Health Monitoring**: Proactive service monitoring +7. **Security First**: Multiple security layers +8. **Performance Optimized**: Fast response times +9. **Scalable Architecture**: Containerized deployment +10. **Observable System**: Detailed metrics and logging + +This workflow ensures that the Unison service provides **reliable, intelligent, and high-performance** tech stack recommendations while maintaining excellent security and observability standards. + +--- +*Generated on: 2025-09-22T05:01:45.120Z* +*Service Version: 1.0.0* +*Status: OPERATIONAL* diff --git a/services/unison/config.env b/services/unison/config.env new file mode 100644 index 0000000..c478741 --- /dev/null +++ b/services/unison/config.env @@ -0,0 +1,126 @@ +# Unison Service Environment Configuration +# This file contains environment variables for the Unison service + +# ===================================== +# Service Configuration +# ===================================== +NODE_ENV=development +PORT=8010 +HOST=0.0.0.0 +ENVIRONMENT=development + +# ===================================== +# External Service URLs +# ===================================== +TECH_STACK_SELECTOR_URL=http://pipeline_tech_stack_selector:8002 +TEMPLATE_MANAGER_URL=http://pipeline_template_manager:8009 +TEMPLATE_MANAGER_AI_URL=http://pipeline_template_manager:8013 + +# Service Health Check URLs +TECH_STACK_SELECTOR_HEALTH_URL=http://pipeline_tech_stack_selector:8002/health +TEMPLATE_MANAGER_HEALTH_URL=http://pipeline_template_manager:8009/health + +# ===================================== +# Claude AI Configuration +# ===================================== +CLAUDE_API_KEY=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 + +# ===================================== +# Database Configuration +# ===================================== +POSTGRES_HOST=postgres +POSTGRES_PORT=5432 +POSTGRES_DB=dev_pipeline +POSTGRES_USER=pipeline_admin +POSTGRES_PASSWORD=secure_pipeline_2024 +DATABASE_URL=postgresql://pipeline_admin:secure_pipeline_2024@postgres:5432/dev_pipeline + +# Neo4j Configuration +NEO4J_URI=bolt://neo4j:7687 +NEO4J_USER=neo4j +NEO4J_USERNAME=neo4j +NEO4J_PASSWORD=password + +# Redis Configuration +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_PASSWORD=redis_secure_2024 + +# MongoDB Configuration +MONGODB_HOST=mongodb +MONGODB_PORT=27017 +MONGO_INITDB_ROOT_USERNAME=pipeline_admin +MONGO_INITDB_ROOT_PASSWORD=mongo_secure_2024 +MONGODB_PASSWORD=mongo_secure_2024 + +# ===================================== +# Message Queue Configuration +# ===================================== +RABBITMQ_HOST=rabbitmq +RABBITMQ_PORT=5672 +RABBITMQ_DEFAULT_USER=pipeline_admin +RABBITMQ_DEFAULT_PASS=rabbit_secure_2024 +RABBITMQ_PASSWORD=rabbit_secure_2024 + +# ===================================== +# Security & Authentication +# ===================================== +JWT_SECRET=ultra_secure_jwt_secret_2024 +JWT_ACCESS_SECRET=access-secret-key-2024-tech4biz-secure_pipeline_2024 +JWT_REFRESH_SECRET=refresh-secret-key-2024-tech4biz-secure_pipeline_2024 +API_KEY_HEADER=X-API-Key + +# ===================================== +# Email Configuration +# ===================================== +SMTP_HOST=smtp.gmail.com +SMTP_PORT=587 +SMTP_SECURE=false +SMTP_USER=frontendtechbiz@gmail.com +SMTP_PASS=oidhhjeasgzbqptq +SMTP_FROM=frontendtechbiz@gmail.com +GMAIL_USER=frontendtechbiz@gmail.com +GMAIL_APP_PASSWORD=oidhhjeasgzbqptq + +# ===================================== +# CORS Configuration +# ===================================== +CORS_ORIGIN=* +CORS_METHODS=GET,POST,PUT,DELETE,PATCH,OPTIONS +CORS_CREDENTIALS=true + +# ===================================== +# Service Configuration +# ===================================== +# Rate Limiting +RATE_LIMIT_WINDOW_MS=900000 +RATE_LIMIT_MAX_REQUESTS=100 + +# Logging +LOG_LEVEL=info +LOG_FILE=logs/unison.log + +# Request Timeouts (in milliseconds) +REQUEST_TIMEOUT=30000 +HEALTH_CHECK_TIMEOUT=5000 + +# ===================================== +# External Service Integration +# ===================================== +# n8n Configuration +N8N_BASIC_AUTH_USER=admin +N8N_BASIC_AUTH_PASSWORD=admin_n8n_2024 +N8N_ENCRYPTION_KEY=very_secure_encryption_key_2024 + +# Jenkins Configuration +JENKINS_ADMIN_ID=admin +JENKINS_ADMIN_PASSWORD=jenkins_secure_2024 + +# Gitea Configuration +GITEA_ADMIN_USER=admin +GITEA_ADMIN_PASSWORD=gitea_secure_2024 + +# Monitoring +GRAFANA_ADMIN_USER=admin +GRAFANA_ADMIN_PASSWORD=grafana_secure_2024 + diff --git a/services/unison/package-lock.json b/services/unison/package-lock.json new file mode 100644 index 0000000..f8d2438 --- /dev/null +++ b/services/unison/package-lock.json @@ -0,0 +1,6686 @@ +{ + "name": "unison", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "unison", + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "ajv": "^8.12.0", + "ajv-formats": "^2.1.1", + "axios": "^1.6.0", + "compression": "^1.7.4", + "cors": "^2.8.5", + "dotenv": "^16.3.1", + "express": "^4.18.2", + "express-rate-limit": "^7.1.5", + "helmet": "^7.1.0", + "joi": "^17.11.0", + "morgan": "^1.10.0", + "pg": "^8.11.3", + "uuid": "^9.0.1", + "winston": "^3.11.0" + }, + "devDependencies": { + "eslint": "^8.55.0", + "jest": "^29.7.0", + "nodemon": "^3.0.2", + "supertest": "^6.3.3" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.4.tgz", + "integrity": "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz", + "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.4", + "@babel/types": "^7.28.4", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@babel/core/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/generator": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz", + "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.3", + "@babel/types": "^7.28.2", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.4.tgz", + "integrity": "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.4" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.4.tgz", + "integrity": "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@babel/traverse/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/types": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.4.tgz", + "integrity": "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@colors/colors": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz", + "integrity": "sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==", + "license": "MIT", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@dabh/diagnostics": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.3.tgz", + "integrity": "sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA==", + "license": "MIT", + "dependencies": { + "colorspace": "1.1.x", + "enabled": "2.0.x", + "kuler": "^2.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@eslint/eslintrc/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@eslint/eslintrc/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@paralleldrive/cuid2": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.2.2.tgz", + "integrity": "sha512-ZOBkgDwEdoYVlSeRbYYXs0S9MejQofiVYoTbKzy/6GQa39/q5tQU2IX46+shYnUkpEl3wc+J6wRlar7r2EK2xA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@noble/hashes": "^1.1.5" + } + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", + "license": "BSD-3-Clause" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/node": { + "version": "24.5.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.5.2.tgz", + "integrity": "sha512-FYxk1I7wPv3K2XBaoyH2cTnocQEu8AOZ60hPbsyukMPLv5/5qr7V1i8PLHdl6Zf87I+xZXFvPCXYjiTFq+YSDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.12.0" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/triple-beam": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/triple-beam/-/triple-beam-1.3.5.tgz", + "integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==", + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "dev": true, + "license": "ISC" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/asap": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", + "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==", + "dev": true, + "license": "MIT" + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "license": "MIT" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.12.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.12.2.tgz", + "integrity": "sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.8.6", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.6.tgz", + "integrity": "sha512-wrH5NNqren/QMtKUEEJf7z86YjfqW/2uw3IL3/xpqZUC95SSVIFXYQeeGjL6FT/X68IROu6RMehZQS5foy2BXw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/basic-auth": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", + "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.1.2" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/basic-auth/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.26.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.26.2.tgz", + "integrity": "sha512-ECFzp6uFOSB+dcZ5BK/IBaGWssbSYBHvuMeMt3MMFyhI0Z8SqGgEkBLARgpRH3hutIgPVsALcMwbDrJqPxQ65A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.8.3", + "caniuse-lite": "^1.0.30001741", + "electron-to-chromium": "^1.5.218", + "node-releases": "^2.0.21", + "update-browserslist-db": "^1.1.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001743", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001743.tgz", + "integrity": "sha512-e6Ojr7RV14Un7dz6ASD0aZDmQPT/A+eZU+nuTNfjqmRrmkmQlnTNWH0SKmqagx9PeW87UVqapSurtAXifmtdmw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/color": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", + "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.3", + "color-string": "^1.6.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "node_modules/color/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "license": "MIT" + }, + "node_modules/colorspace": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.4.tgz", + "integrity": "sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w==", + "license": "MIT", + "dependencies": { + "color": "^3.1.3", + "text-hex": "1.0.x" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/component-emitter": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.1.tgz", + "integrity": "sha512-T0+barUSQRTUQASh8bx02dl+DhF54GtIDY13Y3m9oWTklKbb3Wv974meRpeZ3lp1JpLVECWWNHC4vaG2XHXouQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "license": "MIT", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.8.1.tgz", + "integrity": "sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "compressible": "~2.0.18", + "debug": "2.6.9", + "negotiator": "~0.6.4", + "on-headers": "~1.1.0", + "safe-buffer": "5.2.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cookie": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/cookiejar": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.4.tgz", + "integrity": "sha512-LDx6oHrK+PhzLKJU9j5S7/Y3jM/mUHvD/DeI1WQmJn652iPC5Y4TBzC9l+5OMOXlyTTA+SmVUPm0HQUwpD5Jqw==", + "dev": true, + "license": "MIT" + }, + "node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "license": "MIT", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/dedent": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/dezalgo": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/dezalgo/-/dezalgo-1.0.4.tgz", + "integrity": "sha512-rXSP0bf+5n0Qonsb+SVVfNfIsimO4HEtmnIpPHY8Q1UCzKlQrDMfdobr8nJOOsRgWCyMRqeSBQzmWUMq7zvVig==", + "dev": true, + "license": "ISC", + "dependencies": { + "asap": "^2.0.0", + "wrappy": "1" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.222", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.222.tgz", + "integrity": "sha512-gA7psSwSwQRE60CEoLz6JBCQPIxNeuzB2nL8vE03GK/OHxlvykbLyeiumQy1iH5C2f3YbRAZpGCMT12a/9ih9w==", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/enabled": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", + "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/eslint/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express-rate-limit": { + "version": "7.5.1", + "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.1.tgz", + "integrity": "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/express-rate-limit" + }, + "peerDependencies": { + "express": ">= 4.11" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fecha": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.3.tgz", + "integrity": "sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==", + "license": "MIT" + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/fn.name": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", + "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==", + "license": "MIT" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", + "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/formidable": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/formidable/-/formidable-2.1.5.tgz", + "integrity": "sha512-Oz5Hwvwak/DCaXVVUtPn4oLMLLy1CdclLKO1LFgU7XzDpVMUU5UjlSLpGMocyQNNk8F6IJW9M/YdooSn2MRI+Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@paralleldrive/cuid2": "^2.2.2", + "dezalgo": "^1.0.4", + "once": "^1.4.0", + "qs": "^6.11.0" + }, + "funding": { + "url": "https://ko-fi.com/tunnckoCore/commissions" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/helmet": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/helmet/-/helmet-7.2.0.tgz", + "integrity": "sha512-ZRiwvN089JfMXokizgqEPXsl2Guk094yExfoDXR0cBYWxtBbaSww/w+vT4WEJsBW2iTUi1GgZ6swmoug3Oy4Xw==", + "license": "MIT", + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/ignore-by-default": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz", + "integrity": "sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA==", + "dev": true, + "license": "ISC" + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/istanbul-lib-source-maps/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/kuler": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", + "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==", + "license": "MIT" + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/logform": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.7.0.tgz", + "integrity": "sha512-TFYA4jnP7PVbmlBIfhlSe+WKxs9dklXMTEGcBCIvLhE/Tn3H6Gk1norupVW7m5Cnd4bLcr08AytbyV/xj7f/kQ==", + "license": "MIT", + "dependencies": { + "@colors/colors": "1.6.0", + "@types/triple-beam": "^1.3.2", + "fecha": "^4.2.0", + "ms": "^2.1.1", + "safe-stable-stringify": "^2.3.1", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/logform/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/morgan": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.1.tgz", + "integrity": "sha512-223dMRJtI/l25dJKWpgij2cMtywuG/WiUKXdvwfbhGKBhy1puASqXwFzmWZ7+K73vUPoR7SS2Qz2cI/g9MKw0A==", + "license": "MIT", + "dependencies": { + "basic-auth": "~2.0.1", + "debug": "2.6.9", + "depd": "~2.0.0", + "on-finished": "~2.3.0", + "on-headers": "~1.1.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/morgan/node_modules/on-finished": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", + "integrity": "sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.21", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.21.tgz", + "integrity": "sha512-5b0pgg78U3hwXkCM8Z9b2FJdPZlr9Psr9V2gQPESdGHqbntyFJKFW4r5TeWGFzafGY3hzs1JC62VEQMbl1JFkw==", + "dev": true, + "license": "MIT" + }, + "node_modules/nodemon": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.1.10.tgz", + "integrity": "sha512-WDjw3pJ0/0jMFmyNDp3gvY2YizjLmmOUQo6DEBY+JgdvW/yQ9mEeSw6H5ythl5Ny2ytb7f9C2nIbjSxMNzbJXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chokidar": "^3.5.2", + "debug": "^4", + "ignore-by-default": "^1.0.1", + "minimatch": "^3.1.2", + "pstree.remy": "^1.1.8", + "semver": "^7.5.3", + "simple-update-notifier": "^2.0.0", + "supports-color": "^5.5.0", + "touch": "^3.1.0", + "undefsafe": "^2.0.5" + }, + "bin": { + "nodemon": "bin/nodemon.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nodemon" + } + }, + "node_modules/nodemon/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/nodemon/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/nodemon/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nodemon/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/nodemon/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz", + "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/one-time": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", + "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", + "license": "MIT", + "dependencies": { + "fn.name": "1.x.x" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + "node_modules/pg": { + "version": "8.16.3", + "resolved": "https://registry.npmjs.org/pg/-/pg-8.16.3.tgz", + "integrity": "sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw==", + "license": "MIT", + "dependencies": { + "pg-connection-string": "^2.9.1", + "pg-pool": "^3.10.1", + "pg-protocol": "^1.10.3", + "pg-types": "2.2.0", + "pgpass": "1.0.5" + }, + "engines": { + "node": ">= 16.0.0" + }, + "optionalDependencies": { + "pg-cloudflare": "^1.2.7" + }, + "peerDependencies": { + "pg-native": ">=3.0.1" + }, + "peerDependenciesMeta": { + "pg-native": { + "optional": true + } + } + }, + "node_modules/pg-cloudflare": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.2.7.tgz", + "integrity": "sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==", + "license": "MIT", + "optional": true + }, + "node_modules/pg-connection-string": { + "version": "2.9.1", + "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.9.1.tgz", + "integrity": "sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==", + "license": "MIT" + }, + "node_modules/pg-int8": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", + "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", + "license": "ISC", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/pg-pool": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.10.1.tgz", + "integrity": "sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==", + "license": "MIT", + "peerDependencies": { + "pg": ">=8.0" + } + }, + "node_modules/pg-protocol": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.10.3.tgz", + "integrity": "sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==", + "license": "MIT" + }, + "node_modules/pg-types": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", + "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", + "license": "MIT", + "dependencies": { + "pg-int8": "1.0.1", + "postgres-array": "~2.0.0", + "postgres-bytea": "~1.0.0", + "postgres-date": "~1.0.4", + "postgres-interval": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pgpass": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz", + "integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==", + "license": "MIT", + "dependencies": { + "split2": "^4.1.0" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/postgres-array": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", + "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/postgres-bytea": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz", + "integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-date": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", + "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-interval": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", + "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", + "license": "MIT", + "dependencies": { + "xtend": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/pstree.remy": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz", + "integrity": "sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w==", + "dev": true, + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "license": "MIT" + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-stable-stringify": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/simple-swizzle": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.4.tgz", + "integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/simple-swizzle/node_modules/is-arrayish": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.4.tgz", + "integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==", + "license": "MIT" + }, + "node_modules/simple-update-notifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz", + "integrity": "sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/simple-update-notifier/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "license": "ISC", + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/superagent": { + "version": "8.1.2", + "resolved": "https://registry.npmjs.org/superagent/-/superagent-8.1.2.tgz", + "integrity": "sha512-6WTxW1EB6yCxV5VFOIPQruWGHqc3yI7hEmZK6h+pyk69Lk/Ut7rLUY6W/ONF2MjBuGjvmMiIpsrVJ2vjrHlslA==", + "deprecated": "Please upgrade to superagent v10.2.2+, see release notes at https://github.com/forwardemail/superagent/releases/tag/v10.2.2 - maintenance is supported by Forward Email @ https://forwardemail.net", + "dev": true, + "license": "MIT", + "dependencies": { + "component-emitter": "^1.3.0", + "cookiejar": "^2.1.4", + "debug": "^4.3.4", + "fast-safe-stringify": "^2.1.1", + "form-data": "^4.0.0", + "formidable": "^2.1.2", + "methods": "^1.1.2", + "mime": "2.6.0", + "qs": "^6.11.0", + "semver": "^7.3.8" + }, + "engines": { + "node": ">=6.4.0 <13 || >=14" + } + }, + "node_modules/superagent/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/superagent/node_modules/mime": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", + "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==", + "dev": true, + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/superagent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/superagent/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/supertest": { + "version": "6.3.4", + "resolved": "https://registry.npmjs.org/supertest/-/supertest-6.3.4.tgz", + "integrity": "sha512-erY3HFDG0dPnhw4U+udPfrzXa4xhSG+n4rxfRuZWCUvjFWwKl+OxWf/7zk50s84/fAAs7vf5QAb9uRa0cCykxw==", + "deprecated": "Please upgrade to supertest v7.1.3+, see release notes at https://github.com/forwardemail/supertest/releases/tag/v7.1.3 - maintenance is supported by Forward Email @ https://forwardemail.net", + "dev": true, + "license": "MIT", + "dependencies": { + "methods": "^1.1.2", + "superagent": "^8.1.2" + }, + "engines": { + "node": ">=6.4.0" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-hex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", + "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==", + "license": "MIT" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/touch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/touch/-/touch-3.1.1.tgz", + "integrity": "sha512-r0eojU4bI8MnHr8c5bNo7lJDdI2qXlWWJk6a9EAFG7vbhTjElYhBVS3/miuE0uOuoLdb8Mc/rVfsmm6eo5o9GA==", + "dev": true, + "license": "ISC", + "bin": { + "nodetouch": "bin/nodetouch.js" + } + }, + "node_modules/triple-beam": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.4.1.tgz", + "integrity": "sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==", + "license": "MIT", + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/undefsafe": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/undefsafe/-/undefsafe-2.0.5.tgz", + "integrity": "sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==", + "dev": true, + "license": "MIT" + }, + "node_modules/undici-types": { + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.12.0.tgz", + "integrity": "sha512-goOacqME2GYyOZZfb5Lgtu+1IDmAlAEu5xnD3+xTzS10hT0vzpf0SPjkXwAw9Jm+4n/mQGDP3LO8CPbYROeBfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/winston": { + "version": "3.17.0", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.17.0.tgz", + "integrity": "sha512-DLiFIXYC5fMPxaRg832S6F5mJYvePtmO5G9v9IgUFPhXm9/GkXarH/TUrBAVzhTCzAj9anE/+GjrgXp/54nOgw==", + "license": "MIT", + "dependencies": { + "@colors/colors": "^1.6.0", + "@dabh/diagnostics": "^2.0.2", + "async": "^3.2.3", + "is-stream": "^2.0.0", + "logform": "^2.7.0", + "one-time": "^1.0.0", + "readable-stream": "^3.4.0", + "safe-stable-stringify": "^2.3.1", + "stack-trace": "0.0.x", + "triple-beam": "^1.3.0", + "winston-transport": "^4.9.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/winston-transport": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.9.0.tgz", + "integrity": "sha512-8drMJ4rkgaPo1Me4zD/3WLfI/zPdA9o2IipKODunnGDcuqbHwjsbB79ylv04LCGGzU0xQ6vTznOMpQGaLhhm6A==", + "license": "MIT", + "dependencies": { + "logform": "^2.7.0", + "readable-stream": "^3.6.2", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/services/unison/package.json b/services/unison/package.json new file mode 100644 index 0000000..7e2f3b7 --- /dev/null +++ b/services/unison/package.json @@ -0,0 +1,48 @@ +{ + "name": "unison", + "version": "1.0.0", + "description": "Unison - Unified Tech Stack Recommendation Service", + "main": "src/app.js", + "scripts": { + "start": "node src/app.js", + "dev": "nodemon src/app.js", + "test": "jest", + "lint": "eslint src/", + "docker:build": "docker build -t unison .", + "docker:run": "docker run -p 8010:8010 unison" + }, + "dependencies": { + "express": "^4.18.2", + "cors": "^2.8.5", + "helmet": "^7.1.0", + "morgan": "^1.10.0", + "dotenv": "^16.3.1", + "axios": "^1.6.0", + "joi": "^17.11.0", + "ajv": "^8.12.0", + "ajv-formats": "^2.1.1", + "uuid": "^9.0.1", + "winston": "^3.11.0", + "compression": "^1.7.4", + "express-rate-limit": "^7.1.5", + "pg": "^8.11.3" + }, + "devDependencies": { + "nodemon": "^3.0.2", + "jest": "^29.7.0", + "supertest": "^6.3.3", + "eslint": "^8.55.0" + }, + "engines": { + "node": ">=18.0.0" + }, + "keywords": [ + "tech-stack", + "recommendations", + "ai", + "claude", + "unified" + ], + "author": "CODENUK Team", + "license": "MIT" +} diff --git a/services/unison/setup-env.sh b/services/unison/setup-env.sh new file mode 100644 index 0000000..6b78b88 --- /dev/null +++ b/services/unison/setup-env.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Setup script for Unison service environment variables + +echo "Setting up Unison service environment variables..." + +# Check if config.env exists +if [ ! -f "config.env" ]; then + echo "❌ config.env file not found!" + echo "Please ensure config.env exists in the current directory." + exit 1 +fi + +echo "✅ Found config.env file" + +# Check if .env already exists +if [ -f ".env" ]; then + echo "⚠️ .env file already exists!" + read -p "Do you want to overwrite it? (y/N): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "❌ Setup cancelled." + exit 1 + fi +fi + +# Copy config.env to .env +cp config.env .env +echo "✅ Created .env file from config.env" + +# Check if running in Docker +if [ -f "/.dockerenv" ]; then + echo "🐳 Running in Docker container - using config.env directly" + echo "✅ Environment variables are loaded from config.env" +else + echo "🖥️ Running locally - .env file created" + echo "📝 You can edit .env file if you need to override any settings" +fi + +echo "🎉 Environment setup complete!" +echo "📋 Configuration includes:" +echo " - Service URLs for tech-stack-selector and template-manager" +echo " - Claude AI API key and configuration" +echo " - Database connections (PostgreSQL, Neo4j, Redis, MongoDB)" +echo " - Security and authentication settings" +echo " - Email configuration" +echo " - CORS settings" +echo "" +echo "🚀 Next steps:" +echo " 1. Run: npm start" +echo " 2. Or with Docker: docker-compose up -d unison" diff --git a/services/unison/src/app.js b/services/unison/src/app.js new file mode 100644 index 0000000..a46fce3 --- /dev/null +++ b/services/unison/src/app.js @@ -0,0 +1,140 @@ +const express = require('express'); +const cors = require('cors'); +const helmet = require('helmet'); +const morgan = require('morgan'); +const compression = require('compression'); +const rateLimit = require('express-rate-limit'); +require('dotenv').config({ path: './config.env' }); + +const logger = require('./utils/logger'); +const errorHandler = require('./middleware/errorHandler'); +const requestValidator = require('./middleware/requestValidator'); +const healthCheck = require('./middleware/healthCheck'); + +// Import routes +const recommendationRoutes = require('./routes/recommendations'); +const healthRoutes = require('./routes/health'); + +const app = express(); +const PORT = process.env.PORT || 8010; +const HOST = process.env.HOST || '0.0.0.0'; + +// Security middleware +app.use(helmet({ + contentSecurityPolicy: { + directives: { + defaultSrc: ["'self'"], + styleSrc: ["'self'", "'unsafe-inline'"], + scriptSrc: ["'self'"], + imgSrc: ["'self'", "data:", "https:"], + }, + }, +})); + +// CORS configuration +app.use(cors({ + origin: process.env.CORS_ORIGIN || '*', + credentials: process.env.CORS_CREDENTIALS === 'true', + methods: ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS'], + allowedHeaders: ['Content-Type', 'Authorization', 'X-API-Key'] +})); + +// Compression middleware +app.use(compression()); + +// Logging middleware +app.use(morgan('combined', { + stream: { + write: (message) => logger.info(message.trim()) + } +})); + +// Rate limiting +const limiter = rateLimit({ + windowMs: parseInt(process.env.RATE_LIMIT_WINDOW_MS) || 15 * 60 * 1000, // 15 minutes + max: parseInt(process.env.RATE_LIMIT_MAX_REQUESTS) || 100, // limit each IP to 100 requests per windowMs + message: { + error: 'Too many requests from this IP, please try again later.', + retryAfter: Math.ceil((parseInt(process.env.RATE_LIMIT_WINDOW_MS) || 15 * 60 * 1000) / 1000) + }, + standardHeaders: true, + legacyHeaders: false, +}); + +app.use(limiter); + +// Body parsing middleware +app.use(express.json({ limit: '10mb' })); +app.use(express.urlencoded({ extended: true, limit: '10mb' })); + +// Request validation middleware +app.use(requestValidator); + +// Health check middleware +app.use(healthCheck); + +// Routes +app.use('/api/recommendations', recommendationRoutes); +app.use('/health', healthRoutes); + +// Root endpoint +app.get('/', (req, res) => { + res.json({ + message: 'Unison - Unified Tech Stack Recommendation Service', + version: '1.0.0', + status: 'operational', + timestamp: new Date().toISOString(), + endpoints: { + health: '/health', + recommendations: '/api/recommendations', + unified: '/api/recommendations/unified' + }, + services: { + techStackSelector: process.env.TECH_STACK_SELECTOR_URL || 'http://pipeline_tech_stack_selector:8002', + templateManager: process.env.TEMPLATE_MANAGER_URL || 'http://pipeline_template_manager:8009' + } + }); +}); + +// 404 handler +app.use('*', (req, res) => { + res.status(404).json({ + error: 'Not Found', + message: `Route ${req.originalUrl} not found`, + availableEndpoints: [ + 'GET /', + 'GET /health', + 'POST /api/recommendations/unified' + ] + }); +}); + +// Error handling middleware (must be last) +app.use(errorHandler); + +// Start server +const server = app.listen(PORT, HOST, () => { + logger.info(`🚀 Unison service started on ${HOST}:${PORT}`); + logger.info(`📊 Environment: ${process.env.NODE_ENV || 'development'}`); + logger.info(`🔗 Tech Stack Selector: ${process.env.TECH_STACK_SELECTOR_URL || 'http://pipeline_tech_stack_selector:8002'}`); + logger.info(`🔗 Template Manager: ${process.env.TEMPLATE_MANAGER_URL || 'http://pipeline_template_manager:8009'}`); +}); + +// Graceful shutdown +process.on('SIGTERM', () => { + logger.info('SIGTERM received, shutting down gracefully'); + server.close(() => { + logger.info('Process terminated'); + process.exit(0); + }); +}); + +process.on('SIGINT', () => { + logger.info('SIGINT received, shutting down gracefully'); + server.close(() => { + logger.info('Process terminated'); + process.exit(0); + }); +}); + +module.exports = app; diff --git a/services/unison/src/middleware/errorHandler.js b/services/unison/src/middleware/errorHandler.js new file mode 100644 index 0000000..77d3d24 --- /dev/null +++ b/services/unison/src/middleware/errorHandler.js @@ -0,0 +1,72 @@ +const logger = require('../utils/logger'); + +const errorHandler = (err, req, res, next) => { + let error = { ...err }; + error.message = err.message; + + // Log error + logger.error({ + message: err.message, + stack: err.stack, + url: req.originalUrl, + method: req.method, + ip: req.ip, + userAgent: req.get('User-Agent') + }); + + // Mongoose bad ObjectId + if (err.name === 'CastError') { + const message = 'Resource not found'; + error = { message, statusCode: 404 }; + } + + // Mongoose duplicate key + if (err.code === 11000) { + const message = 'Duplicate field value entered'; + error = { message, statusCode: 400 }; + } + + // Mongoose validation error + if (err.name === 'ValidationError') { + const message = Object.values(err.errors).map(val => val.message).join(', '); + error = { message, statusCode: 400 }; + } + + // JWT errors + if (err.name === 'JsonWebTokenError') { + const message = 'Invalid token'; + error = { message, statusCode: 401 }; + } + + if (err.name === 'TokenExpiredError') { + const message = 'Token expired'; + error = { message, statusCode: 401 }; + } + + // Axios errors + if (err.isAxiosError) { + const message = `External service error: ${err.response?.data?.message || err.message}`; + const statusCode = err.response?.status || 500; + error = { message, statusCode }; + } + + // Joi validation errors + if (err.isJoi) { + const message = err.details.map(detail => detail.message).join(', '); + error = { message, statusCode: 400 }; + } + + // AJV validation errors + if (err.name === 'ValidationError' && err.errors) { + const message = err.errors.map(e => `${e.instancePath || 'root'}: ${e.message}`).join(', '); + error = { message, statusCode: 400 }; + } + + res.status(error.statusCode || 500).json({ + success: false, + error: error.message || 'Server Error', + ...(process.env.NODE_ENV === 'development' && { stack: err.stack }) + }); +}; + +module.exports = errorHandler; diff --git a/services/unison/src/middleware/healthCheck.js b/services/unison/src/middleware/healthCheck.js new file mode 100644 index 0000000..af8d6fd --- /dev/null +++ b/services/unison/src/middleware/healthCheck.js @@ -0,0 +1,60 @@ +const axios = require('axios'); +const logger = require('../utils/logger'); + +// Health check middleware +const healthCheck = async (req, res, next) => { + // Skip health check for actual health endpoint + if (req.path === '/health') { + return next(); + } + + // Check external services health + const externalServices = { + techStackSelector: process.env.TECH_STACK_SELECTOR_HEALTH_URL || 'http://tech-stack-selector:8002/health', + templateManager: process.env.TEMPLATE_MANAGER_HEALTH_URL || 'http://template-manager:8009/health' + }; + + const healthStatus = { + unison: 'healthy', + externalServices: {}, + timestamp: new Date().toISOString() + }; + + // Check each external service + for (const [serviceName, url] of Object.entries(externalServices)) { + try { + const response = await axios.get(url, { + timeout: parseInt(process.env.HEALTH_CHECK_TIMEOUT) || 5000, + headers: { + 'User-Agent': 'Unison-HealthCheck/1.0' + } + }); + + healthStatus.externalServices[serviceName] = { + status: 'healthy', + responseTime: response.headers['x-response-time'] || 'unknown', + lastChecked: new Date().toISOString() + }; + } catch (error) { + logger.warn({ + message: `External service ${serviceName} health check failed`, + service: serviceName, + url: url, + error: error.message + }); + + healthStatus.externalServices[serviceName] = { + status: 'unhealthy', + error: error.message, + lastChecked: new Date().toISOString() + }; + } + } + + // Store health status in request for use in routes + req.healthStatus = healthStatus; + + next(); +}; + +module.exports = healthCheck; diff --git a/services/unison/src/middleware/requestValidator.js b/services/unison/src/middleware/requestValidator.js new file mode 100644 index 0000000..61bbe2a --- /dev/null +++ b/services/unison/src/middleware/requestValidator.js @@ -0,0 +1,45 @@ +const Joi = require('joi'); +const logger = require('../utils/logger'); + +// Request validation middleware +const requestValidator = (req, res, next) => { + // Skip validation for health checks and root endpoint + if (req.path === '/health' || req.path === '/') { + return next(); + } + + // Validate request body for POST/PUT requests + if (['POST', 'PUT', 'PATCH'].includes(req.method) && req.body) { + // Basic validation for unified recommendation request - simplified + if (req.path.includes('/unified')) { + const schema = Joi.object({ + domain: Joi.string().min(1).max(100).optional(), + budget: Joi.number().positive().optional(), + preferredTechnologies: Joi.array().items(Joi.string().min(1).max(50)).optional(), + templateId: Joi.string().uuid().optional(), + includeSimilar: Joi.boolean().optional(), + includeKeywords: Joi.boolean().optional(), + forceRefresh: Joi.boolean().optional() + }); + + const { error } = schema.validate(req.body); + if (error) { + logger.warn({ + message: 'Request validation failed', + error: error.details[0].message, + body: req.body, + path: req.path + }); + return res.status(400).json({ + success: false, + error: 'Invalid request data', + details: error.details[0].message + }); + } + } + } + + next(); +}; + +module.exports = requestValidator; diff --git a/services/unison/src/routes/health.js b/services/unison/src/routes/health.js new file mode 100644 index 0000000..1c4bb80 --- /dev/null +++ b/services/unison/src/routes/health.js @@ -0,0 +1,160 @@ +const express = require('express'); +const axios = require('axios'); +const DatabaseService = require('../services/databaseService'); +const logger = require('../utils/logger'); + +// Create database service instance +const databaseService = new DatabaseService(); + +const router = express.Router(); + +// Health check endpoint +router.get('/', async (req, res) => { + try { + const startTime = Date.now(); + + // Check external services + const externalServices = { + techStackSelector: process.env.TECH_STACK_SELECTOR_HEALTH_URL || 'http://tech-stack-selector:8002/health', + templateManager: process.env.TEMPLATE_MANAGER_HEALTH_URL || 'http://template-manager:8009/health' + }; + + const healthChecks = {}; + let allHealthy = true; + + // Check database health + const databaseHealthy = await databaseService.isHealthy(); + if (!databaseHealthy) { + allHealthy = false; + } + + // Check each external service + for (const [serviceName, url] of Object.entries(externalServices)) { + try { + const serviceStartTime = Date.now(); + const response = await axios.get(url, { + timeout: parseInt(process.env.HEALTH_CHECK_TIMEOUT) || 5000, + headers: { + 'User-Agent': 'Unison-HealthCheck/1.0' + } + }); + + const responseTime = Date.now() - serviceStartTime; + + healthChecks[serviceName] = { + status: 'healthy', + responseTime: `${responseTime}ms`, + statusCode: response.status, + lastChecked: new Date().toISOString(), + data: response.data + }; + } catch (error) { + allHealthy = false; + healthChecks[serviceName] = { + status: 'unhealthy', + error: error.message, + statusCode: error.response?.status || 'timeout', + lastChecked: new Date().toISOString() + }; + + logger.warn({ + message: `External service ${serviceName} health check failed`, + service: serviceName, + url: url, + error: error.message, + statusCode: error.response?.status + }); + } + } + + const totalResponseTime = Date.now() - startTime; + const overallStatus = allHealthy ? 'healthy' : 'degraded'; + + const healthResponse = { + status: overallStatus, + service: 'unison', + version: '1.0.0', + timestamp: new Date().toISOString(), + uptime: process.uptime(), + responseTime: `${totalResponseTime}ms`, + environment: process.env.NODE_ENV || 'development', + memory: { + used: Math.round(process.memoryUsage().heapUsed / 1024 / 1024) + ' MB', + total: Math.round(process.memoryUsage().heapTotal / 1024 / 1024) + ' MB', + external: Math.round(process.memoryUsage().external / 1024 / 1024) + ' MB' + }, + externalServices: healthChecks, + database: { + status: databaseHealthy ? 'healthy' : 'unhealthy', + type: 'PostgreSQL' + }, + features: { + unifiedRecommendations: true, + techStackSelector: healthChecks.techStackSelector?.status === 'healthy', + templateManager: healthChecks.templateManager?.status === 'healthy', + claudeAI: !!process.env.CLAUDE_API_KEY, + databaseStorage: databaseHealthy + } + }; + + const statusCode = allHealthy ? 200 : 503; + res.status(statusCode).json(healthResponse); + + } catch (error) { + logger.error({ + message: 'Health check failed', + error: error.message, + stack: error.stack + }); + + res.status(500).json({ + status: 'unhealthy', + service: 'unison', + error: 'Health check failed', + message: error.message, + timestamp: new Date().toISOString() + }); + } +}); + +// Detailed health check with more information +router.get('/detailed', async (req, res) => { + try { + const detailedHealth = { + status: 'healthy', + service: 'unison', + version: '1.0.0', + timestamp: new Date().toISOString(), + uptime: process.uptime(), + environment: process.env.NODE_ENV || 'development', + nodeVersion: process.version, + platform: process.platform, + architecture: process.arch, + memory: process.memoryUsage(), + cpu: process.cpuUsage(), + pid: process.pid, + config: { + port: process.env.PORT || 8010, + host: process.env.HOST || '0.0.0.0', + techStackSelectorUrl: process.env.TECH_STACK_SELECTOR_URL, + templateManagerUrl: process.env.TEMPLATE_MANAGER_URL, + claudeApiKey: process.env.CLAUDE_API_KEY ? 'configured' : 'not configured' + } + }; + + res.json(detailedHealth); + } catch (error) { + logger.error({ + message: 'Detailed health check failed', + error: error.message + }); + + res.status(500).json({ + status: 'unhealthy', + error: 'Detailed health check failed', + message: error.message + }); + } +}); + +module.exports = router; diff --git a/services/unison/src/routes/recommendations.js b/services/unison/src/routes/recommendations.js new file mode 100644 index 0000000..1c12e32 --- /dev/null +++ b/services/unison/src/routes/recommendations.js @@ -0,0 +1,601 @@ +const express = require('express'); +const techStackService = require('../services/techStackService'); +const templateService = require('../services/templateService'); +const claudeService = require('../services/claudeService'); +const DatabaseService = require('../services/databaseService'); +const schemaValidator = require('../utils/schemaValidator'); +const logger = require('../utils/logger'); +const { v4: uuidv4 } = require('uuid'); + +// Create database service instance +const databaseService = new DatabaseService(); + +const router = express.Router(); + +/** + * POST /api/recommendations/unified + * Get unified tech stack recommendation combining both services + */ +router.post('/unified', async (req, res) => { + try { + const startTime = Date.now(); + + // Extract request parameters with defaults + const { + domain = 'general', + budget = 5000, + preferredTechnologies = [], + templateId, + includeSimilar = false, + includeKeywords = false, + forceRefresh = false + } = req.body; + + logger.info({ + message: 'Processing unified recommendation request', + domain, + budget, + preferredTechnologies, + templateId, + includeSimilar, + includeKeywords, + forceRefresh + }); + + + // Use default values if not provided + const techStackRequest = { domain, budget, preferredTechnologies }; + const techStackValidation = schemaValidator.validateTechStackRequest(techStackRequest); + if (!techStackValidation.valid) { + return res.status(400).json({ + success: false, + error: 'Invalid tech stack request parameters', + details: techStackValidation.errors + }); + } + + // If templateId is provided, validate it + if (templateId) { + const templateRequest = { templateId, includeSimilar, includeKeywords, forceRefresh }; + const templateValidation = schemaValidator.validateTemplateRequest(templateRequest); + if (!templateValidation.valid) { + return res.status(400).json({ + success: false, + error: 'Invalid template request parameters', + details: templateValidation.errors + }); + } + } + + // Fetch recommendations from services + const promises = []; + + // Always fetch from tech-stack-selector (domain + budget based) + promises.push( + techStackService.getRecommendations({ + domain, + budget, + preferredTechnologies + }).catch(error => { + logger.error({ + message: 'Failed to fetch from tech-stack-selector', + error: error.message + }); + return { success: false, error: error.message, source: 'tech-stack-selector' }; + }) + ); + + // Fetch from template-manager if templateId is provided + if (templateId) { + promises.push( + templateService.getAIRecommendations(templateId, { forceRefresh }) + .catch(error => { + logger.error({ + message: 'Failed to fetch from template-manager', + error: error.message, + templateId + }); + return { success: false, error: error.message, source: 'template-manager' }; + }) + ); + } else { + // If no templateId, provide a default template recommendation + promises.push(Promise.resolve({ + success: true, + data: { + stack_name: 'Default General Purpose Stack', + monthly_cost: 100, + setup_cost: 2000, + team_size: '2-3', + development_time: 4, + satisfaction: 85, + success_rate: 80, + frontend: 'React', + backend: 'Node.js', + database: 'PostgreSQL', + cloud: 'AWS', + testing: 'Jest', + mobile: 'React Native', + devops: 'Docker', + ai_ml: 'Not specified', + recommendation_score: 85.0 + }, + source: 'template-manager-default' + })); + } + + const [techStackResult, templateResult] = await Promise.all(promises); + + // Check if we have at least one successful recommendation + if (!techStackResult.success && !templateResult.success) { + return res.status(500).json({ + success: false, + error: 'Failed to fetch recommendations from both services', + details: { + techStackError: techStackResult.error, + templateError: templateResult.error + } + }); + } + + // Both services must succeed for unified recommendations + if (!techStackResult.success || !templateResult.success) { + return res.status(500).json({ + success: false, + error: 'Both services are required for unified recommendations', + message: 'Both tech-stack-selector and template-manager must be available for unified recommendations', + processingTime: Date.now() - startTime, + services: { + techStackSelector: techStackResult.success ? 'available' : 'unavailable', + templateManager: templateResult.success ? 'available' : 'unavailable' + } + }); + } + + // Both services returned recommendations - use Claude to unify them + if (!claudeService.isAvailable()) { + return res.status(500).json({ + success: false, + error: 'Claude AI service is required for unified recommendations', + message: 'Claude AI is not available. Unified recommendations require Claude AI to process both tech-stack and template recommendations.', + processingTime: Date.now() - startTime, + services: { + techStackSelector: 'available', + templateManager: 'available', + claudeAI: 'unavailable' + } + }); + } + + // Generate unified recommendation using Claude + const claudeResult = await claudeService.generateUnifiedRecommendation( + techStackResult, + templateResult, + { domain, budget, preferredTechnologies, templateId } + ); + + // Log Claude AI response for debugging + logger.info({ + message: 'Claude AI response received', + claudeResponse: claudeResult.data, + claudeModel: claudeResult.claudeModel + }); + + // Validate the unified recommendation + const validation = schemaValidator.validateUnifiedRecommendation(claudeResult.data); + if (!validation.valid) { + logger.warn({ + message: 'Claude generated invalid recommendation, using tech-stack-selector as fallback', + validationErrors: validation.errors, + claudeResponse: claudeResult.data + }); + + return res.json({ + success: true, + data: techStackResult.data, + source: 'tech-stack-selector (fallback)', + message: 'Claude generated invalid recommendation, using tech-stack-selector as fallback', + processingTime: Date.now() - startTime, + services: { + techStackSelector: 'available', + templateManager: 'available', + claudeAI: 'invalid_output' + } + }); + } + + logger.info({ + message: 'Successfully generated unified recommendation', + stackName: claudeResult.data.stack_name, + recommendationScore: claudeResult.data.recommendation_score, + processingTime: Date.now() - startTime + }); + + // Store recommendation in database + const requestId = uuidv4(); + const processingTime = Date.now() - startTime; + + // Use a default template ID if none provided (represents "no template" case) + const templateIdForStorage = templateId || '00000000-0000-0000-0000-000000000000'; + + try { + const storageResult = await databaseService.storeRecommendation({ + requestId, + domain, + budget, + preferredTechnologies, + templateId: templateIdForStorage, + stackName: claudeResult.data.stack_name, + monthlyCost: claudeResult.data.monthly_cost, + setupCost: claudeResult.data.setup_cost, + teamSize: claudeResult.data.team_size, + developmentTime: claudeResult.data.development_time, + satisfaction: claudeResult.data.satisfaction, + successRate: claudeResult.data.success_rate, + frontend: claudeResult.data.frontend, + backend: claudeResult.data.backend, + database: claudeResult.data.database, + cloud: claudeResult.data.cloud, + testing: claudeResult.data.testing, + mobile: claudeResult.data.mobile, + devops: claudeResult.data.devops, + aiMl: claudeResult.data.ai_ml, + recommendedTool: claudeResult.data.recommended_tool, + recommendationScore: claudeResult.data.recommendation_score, + message: claudeResult.data.message, + claudeModel: claudeResult.claudeModel, + processingTime + }); + + if (storageResult.success) { + logger.info(`Recommendation stored in database with ID: ${storageResult.id}`); + } else { + logger.warn(`Failed to store recommendation in database: ${storageResult.error}`); + } + } catch (storageError) { + logger.error('Error storing recommendation in database:', storageError); + // Don't fail the request if storage fails + } + + res.json({ + success: true, + data: claudeResult.data, + source: 'unified', + message: 'Unified recommendation generated successfully', + processingTime, + services: { + techStackSelector: 'available', + templateManager: 'available', + claudeAI: 'available' + }, + claudeModel: claudeResult.claudeModel, + requestId, // Include request ID for tracking + templateId: templateId || null // Show original templateId (null if not provided) + }); + + } catch (error) { + logger.error({ + message: 'Error processing unified recommendation request', + error: error.message, + stack: error.stack, + body: req.body + }); + + res.status(500).json({ + success: false, + error: 'Internal server error', + message: error.message + }); + } +}); + +/** + * GET /api/recommendations/tech-stack + * Get recommendations from tech-stack-selector only + */ +router.get('/tech-stack', async (req, res) => { + try { + const { domain, budget, preferredTechnologies } = req.query; + + // Convert string parameters to appropriate types + const params = { + domain: domain || undefined, + budget: budget ? parseFloat(budget) : undefined, + preferredTechnologies: preferredTechnologies ? preferredTechnologies.split(',') : undefined + }; + + // Remove undefined values + Object.keys(params).forEach(key => { + if (params[key] === undefined) { + delete params[key]; + } + }); + + const result = await techStackService.getRecommendations(params); + + res.json({ + success: true, + data: result.data, + source: 'tech-stack-selector', + message: 'Tech stack recommendations retrieved successfully' + }); + + } catch (error) { + logger.error({ + message: 'Error fetching tech stack recommendations', + error: error.message, + query: req.query + }); + + res.status(500).json({ + success: false, + error: 'Failed to fetch tech stack recommendations', + message: error.message + }); + } +}); + +/** + * GET /api/recommendations/template/:templateId + * Get recommendations from template-manager only + */ +router.get('/template/:templateId', async (req, res) => { + try { + const { templateId } = req.params; + const { force_refresh } = req.query; + + // Validate UUID format + const uuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i; + if (!uuidRegex.test(templateId)) { + return res.status(400).json({ + success: false, + error: 'Invalid template ID format', + message: 'Template ID must be a valid UUID format', + providedId: templateId + }); + } + + const result = await templateService.getAIRecommendations(templateId, { + forceRefresh: force_refresh === 'true' + }); + + res.json({ + success: true, + data: result.data, + source: 'template-manager', + message: 'Template recommendations retrieved successfully' + }); + + } catch (error) { + logger.error({ + message: 'Error fetching template recommendations', + error: error.message, + templateId: req.params.templateId + }); + + res.status(500).json({ + success: false, + error: 'Failed to fetch template recommendations', + message: error.message + }); + } +}); + +/** + * GET /api/recommendations/endpoints + * Get available API endpoints + */ +router.get('/endpoints', (req, res) => { + res.json({ + success: true, + data: { + endpoints: [ + { + method: 'POST', + path: '/api/recommendations/unified', + description: 'Get unified tech stack recommendation combining both services', + parameters: { + domain: 'string (optional, default: "general")', + budget: 'number (optional, default: 5000)', + preferredTechnologies: 'array (optional, default: [])', + templateId: 'string (optional, UUID format)', + includeSimilar: 'boolean (optional, default: false)', + includeKeywords: 'boolean (optional, default: false)', + forceRefresh: 'boolean (optional, default: false)' + } + }, + { + method: 'GET', + path: '/api/recommendations/tech-stack', + description: 'Get recommendations from tech-stack-selector only', + parameters: { + domain: 'string (required)', + budget: 'number (required)', + preferredTechnologies: 'array (optional)' + } + }, + { + method: 'GET', + path: '/api/recommendations/template/:templateId', + description: 'Get recommendations from template-manager only', + parameters: { + templateId: 'string (required, UUID format)', + force_refresh: 'boolean (optional, query parameter)' + } + }, + { + method: 'GET', + path: '/api/recommendations/stored', + description: 'Get stored recommendations from database', + parameters: { + limit: 'number (optional, default: 10)', + domain: 'string (optional, filter by domain)', + templateId: 'string (optional, filter by template ID)' + } + }, + { + method: 'GET', + path: '/api/recommendations/stored/:id', + description: 'Get specific stored recommendation by ID', + parameters: { + id: 'string (required, UUID format)' + } + }, + { + method: 'GET', + path: '/api/recommendations/stats', + description: 'Get recommendation statistics', + parameters: 'none' + }, + { + method: 'GET', + path: '/api/recommendations/schemas', + description: 'Get available validation schemas', + parameters: 'none' + } + ] + }, + message: 'Available API endpoints' + }); +}); + +/** + * GET /api/recommendations/schemas + * Get available validation schemas + */ +router.get('/schemas', (req, res) => { + try { + const schemas = schemaValidator.getAvailableSchemas(); + const schemaDefinitions = {}; + + schemas.forEach(schemaName => { + schemaDefinitions[schemaName] = schemaValidator.getSchema(schemaName); + }); + + res.json({ + success: true, + data: { + availableSchemas: schemas, + schemas: schemaDefinitions + }, + message: 'Available schemas retrieved successfully' + }); + + } catch (error) { + logger.error({ + message: 'Error fetching schemas', + error: error.message + }); + + res.status(500).json({ + success: false, + error: 'Failed to fetch schemas', + message: error.message + }); + } +}); + +/** + * GET /api/recommendations/stored + * Get stored recommendations with optional filtering + */ +router.get('/stored', async (req, res) => { + try { + const { domain, templateId, limit = 20 } = req.query; + + let recommendations; + if (domain) { + recommendations = await databaseService.getRecommendationsByDomain(domain, parseInt(limit)); + } else if (templateId) { + recommendations = await databaseService.getRecommendationsByTemplateId(templateId, parseInt(limit)); + } else { + recommendations = await databaseService.getRecentRecommendations(parseInt(limit)); + } + + res.json({ + success: true, + data: recommendations, + count: recommendations.length, + filters: { domain, templateId, limit: parseInt(limit) } + }); + + } catch (error) { + logger.error({ + message: 'Error fetching stored recommendations', + error: error.message, + query: req.query + }); + + res.status(500).json({ + success: false, + error: 'Failed to fetch stored recommendations', + message: error.message + }); + } +}); + +/** + * GET /api/recommendations/stored/:id + * Get a specific stored recommendation by ID + */ +router.get('/stored/:id', async (req, res) => { + try { + const { id } = req.params; + const recommendation = await databaseService.getRecommendationById(id); + + if (!recommendation) { + return res.status(404).json({ + success: false, + error: 'Recommendation not found', + message: `No recommendation found with ID: ${id}` + }); + } + + res.json({ + success: true, + data: recommendation + }); + + } catch (error) { + logger.error({ + message: 'Error fetching recommendation by ID', + error: error.message, + id: req.params.id + }); + + res.status(500).json({ + success: false, + error: 'Failed to fetch recommendation', + message: error.message + }); + } +}); + +/** + * GET /api/recommendations/stats + * Get statistics about stored recommendations + */ +router.get('/stats', async (req, res) => { + try { + const stats = await databaseService.getRecommendationStats(); + + res.json({ + success: true, + data: stats + }); + + } catch (error) { + logger.error({ + message: 'Error fetching recommendation stats', + error: error.message + }); + + res.status(500).json({ + success: false, + error: 'Failed to fetch recommendation statistics', + message: error.message + }); + } +}); + +module.exports = router; diff --git a/services/unison/src/services/claudeService.js b/services/unison/src/services/claudeService.js new file mode 100644 index 0000000..ed1df82 --- /dev/null +++ b/services/unison/src/services/claudeService.js @@ -0,0 +1,248 @@ +const axios = require('axios'); +const logger = require('../utils/logger'); + +class ClaudeService { + constructor() { + this.apiKey = process.env.CLAUDE_API_KEY; + this.model = process.env.CLAUDE_MODEL || 'claude-3-5-sonnet-20241022'; + this.maxTokens = parseInt(process.env.CLAUDE_MAX_TOKENS) || 4000; + this.timeout = parseInt(process.env.REQUEST_TIMEOUT) || 30000; + + if (!this.apiKey) { + logger.warn('Claude API key not configured. Claude integration will be disabled.'); + } + } + + /** + * Generate unified recommendation using Claude AI + * @param {Object} techStackRecommendation - Recommendation from tech-stack-selector + * @param {Object} templateRecommendation - Recommendation from template-manager + * @param {Object} requestParams - Original request parameters + * @returns {Promise} Unified recommendation + */ + async generateUnifiedRecommendation(techStackRecommendation, templateRecommendation, requestParams) { + if (!this.apiKey) { + throw new Error('Claude API key not configured'); + } + + try { + logger.info({ + message: 'Generating unified recommendation using Claude AI', + techStackSource: techStackRecommendation.source, + templateSource: templateRecommendation.source + }); + + const prompt = this.buildPrompt(techStackRecommendation, templateRecommendation, requestParams); + + const response = await axios.post( + 'https://api.anthropic.com/v1/messages', + { + model: this.model, + max_tokens: this.maxTokens, + messages: [ + { + role: 'user', + content: prompt + } + ] + }, + { + timeout: this.timeout, + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.apiKey, + 'anthropic-version': '2023-06-01', + 'User-Agent': 'Unison-Service/1.0' + } + } + ); + + const claudeResponse = response.data.content[0].text; + + // Parse Claude's response + const unifiedRecommendation = this.parseClaudeResponse(claudeResponse); + + logger.info({ + message: 'Successfully generated unified recommendation using Claude AI', + stackName: unifiedRecommendation.stack_name, + recommendationScore: unifiedRecommendation.recommendation_score + }); + + return { + success: true, + data: unifiedRecommendation, + source: 'claude-ai', + claudeModel: this.model + }; + + } catch (error) { + logger.error({ + message: 'Failed to generate unified recommendation using Claude AI', + error: error.message, + techStackSource: techStackRecommendation.source, + templateSource: templateRecommendation.source + }); + + throw new Error(`Claude AI service error: ${error.message}`); + } + } + + /** + * Build the prompt for Claude AI + * @param {Object} techStackRecommendation - Recommendation from tech-stack-selector + * @param {Object} templateRecommendation - Recommendation from template-manager + * @param {Object} requestParams - Original request parameters + * @returns {string} Formatted prompt + */ + buildPrompt(techStackRecommendation, templateRecommendation, requestParams) { + return `You are an expert tech stack architect. I need you to analyze two different tech stack recommendations and create a single, optimized recommendation that balances cost, domain requirements, and template-feature compatibility. + +## Original Request Parameters: +- Domain: ${requestParams.domain || 'Not specified'} +- Budget: $${requestParams.budget || 'Not specified'} +- Preferred Technologies: ${requestParams.preferredTechnologies ? requestParams.preferredTechnologies.join(', ') : 'Not specified'} +- Template ID: ${requestParams.templateId || 'Not specified'} + +## Tech Stack Selector Recommendation: +${JSON.stringify(techStackRecommendation.data, null, 2)} + +## Template Manager Recommendation: +${JSON.stringify(templateRecommendation.data, null, 2)} + +## Your Task: +Analyze both recommendations and create a single, optimized tech stack recommendation that: +1. Balances cost-effectiveness with the budget constraint +2. Matches the domain requirements +3. Incorporates the best features from the template recommendation +4. Considers the preferred technologies when possible +5. Provides realistic team size, development time, and success metrics + +## Required Output Format: +You MUST respond with ONLY a valid JSON object that matches this EXACT schema. Do NOT include any other text or formatting: + +{ + "stack_name": "string (descriptive name for the tech stack)", + "monthly_cost": number (monthly operational cost in USD), + "setup_cost": number (one-time setup cost in USD), + "team_size": "string (e.g., '1-2', '3-5', '6-10')", + "development_time": number (weeks to complete, 1-52), + "satisfaction": number (0-100, user satisfaction score), + "success_rate": number (0-100, project success rate), + "frontend": "string (specific frontend technology like 'React.js', 'Vue.js', 'Angular')", + "backend": "string (specific backend technology like 'Node.js', 'Django', 'Spring Boot')", + "database": "string (specific database like 'PostgreSQL', 'MongoDB', 'MySQL')", + "cloud": "string (specific cloud platform like 'AWS', 'DigitalOcean', 'Azure')", + "testing": "string (specific testing framework like 'Jest', 'pytest', 'Cypress')", + "mobile": "string (mobile technology like 'React Native', 'Flutter', 'Ionic' or 'None')", + "devops": "string (devops tool like 'Docker', 'GitHub Actions', 'Jenkins')", + "ai_ml": "string (AI/ML technology like 'TensorFlow', 'scikit-learn', 'PyTorch' or 'None')", + "recommended_tool": "string (primary recommended tool like 'Stripe', 'Firebase', 'Vercel')", + "recommendation_score": number (0-100, overall recommendation score), + "message": "string (brief explanation of the recommendation, max 500 characters)" +} + +## Important Notes: +- The JSON must be valid and complete +- All numeric values should be realistic +- The recommendation should be practical and implementable +- Consider the budget constraints carefully +- Balance between cost and quality +- Include reasoning in the message field + +Respond with ONLY the JSON object, no additional text or formatting.`; + } + + /** + * Parse Claude's response and validate it + * @param {string} claudeResponse - Raw response from Claude + * @returns {Object} Parsed and validated recommendation + */ + parseClaudeResponse(claudeResponse) { + try { + // Extract JSON from the response (in case there's extra text) + const jsonMatch = claudeResponse.match(/\{[\s\S]*\}/); + if (!jsonMatch) { + throw new Error('No JSON found in Claude response'); + } + + const parsedResponse = JSON.parse(jsonMatch[0]); + + // Validate required fields + const requiredFields = [ + 'stack_name', 'monthly_cost', 'setup_cost', 'team_size', 'development_time', + 'satisfaction', 'success_rate', 'frontend', 'backend', 'database', 'cloud', + 'testing', 'mobile', 'devops', 'ai_ml', 'recommended_tool', 'recommendation_score', 'message' + ]; + + const missingFields = requiredFields.filter(field => !(field in parsedResponse)); + if (missingFields.length > 0) { + throw new Error(`Missing required fields: ${missingFields.join(', ')}`); + } + + // Validate numeric ranges + const numericValidations = { + monthly_cost: { min: 0, max: 10000 }, + setup_cost: { min: 0, max: 50000 }, + development_time: { min: 1, max: 52 }, + satisfaction: { min: 0, max: 100 }, + success_rate: { min: 0, max: 100 }, + recommendation_score: { min: 0, max: 100 } + }; + + for (const [field, range] of Object.entries(numericValidations)) { + const value = parsedResponse[field]; + if (typeof value !== 'number' || value < range.min || value > range.max) { + throw new Error(`Invalid ${field}: ${value}. Must be a number between ${range.min} and ${range.max}`); + } + } + + // Validate string fields + const stringFields = ['stack_name', 'team_size', 'frontend', 'backend', 'database', 'cloud', 'testing', 'mobile', 'devops', 'ai_ml', 'recommended_tool', 'message']; + for (const field of stringFields) { + if (typeof parsedResponse[field] !== 'string' || parsedResponse[field].trim().length === 0) { + throw new Error(`Invalid ${field}: must be a non-empty string`); + } + } + + logger.info({ + message: 'Successfully parsed and validated Claude response', + stackName: parsedResponse.stack_name, + recommendationScore: parsedResponse.recommendation_score + }); + + return parsedResponse; + + } catch (error) { + logger.error({ + message: 'Failed to parse Claude response', + error: error.message, + claudeResponse: claudeResponse.substring(0, 500) + '...' + }); + + throw new Error(`Failed to parse Claude response: ${error.message}`); + } + } + + /** + * Check if Claude service is available + * @returns {boolean} Service availability + */ + isAvailable() { + return !!this.apiKey; + } + + /** + * Get service configuration + * @returns {Object} Service configuration + */ + getConfig() { + return { + available: this.isAvailable(), + model: this.model, + maxTokens: this.maxTokens, + timeout: this.timeout + }; + } +} + +module.exports = new ClaudeService(); diff --git a/services/unison/src/services/databaseService.js b/services/unison/src/services/databaseService.js new file mode 100644 index 0000000..4b1d0c1 --- /dev/null +++ b/services/unison/src/services/databaseService.js @@ -0,0 +1,271 @@ +const { Pool } = require('pg'); +const logger = require('../utils/logger'); + +class DatabaseService { + constructor() { + this.pool = new Pool({ + host: process.env.POSTGRES_HOST || 'postgres', + port: process.env.POSTGRES_PORT || 5432, + database: process.env.POSTGRES_DB || 'dev_pipeline', + user: process.env.POSTGRES_USER || 'pipeline_admin', + password: process.env.POSTGRES_PASSWORD || 'secure_pipeline_2024', + max: 20, + idleTimeoutMillis: 30000, + connectionTimeoutMillis: 2000, + }); + + this.initializeDatabase(); + } + + async initializeDatabase() { + try { + // Wait a bit for database to be ready + await new Promise(resolve => setTimeout(resolve, 2000)); + await this.createRecommendationsTable(); + logger.info('Database service initialized successfully'); + } catch (error) { + logger.error('Failed to initialize database service:', error); + // Don't throw error, just log it + } + } + + async createRecommendationsTable() { + const createTableQuery = ` + CREATE TABLE IF NOT EXISTS claude_recommendations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + request_id VARCHAR(255) UNIQUE NOT NULL, + domain VARCHAR(100) NOT NULL, + budget DECIMAL(10,2) NOT NULL, + preferred_technologies TEXT[], + template_id UUID, + stack_name VARCHAR(255) NOT NULL, + monthly_cost DECIMAL(10,2) NOT NULL, + setup_cost DECIMAL(10,2) NOT NULL, + team_size VARCHAR(50) NOT NULL, + development_time INTEGER NOT NULL, + satisfaction INTEGER NOT NULL CHECK (satisfaction >= 0 AND satisfaction <= 100), + success_rate INTEGER NOT NULL CHECK (success_rate >= 0 AND success_rate <= 100), + frontend VARCHAR(100) NOT NULL, + backend VARCHAR(100) NOT NULL, + database VARCHAR(100) NOT NULL, + cloud VARCHAR(100) NOT NULL, + testing VARCHAR(100) NOT NULL, + mobile VARCHAR(100), + devops VARCHAR(100) NOT NULL, + ai_ml VARCHAR(100), + recommended_tool VARCHAR(100) NOT NULL, + recommendation_score DECIMAL(5,2) NOT NULL CHECK (recommendation_score >= 0 AND recommendation_score <= 100), + message TEXT NOT NULL, + claude_model VARCHAR(100) NOT NULL, + processing_time INTEGER NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP + ); + `; + + const createIndexQuery = ` + CREATE INDEX IF NOT EXISTS idx_claude_recommendations_domain ON claude_recommendations(domain); + CREATE INDEX IF NOT EXISTS idx_claude_recommendations_budget ON claude_recommendations(budget); + CREATE INDEX IF NOT EXISTS idx_claude_recommendations_template_id ON claude_recommendations(template_id); + CREATE INDEX IF NOT EXISTS idx_claude_recommendations_created_at ON claude_recommendations(created_at); + `; + + try { + await this.pool.query(createTableQuery); + await this.pool.query(createIndexQuery); + logger.info('Claude recommendations table created successfully'); + } catch (error) { + logger.error('Error creating recommendations table:', error); + throw error; + } + } + + async storeRecommendation(recommendationData) { + const { + requestId, + domain, + budget, + preferredTechnologies, + templateId, + stackName, + monthlyCost, + setupCost, + teamSize, + developmentTime, + satisfaction, + successRate, + frontend, + backend, + database, + cloud, + testing, + mobile, + devops, + aiMl, + recommendedTool, + recommendationScore, + message, + claudeModel, + processingTime + } = recommendationData; + + const insertQuery = ` + INSERT INTO claude_recommendations ( + request_id, domain, budget, preferred_technologies, template_id, + stack_name, monthly_cost, setup_cost, team_size, development_time, + satisfaction, success_rate, frontend, backend, database, cloud, + testing, mobile, devops, ai_ml, recommended_tool, recommendation_score, + message, claude_model, processing_time + ) VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, + $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, + $21, $22, $23, $24, $25 + ) + RETURNING id, created_at; + `; + + const values = [ + requestId, + domain, + budget, + preferredTechnologies || [], + templateId, + stackName, + monthlyCost, + setupCost, + teamSize, + developmentTime, + satisfaction, + successRate, + frontend, + backend, + database, + cloud, + testing, + mobile || null, + devops, + aiMl || null, + recommendedTool, + recommendationScore, + message, + claudeModel, + processingTime + ]; + + try { + const result = await this.pool.query(insertQuery, values); + logger.info(`Recommendation stored successfully with ID: ${result.rows[0].id}`); + return { + success: true, + id: result.rows[0].id, + createdAt: result.rows[0].created_at + }; + } catch (error) { + logger.error('Error storing recommendation:', error); + return { + success: false, + error: error.message + }; + } + } + + async getRecommendationById(id) { + const query = 'SELECT * FROM claude_recommendations WHERE id = $1'; + try { + const result = await this.pool.query(query, [id]); + return result.rows[0] || null; + } catch (error) { + logger.error('Error fetching recommendation by ID:', error); + return null; + } + } + + async getRecommendationsByDomain(domain, limit = 10) { + const query = ` + SELECT * FROM claude_recommendations + WHERE domain = $1 + ORDER BY created_at DESC + LIMIT $2 + `; + try { + const result = await this.pool.query(query, [domain, limit]); + return result.rows; + } catch (error) { + logger.error('Error fetching recommendations by domain:', error); + return []; + } + } + + async getRecommendationsByTemplateId(templateId, limit = 10) { + const query = ` + SELECT * FROM claude_recommendations + WHERE template_id = $1 + ORDER BY created_at DESC + LIMIT $2 + `; + try { + const result = await this.pool.query(query, [templateId, limit]); + return result.rows; + } catch (error) { + logger.error('Error fetching recommendations by template ID:', error); + return []; + } + } + + async getRecentRecommendations(limit = 20) { + const query = ` + SELECT * FROM claude_recommendations + ORDER BY created_at DESC + LIMIT $1 + `; + try { + const result = await this.pool.query(query, [limit]); + return result.rows; + } catch (error) { + logger.error('Error fetching recent recommendations:', error); + return []; + } + } + + async getRecommendationStats() { + const query = ` + SELECT + COUNT(*) as total_recommendations, + COUNT(DISTINCT domain) as unique_domains, + COUNT(DISTINCT template_id) as unique_templates, + AVG(recommendation_score) as avg_score, + AVG(processing_time) as avg_processing_time, + MIN(created_at) as first_recommendation, + MAX(created_at) as last_recommendation + FROM claude_recommendations + `; + try { + const result = await this.pool.query(query); + return result.rows[0]; + } catch (error) { + logger.error('Error fetching recommendation stats:', error); + return null; + } + } + + async isHealthy() { + try { + await this.pool.query('SELECT 1'); + return true; + } catch (error) { + logger.error('Database health check failed:', error); + return false; + } + } + + async close() { + try { + await this.pool.end(); + logger.info('Database connection pool closed'); + } catch (error) { + logger.error('Error closing database connection:', error); + } + } +} + +module.exports = DatabaseService; diff --git a/services/unison/src/services/techStackService.js b/services/unison/src/services/techStackService.js new file mode 100644 index 0000000..95f9f46 --- /dev/null +++ b/services/unison/src/services/techStackService.js @@ -0,0 +1,210 @@ +const axios = require('axios'); +const logger = require('../utils/logger'); + +class TechStackService { + constructor() { + this.baseURL = process.env.TECH_STACK_SELECTOR_URL || 'http://pipeline_tech_stack_selector:8002'; + this.timeout = parseInt(process.env.REQUEST_TIMEOUT) || 30000; + } + + /** + * Get tech stack recommendations from tech-stack-selector service + * @param {Object} params - Request parameters + * @param {string} params.domain - Domain for recommendations + * @param {number} params.budget - Budget constraint + * @param {Array} params.preferredTechnologies - Preferred technologies + * @returns {Promise} Recommendations from tech-stack-selector + */ + async getRecommendations({ domain, budget, preferredTechnologies }) { + try { + logger.info({ + message: 'Fetching recommendations from tech-stack-selector', + domain, + budget, + preferredTechnologies + }); + + const requestData = { + domain, + budget, + preferredTechnologies + }; + + // Remove undefined values + Object.keys(requestData).forEach(key => { + if (requestData[key] === undefined) { + delete requestData[key]; + } + }); + + const response = await axios.post( + `${this.baseURL}/recommend/best`, + requestData, + { + timeout: this.timeout, + headers: { + 'Content-Type': 'application/json', + 'User-Agent': 'Unison-Service/1.0' + } + } + ); + + if (response.data.success) { + logger.info({ + message: 'Successfully fetched recommendations from tech-stack-selector', + count: response.data.count, + budget: response.data.budget, + domain: response.data.domain + }); + + return { + success: true, + data: response.data, + source: 'tech-stack-selector' + }; + } else { + throw new Error(`Tech-stack-selector returned error: ${response.data.error || 'Unknown error'}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to fetch recommendations from tech-stack-selector', + error: error.message, + domain, + budget, + preferredTechnologies + }); + + throw new Error(`Tech-stack-selector service error: ${error.message}`); + } + } + + /** + * Get price tiers from tech-stack-selector service + * @returns {Promise} Price tiers data + */ + async getPriceTiers() { + try { + logger.info('Fetching price tiers from tech-stack-selector'); + + const response = await axios.get( + `${this.baseURL}/api/price-tiers`, + { + timeout: this.timeout, + headers: { + 'User-Agent': 'Unison-Service/1.0' + } + } + ); + + if (response.data.success) { + logger.info({ + message: 'Successfully fetched price tiers from tech-stack-selector', + count: response.data.count + }); + + return { + success: true, + data: response.data, + source: 'tech-stack-selector' + }; + } else { + throw new Error(`Tech-stack-selector returned error: ${response.data.error || 'Unknown error'}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to fetch price tiers from tech-stack-selector', + error: error.message + }); + + throw new Error(`Tech-stack-selector service error: ${error.message}`); + } + } + + /** + * Get technologies by tier from tech-stack-selector service + * @param {string} tierName - Name of the price tier + * @returns {Promise} Technologies for the tier + */ + async getTechnologiesByTier(tierName) { + try { + logger.info({ + message: 'Fetching technologies by tier from tech-stack-selector', + tierName + }); + + const response = await axios.get( + `${this.baseURL}/api/technologies/${encodeURIComponent(tierName)}`, + { + timeout: this.timeout, + headers: { + 'User-Agent': 'Unison-Service/1.0' + } + } + ); + + if (response.data.success) { + logger.info({ + message: 'Successfully fetched technologies by tier from tech-stack-selector', + tierName, + count: response.data.count + }); + + return { + success: true, + data: response.data, + source: 'tech-stack-selector' + }; + } else { + throw new Error(`Tech-stack-selector returned error: ${response.data.error || 'Unknown error'}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to fetch technologies by tier from tech-stack-selector', + error: error.message, + tierName + }); + + throw new Error(`Tech-stack-selector service error: ${error.message}`); + } + } + + /** + * Check health of tech-stack-selector service + * @returns {Promise} Health status + */ + async checkHealth() { + try { + const response = await axios.get( + `${this.baseURL}/health`, + { + timeout: parseInt(process.env.HEALTH_CHECK_TIMEOUT) || 5000, + headers: { + 'User-Agent': 'Unison-HealthCheck/1.0' + } + } + ); + + return { + status: 'healthy', + data: response.data, + responseTime: response.headers['x-response-time'] || 'unknown' + }; + + } catch (error) { + logger.warn({ + message: 'Tech-stack-selector health check failed', + error: error.message + }); + + return { + status: 'unhealthy', + error: error.message + }; + } + } +} + +module.exports = new TechStackService(); diff --git a/services/unison/src/services/templateService.js b/services/unison/src/services/templateService.js new file mode 100644 index 0000000..c5ec68a --- /dev/null +++ b/services/unison/src/services/templateService.js @@ -0,0 +1,307 @@ +const axios = require('axios'); +const logger = require('../utils/logger'); + +class TemplateService { + constructor() { + this.baseURL = process.env.TEMPLATE_MANAGER_URL || 'http://pipeline_template_manager:8009'; + this.aiURL = process.env.TEMPLATE_MANAGER_AI_URL || 'http://pipeline_template_manager:8013'; + this.timeout = parseInt(process.env.REQUEST_TIMEOUT) || 30000; + } + + /** + * Get template by ID from template-manager service + * @param {string} templateId - Template ID + * @returns {Promise} Template data + */ + async getTemplate(templateId) { + try { + logger.info({ + message: 'Fetching template from template-manager', + templateId + }); + + const response = await axios.get( + `${this.baseURL}/api/templates/${templateId}`, + { + timeout: this.timeout, + headers: { + 'User-Agent': 'Unison-Service/1.0' + } + } + ); + + if (response.data.success) { + logger.info({ + message: 'Successfully fetched template from template-manager', + templateId, + templateName: response.data.data?.name || 'Unknown' + }); + + return { + success: true, + data: response.data.data, + source: 'template-manager' + }; + } else { + throw new Error(`Template-manager returned error: ${response.data.error || 'Unknown error'}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to fetch template from template-manager', + error: error.message, + templateId + }); + + throw new Error(`Template-manager service error: ${error.message}`); + } + } + + /** + * Get AI recommendations for a template + * @param {string} templateId - Template ID + * @param {Object} options - Request options + * @param {boolean} options.forceRefresh - Force refresh recommendations + * @returns {Promise} AI recommendations + */ + async getAIRecommendations(templateId, options = {}) { + try { + logger.info({ + message: 'Fetching AI recommendations from template-manager', + templateId, + options + }); + + const requestData = { + template_id: templateId + }; + + if (options.forceRefresh) { + requestData.force_refresh = true; + } + + const url = `${this.aiURL}/ai/recommendations`; + + const response = await axios.post(url, requestData, { + timeout: this.timeout, + headers: { + 'User-Agent': 'Unison-Service/1.0', + 'Content-Type': 'application/json' + } + }); + + // AI service returns data directly (not wrapped in success object) + if (response.data && response.data.stack_name) { + logger.info({ + message: 'Successfully fetched AI recommendations from template-manager', + templateId, + stackName: response.data.stack_name || 'Unknown' + }); + + return { + success: true, + data: response.data, + source: 'template-manager-ai' + }; + } else { + throw new Error(`Template-manager AI returned invalid data: ${JSON.stringify(response.data)}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to fetch AI recommendations from template-manager', + error: error.message, + templateId, + options + }); + + throw new Error(`Template-manager AI service error: ${error.message}`); + } + } + + /** + * Select template with additional options + * @param {string} templateId - Template ID + * @param {Object} options - Selection options + * @param {boolean} options.includeSimilar - Include similar templates + * @param {boolean} options.includeKeywords - Include keywords + * @returns {Promise} Template selection data + */ + async selectTemplate(templateId, options = {}) { + try { + logger.info({ + message: 'Selecting template from template-manager', + templateId, + options + }); + + const queryParams = new URLSearchParams(); + if (options.includeSimilar) { + queryParams.append('include_similar', 'true'); + } + if (options.includeKeywords) { + queryParams.append('include_keywords', 'true'); + } + + const url = `${this.baseURL}/api/templates/${templateId}/select${queryParams.toString() ? '?' + queryParams.toString() : ''}`; + + const response = await axios.get(url, { + timeout: this.timeout, + headers: { + 'User-Agent': 'Unison-Service/1.0' + } + }); + + if (response.data.success) { + logger.info({ + message: 'Successfully selected template from template-manager', + templateId + }); + + return { + success: true, + data: response.data.data, + source: 'template-manager' + }; + } else { + throw new Error(`Template-manager returned error: ${response.data.error || 'Unknown error'}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to select template from template-manager', + error: error.message, + templateId, + options + }); + + throw new Error(`Template-manager service error: ${error.message}`); + } + } + + /** + * Get all templates from template-manager service + * @param {Object} options - Query options + * @returns {Promise} Templates list + */ + async getTemplates(options = {}) { + try { + logger.info({ + message: 'Fetching templates from template-manager', + options + }); + + const queryParams = new URLSearchParams(); + Object.keys(options).forEach(key => { + if (options[key] !== undefined) { + queryParams.append(key, options[key]); + } + }); + + const url = `${this.baseURL}/api/templates${queryParams.toString() ? '?' + queryParams.toString() : ''}`; + + const response = await axios.get(url, { + timeout: this.timeout, + headers: { + 'User-Agent': 'Unison-Service/1.0' + } + }); + + if (response.data.success) { + logger.info({ + message: 'Successfully fetched templates from template-manager', + count: response.data.data?.length || 0 + }); + + return { + success: true, + data: response.data.data, + source: 'template-manager' + }; + } else { + throw new Error(`Template-manager returned error: ${response.data.error || 'Unknown error'}`); + } + + } catch (error) { + logger.error({ + message: 'Failed to fetch templates from template-manager', + error: error.message, + options + }); + + throw new Error(`Template-manager service error: ${error.message}`); + } + } + + /** + * Check health of template-manager service + * @returns {Promise} Health status + */ + async checkHealth() { + try { + const response = await axios.get( + `${this.baseURL}/health`, + { + timeout: parseInt(process.env.HEALTH_CHECK_TIMEOUT) || 5000, + headers: { + 'User-Agent': 'Unison-HealthCheck/1.0' + } + } + ); + + return { + status: 'healthy', + data: response.data, + responseTime: response.headers['x-response-time'] || 'unknown' + }; + + } catch (error) { + logger.warn({ + message: 'Template-manager health check failed', + error: error.message + }); + + return { + status: 'unhealthy', + error: error.message + }; + } + } + + /** + * Check health of template-manager AI service + * @returns {Promise} Health status + */ + async checkAIHealth() { + try { + const response = await axios.get( + `${this.aiURL}/health`, + { + timeout: parseInt(process.env.HEALTH_CHECK_TIMEOUT) || 5000, + headers: { + 'User-Agent': 'Unison-HealthCheck/1.0' + } + } + ); + + return { + status: 'healthy', + data: response.data, + responseTime: response.headers['x-response-time'] || 'unknown' + }; + + } catch (error) { + logger.warn({ + message: 'Template-manager AI health check failed', + error: error.message + }); + + return { + status: 'unhealthy', + error: error.message + }; + } + } +} + +module.exports = new TemplateService(); diff --git a/services/unison/src/utils/logger.js b/services/unison/src/utils/logger.js new file mode 100644 index 0000000..03fa793 --- /dev/null +++ b/services/unison/src/utils/logger.js @@ -0,0 +1,63 @@ +const winston = require('winston'); +const path = require('path'); + +// Create logs directory if it doesn't exist +const fs = require('fs'); +const logDir = path.join(__dirname, '../../logs'); +if (!fs.existsSync(logDir)) { + fs.mkdirSync(logDir, { recursive: true }); +} + +// Define log format +const logFormat = winston.format.combine( + winston.format.timestamp({ + format: 'YYYY-MM-DD HH:mm:ss' + }), + winston.format.errors({ stack: true }), + winston.format.json(), + winston.format.prettyPrint() +); + +// Create logger instance +const logger = winston.createLogger({ + level: process.env.LOG_LEVEL || 'info', + format: logFormat, + defaultMeta: { service: 'unison' }, + transports: [ + // Write all logs with level 'error' and below to error.log + new winston.transports.File({ + filename: path.join(logDir, 'error.log'), + level: 'error', + maxsize: 5242880, // 5MB + maxFiles: 5, + }), + // Write all logs with level 'info' and below to combined.log + new winston.transports.File({ + filename: path.join(logDir, 'combined.log'), + maxsize: 5242880, // 5MB + maxFiles: 5, + }), + ], +}); + +// If we're not in production, log to the console as well +if (process.env.NODE_ENV !== 'production') { + logger.add(new winston.transports.Console({ + format: winston.format.combine( + winston.format.colorize(), + winston.format.simple(), + winston.format.printf(({ timestamp, level, message, ...meta }) => { + return `${timestamp} [${level}]: ${message} ${Object.keys(meta).length ? JSON.stringify(meta, null, 2) : ''}`; + }) + ) + })); +} + +// Create a stream object for Morgan HTTP logging +logger.stream = { + write: (message) => { + logger.info(message.trim()); + } +}; + +module.exports = logger; diff --git a/services/unison/src/utils/schemaValidator.js b/services/unison/src/utils/schemaValidator.js new file mode 100644 index 0000000..db0e8aa --- /dev/null +++ b/services/unison/src/utils/schemaValidator.js @@ -0,0 +1,308 @@ +const Ajv = require('ajv'); +const addFormats = require('ajv-formats'); +const logger = require('./logger'); + +class SchemaValidator { + constructor() { + this.ajv = new Ajv({ + allErrors: true, + verbose: true, + strict: false + }); + addFormats(this.ajv); + + // Define schemas + this.schemas = { + unifiedRecommendation: this.getUnifiedRecommendationSchema(), + techStackRequest: this.getTechStackRequestSchema(), + templateRequest: this.getTemplateRequestSchema() + }; + + // Compile schemas + this.compiledSchemas = {}; + for (const [name, schema] of Object.entries(this.schemas)) { + try { + this.compiledSchemas[name] = this.ajv.compile(schema); + logger.info(`Schema '${name}' compiled successfully`); + } catch (error) { + logger.error(`Failed to compile schema '${name}': ${error.message}`); + } + } + } + + /** + * Get the unified recommendation schema + * @returns {Object} JSON schema for unified recommendations + */ + getUnifiedRecommendationSchema() { + return { + type: 'object', + required: [ + 'stack_name', 'monthly_cost', 'setup_cost', 'team_size', 'development_time', + 'satisfaction', 'success_rate', 'frontend', 'backend', 'database', 'cloud', + 'testing', 'devops', 'recommended_tool', 'recommendation_score', 'message' + ], + properties: { + stack_name: { + type: 'string', + minLength: 1, + maxLength: 100, + description: 'Descriptive name for the tech stack' + }, + monthly_cost: { + type: 'number', + minimum: 0, + maximum: 10000, + description: 'Monthly operational cost in USD' + }, + setup_cost: { + type: 'number', + minimum: 0, + maximum: 50000, + description: 'One-time setup cost in USD' + }, + team_size: { + type: 'string', + pattern: '^[0-9]+-[0-9]+$', + description: 'Team size range (e.g., "1-2", "3-5")' + }, + development_time: { + type: 'number', + minimum: 1, + maximum: 52, + description: 'Development time in weeks' + }, + satisfaction: { + type: 'number', + minimum: 0, + maximum: 100, + description: 'User satisfaction score (0-100)' + }, + success_rate: { + type: 'number', + minimum: 0, + maximum: 100, + description: 'Project success rate (0-100)' + }, + frontend: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'Frontend technology' + }, + backend: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'Backend technology' + }, + database: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'Database technology' + }, + cloud: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'Cloud platform' + }, + testing: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'Testing framework' + }, + mobile: { + type: 'string', + minLength: 0, + maxLength: 50, + description: 'Mobile technology' + }, + devops: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'DevOps tool' + }, + ai_ml: { + type: 'string', + minLength: 0, + maxLength: 50, + description: 'AI/ML technology' + }, + recommended_tool: { + type: 'string', + minLength: 1, + maxLength: 50, + description: 'Primary recommended tool' + }, + recommendation_score: { + type: 'number', + minimum: 0, + maximum: 100, + description: 'Overall recommendation score (0-100)' + }, + message: { + type: 'string', + minLength: 1, + maxLength: 500, + description: 'Brief explanation of the recommendation' + } + }, + additionalProperties: false + }; + } + + /** + * Get the tech stack request schema + * @returns {Object} JSON schema for tech stack requests + */ + getTechStackRequestSchema() { + return { + type: 'object', + properties: { + domain: { + type: 'string', + minLength: 1, + maxLength: 100, + description: 'Domain for recommendations' + }, + budget: { + type: 'number', + minimum: 0, + maximum: 100000, + description: 'Budget constraint in USD' + }, + preferredTechnologies: { + type: 'array', + items: { + type: 'string', + minLength: 1, + maxLength: 50 + }, + maxItems: 10, + description: 'Preferred technologies' + } + }, + additionalProperties: false + }; + } + + /** + * Get the template request schema + * @returns {Object} JSON schema for template requests + */ + getTemplateRequestSchema() { + return { + type: 'object', + properties: { + templateId: { + type: 'string', + format: 'uuid', + description: 'Template ID' + }, + includeSimilar: { + type: 'boolean', + description: 'Include similar templates' + }, + includeKeywords: { + type: 'boolean', + description: 'Include keywords' + }, + forceRefresh: { + type: 'boolean', + description: 'Force refresh recommendations' + } + }, + additionalProperties: false + }; + } + + /** + * Validate data against a schema + * @param {string} schemaName - Name of the schema + * @param {Object} data - Data to validate + * @returns {Object} Validation result + */ + validate(schemaName, data) { + if (!this.compiledSchemas[schemaName]) { + return { + valid: false, + errors: [`Schema '${schemaName}' not found`] + }; + } + + const valid = this.compiledSchemas[schemaName](data); + + if (valid) { + return { + valid: true, + errors: [] + }; + } else { + const errors = this.compiledSchemas[schemaName].errors.map(error => { + const path = error.instancePath || 'root'; + return `${path}: ${error.message}`; + }); + + logger.warn({ + message: `Schema validation failed for '${schemaName}'`, + errors, + data: JSON.stringify(data, null, 2) + }); + + return { + valid: false, + errors + }; + } + } + + /** + * Validate unified recommendation + * @param {Object} recommendation - Recommendation to validate + * @returns {Object} Validation result + */ + validateUnifiedRecommendation(recommendation) { + return this.validate('unifiedRecommendation', recommendation); + } + + /** + * Validate tech stack request + * @param {Object} request - Request to validate + * @returns {Object} Validation result + */ + validateTechStackRequest(request) { + return this.validate('techStackRequest', request); + } + + /** + * Validate template request + * @param {Object} request - Request to validate + * @returns {Object} Validation result + */ + validateTemplateRequest(request) { + return this.validate('templateRequest', request); + } + + /** + * Get all available schemas + * @returns {Array} List of schema names + */ + getAvailableSchemas() { + return Object.keys(this.schemas); + } + + /** + * Get schema definition + * @param {string} schemaName - Name of the schema + * @returns {Object|null} Schema definition + */ + getSchema(schemaName) { + return this.schemas[schemaName] || null; + } +} + +module.exports = new SchemaValidator(); diff --git a/services/unison/start.sh b/services/unison/start.sh new file mode 100644 index 0000000..28b1ba5 --- /dev/null +++ b/services/unison/start.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +# Unison Service Startup Script +# This script handles the startup of the Unison service with proper error handling and logging + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging function +log() { + echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')] ✓${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] ⚠${NC} $1" +} + +log_error() { + echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ✗${NC} $1" +} + +# Configuration +SERVICE_NAME="Unison" +SERVICE_PORT=${PORT:-8010} +SERVICE_HOST=${HOST:-0.0.0.0} +NODE_ENV=${NODE_ENV:-development} +LOG_LEVEL=${LOG_LEVEL:-info} + +# External service URLs (set by docker-compose.yml) +TECH_STACK_SELECTOR_URL=${TECH_STACK_SELECTOR_URL:-http://pipeline_tech_stack_selector:8002} +TEMPLATE_MANAGER_URL=${TEMPLATE_MANAGER_URL:-http://pipeline_template_manager:8009} +TEMPLATE_MANAGER_AI_URL=${TEMPLATE_MANAGER_AI_URL:-http://pipeline_template_manager:8013} + +# Health check URLs (set by docker-compose.yml) +TECH_STACK_SELECTOR_HEALTH_URL=${TECH_STACK_SELECTOR_HEALTH_URL:-http://pipeline_tech_stack_selector:8002/health} +TEMPLATE_MANAGER_HEALTH_URL=${TEMPLATE_MANAGER_HEALTH_URL:-http://pipeline_template_manager:8009/health} + +# Timeouts +REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-30000} +HEALTH_CHECK_TIMEOUT=${HEALTH_CHECK_TIMEOUT:-5000} + +# Create logs directory +mkdir -p logs + +# Load environment variables from config.env if it exists +if [ -f "config.env" ]; then + echo "Loading environment variables from config.env..." + export $(cat config.env | grep -v '^#' | xargs) +fi + +# Function to check if a service is healthy +check_service_health() { + local service_name=$1 + local health_url=$2 + local timeout=${3:-5000} + + log "Checking health of $service_name at $health_url..." + + if curl -f -s --max-time $((timeout / 1000)) "$health_url" > /dev/null 2>&1; then + log_success "$service_name is healthy" + return 0 + else + log_warning "$service_name is not responding" + return 1 + fi +} + +# Function to wait for external services +wait_for_services() { + log "Waiting for external services to be available..." + + local max_attempts=30 + local attempt=1 + + while [ $attempt -le $max_attempts ]; do + log "Attempt $attempt/$max_attempts: Checking external services..." + + local tech_stack_healthy=false + local template_manager_healthy=false + + if check_service_health "Tech Stack Selector" "$TECH_STACK_SELECTOR_HEALTH_URL" "$HEALTH_CHECK_TIMEOUT"; then + tech_stack_healthy=true + fi + + if check_service_health "Template Manager" "$TEMPLATE_MANAGER_HEALTH_URL" "$HEALTH_CHECK_TIMEOUT"; then + template_manager_healthy=true + fi + + if [ "$tech_stack_healthy" = true ] && [ "$template_manager_healthy" = true ]; then + log_success "All external services are healthy" + return 0 + fi + + log_warning "Some services are not ready yet. Waiting 10 seconds..." + sleep 10 + attempt=$((attempt + 1)) + done + + log_error "Timeout waiting for external services after $max_attempts attempts" + log_warning "Starting service anyway - it will handle service unavailability gracefully" + return 1 +} + +# Function to validate environment +validate_environment() { + log "Validating environment configuration..." + + # Check Node.js version + if ! command -v node &> /dev/null; then + log_error "Node.js is not installed" + exit 1 + fi + + local node_version=$(node --version) + log_success "Node.js version: $node_version" + + # Check if package.json exists + if [ ! -f "package.json" ]; then + log_error "package.json not found" + exit 1 + fi + + # Check if node_modules exists + if [ ! -d "node_modules" ]; then + log_warning "node_modules not found. Installing dependencies..." + npm install + fi + + # Check if source directory exists + if [ ! -d "src" ]; then + log_error "Source directory 'src' not found" + exit 1 + fi + + # Check if main app file exists + if [ ! -f "src/app.js" ]; then + log_error "Main application file 'src/app.js' not found" + exit 1 + fi + + log_success "Environment validation passed" +} + +# Function to start the service +start_service() { + log "Starting $SERVICE_NAME service..." + + # Set environment variables + export NODE_ENV + export PORT=$SERVICE_PORT + export HOST=$SERVICE_HOST + export LOG_LEVEL + export TECH_STACK_SELECTOR_URL + export TEMPLATE_MANAGER_URL + export TEMPLATE_MANAGER_AI_URL + export TECH_STACK_SELECTOR_HEALTH_URL + export TEMPLATE_MANAGER_HEALTH_URL + export REQUEST_TIMEOUT + export HEALTH_CHECK_TIMEOUT + + # Log configuration + log "Configuration:" + log " Service: $SERVICE_NAME" + log " Port: $SERVICE_PORT" + log " Host: $SERVICE_HOST" + log " Environment: $NODE_ENV" + log " Log Level: $LOG_LEVEL" + log " Tech Stack Selector: $TECH_STACK_SELECTOR_URL" + log " Template Manager: $TEMPLATE_MANAGER_URL" + log " Template Manager AI: $TEMPLATE_MANAGER_AI_URL" + + # Start the service + log "Starting Node.js application..." + exec node src/app.js +} + +# Function to handle graceful shutdown +cleanup() { + log "Received shutdown signal. Cleaning up..." + log_success "$SERVICE_NAME service stopped gracefully" + exit 0 +} + +# Set up signal handlers +trap cleanup SIGTERM SIGINT + +# Main execution +main() { + log "Starting $SERVICE_NAME service initialization..." + + # Validate environment + validate_environment + + # Wait for external services (non-blocking) + wait_for_services || true + + # Start the service + start_service +} + +# Run main function +main "$@" diff --git a/services/unison/unison_api.json b/services/unison/unison_api.json new file mode 100644 index 0000000..d30dbcc --- /dev/null +++ b/services/unison/unison_api.json @@ -0,0 +1,647 @@ +{ + "info": { + "name": "Unison - Unified Tech Stack Recommendation Service", + "_postman_id": "unison-api-complete-2025", + "description": "Complete API collection for Unison service - unified tech stack and template recommendations", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "variable": [ + { + "key": "baseUrl", + "value": "http://localhost:8010", + "type": "string", + "description": "Base URL for Unison service" + }, + { + "key": "templateId", + "value": "123e4567-e89b-12d3-a456-426614174000", + "type": "string", + "description": "Sample template ID for testing" + }, + { + "key": "recommendationId", + "value": "", + "type": "string", + "description": "Store recommendation ID from unified request" + } + ], + "item": [ + { + "name": "Service Health & Info", + "item": [ + { + "name": "Root Endpoint - Service Info", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/", + "host": ["{{baseUrl}}"] + } + }, + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Response has service info\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('message');", + " pm.expect(jsonData).to.have.property('version');", + " pm.expect(jsonData).to.have.property('status');", + "});" + ] + } + } + ] + }, + { + "name": "Health Check", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/health", + "host": ["{{baseUrl}}"], + "path": ["health"] + } + }, + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Health check responds\", function () {", + " pm.response.to.have.status.oneOf([200, 503]);", + "});", + "", + "pm.test(\"Has health status\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('status');", + " pm.expect(jsonData).to.have.property('service', 'unison');", + "});" + ] + } + } + ] + }, + { + "name": "Detailed Health Check", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/health/detailed", + "host": ["{{baseUrl}}"], + "path": ["health", "detailed"] + } + } + } + ] + }, + { + "name": "Unified Recommendations", + "item": [ + { + "name": "Unified - Domain Only (Basic)", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"healthcare\",\n \"budget\": 10000\n}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + }, + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Response has recommendation data\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success', true);", + " pm.expect(jsonData).to.have.property('data');", + " pm.expect(jsonData.data).to.have.property('stack_name');", + "});", + "", + "pm.test(\"Save request ID\", function () {", + " const jsonData = pm.response.json();", + " if (jsonData.requestId) {", + " pm.collectionVariables.set('recommendationId', jsonData.requestId);", + " }", + "});" + ] + } + } + ] + }, + { + "name": "Unified - With Template ID", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"ecommerce\",\n \"budget\": 15000,\n \"templateId\": \"{{templateId}}\",\n \"preferredTechnologies\": [\"React\", \"Node.js\"],\n \"includeSimilar\": true,\n \"includeKeywords\": true,\n \"forceRefresh\": false\n}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + } + }, + { + "name": "Unified - Full Parameters", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"fintech\",\n \"budget\": 25000,\n \"templateId\": \"{{templateId}}\",\n \"preferredTechnologies\": [\"React\", \"Python\", \"PostgreSQL\"],\n \"includeSimilar\": true,\n \"includeKeywords\": true,\n \"forceRefresh\": true\n}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + } + }, + { + "name": "Unified - Minimal Request", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + } + } + ] + }, + { + "name": "Individual Service Recommendations", + "item": [ + { + "name": "Tech Stack Only - Basic", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/tech-stack?domain=healthcare&budget=10000", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "tech-stack"], + "query": [ + { + "key": "domain", + "value": "healthcare" + }, + { + "key": "budget", + "value": "10000" + } + ] + } + } + }, + { + "name": "Tech Stack Only - With Preferences", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/tech-stack?domain=ecommerce&budget=15000&preferredTechnologies=React,Node.js,PostgreSQL", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "tech-stack"], + "query": [ + { + "key": "domain", + "value": "ecommerce" + }, + { + "key": "budget", + "value": "15000" + }, + { + "key": "preferredTechnologies", + "value": "React,Node.js,PostgreSQL" + } + ] + } + } + }, + { + "name": "Template Only - Basic", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/template/{{templateId}}", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "template", "{{templateId}}"] + } + } + }, + { + "name": "Template Only - Force Refresh", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/template/{{templateId}}?force_refresh=true", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "template", "{{templateId}}"], + "query": [ + { + "key": "force_refresh", + "value": "true" + } + ] + } + } + } + ] + }, + { + "name": "Stored Recommendations", + "item": [ + { + "name": "Get Recent Recommendations", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/stored?limit=10", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "stored"], + "query": [ + { + "key": "limit", + "value": "10" + } + ] + } + } + }, + { + "name": "Get Recommendations by Domain", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/stored?domain=healthcare&limit=5", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "stored"], + "query": [ + { + "key": "domain", + "value": "healthcare" + }, + { + "key": "limit", + "value": "5" + } + ] + } + } + }, + { + "name": "Get Recommendations by Template ID", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/stored?templateId={{templateId}}&limit=5", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "stored"], + "query": [ + { + "key": "templateId", + "value": "{{templateId}}" + }, + { + "key": "limit", + "value": "5" + } + ] + } + } + }, + { + "name": "Get Specific Recommendation by ID", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/stored/{{recommendationId}}", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "stored", "{{recommendationId}}"] + } + } + }, + { + "name": "Get Recommendation Statistics", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/stats", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "stats"] + } + } + } + ] + }, + { + "name": "Schemas & Validation", + "item": [ + { + "name": "Get Available Schemas", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/schemas", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "schemas"] + } + } + } + ] + }, + { + "name": "Error Testing", + "item": [ + { + "name": "Invalid Template ID", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/recommendations/template/invalid-uuid", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "template", "invalid-uuid"] + } + }, + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Should return error for invalid UUID\", function () {", + " pm.response.to.have.status.oneOf([400, 500]);", + "});", + "", + "pm.test(\"Error response has success false\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('success', false);", + "});" + ] + } + } + ] + }, + { + "name": "Invalid Unified Request", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"budget\": \"invalid-budget\",\n \"preferredTechnologies\": \"not-an-array\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + } + }, + { + "name": "404 Test - Invalid Route", + "request": { + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/api/nonexistent-endpoint", + "host": ["{{baseUrl}}"], + "path": ["api", "nonexistent-endpoint"] + } + }, + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 404\", function () {", + " pm.response.to.have.status(404);", + "});", + "", + "pm.test(\"Has error message\", function () {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData).to.have.property('error');", + "});" + ] + } + } + ] + } + ] + }, + { + "name": "Load Testing Scenarios", + "item": [ + { + "name": "Multiple Domains Test", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"{{$randomArrayElement(['healthcare', 'ecommerce', 'fintech', 'education', 'gaming'])}}\",\n \"budget\": {{$randomInt}}\n}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + } + }, + { + "name": "Concurrent Request Simulation", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"domain\": \"stress-test\",\n \"budget\": 5000,\n \"preferredTechnologies\": [\"React\", \"Node.js\"]\n}" + }, + "url": { + "raw": "{{baseUrl}}/api/recommendations/unified", + "host": ["{{baseUrl}}"], + "path": ["api", "recommendations", "unified"] + } + } + } + ] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "type": "text/javascript", + "exec": [ + "// Log request details", + "console.log('Making request to:', pm.request.url.toString());" + ] + } + }, + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Global test - log response time", + "const responseTime = pm.response.responseTime;", + "console.log('Response time:', responseTime + 'ms');", + "", + "// Global test - check for valid JSON", + "pm.test('Response is valid JSON', function () {", + " pm.response.to.be.json;", + "});" + ] + } + } + ] + } \ No newline at end of file diff --git a/services/user-auth/src/migrations/001_user_auth_schema.sql b/services/user-auth/src/migrations/001_user_auth_schema.sql index d3b594d..060fe05 100644 --- a/services/user-auth/src/migrations/001_user_auth_schema.sql +++ b/services/user-auth/src/migrations/001_user_auth_schema.sql @@ -1,17 +1,12 @@ -- User Authentication Database Schema -- JWT-based authentication with user preferences for template features --- Drop tables if they exist (for development) -DROP TABLE IF EXISTS user_feature_preferences CASCADE; -DROP TABLE IF EXISTS user_sessions CASCADE; -DROP TABLE IF EXISTS refresh_tokens CASCADE; -DROP TABLE IF EXISTS users CASCADE; -DROP TABLE IF EXISTS user_projects CASCADE; +-- Create tables only if they don't exist (production-safe) -- Users table - Core user accounts -CREATE TABLE users ( +CREATE TABLE IF NOT EXISTS users ( id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), username VARCHAR(50) NOT NULL UNIQUE, email VARCHAR(255) NOT NULL UNIQUE, @@ -27,7 +22,7 @@ CREATE TABLE users ( ); -- Refresh tokens table -CREATE TABLE refresh_tokens ( +CREATE TABLE IF NOT EXISTS refresh_tokens ( id UUID PRIMARY KEY, user_id UUID REFERENCES users(id) ON DELETE CASCADE, token_hash VARCHAR(255) NOT NULL, @@ -38,7 +33,7 @@ CREATE TABLE refresh_tokens ( ); -- User sessions table -CREATE TABLE user_sessions ( +CREATE TABLE IF NOT EXISTS user_sessions ( id UUID PRIMARY KEY, user_id UUID REFERENCES users(id) ON DELETE CASCADE, session_token VARCHAR(255) UNIQUE, @@ -51,7 +46,7 @@ CREATE TABLE user_sessions ( expires_at TIMESTAMP DEFAULT NOW() + INTERVAL '30 days' ); -- User feature preferences table -CREATE TABLE user_feature_preferences ( +CREATE TABLE IF NOT EXISTS user_feature_preferences ( id UUID PRIMARY KEY, user_id UUID REFERENCES users(id) ON DELETE CASCADE, template_type VARCHAR(100) NOT NULL, @@ -63,7 +58,7 @@ CREATE TABLE user_feature_preferences ( UNIQUE(user_id, template_type, feature_id, preference_type) ); -- User projects table -CREATE TABLE user_projects ( +CREATE TABLE IF NOT EXISTS user_projects ( id UUID PRIMARY KEY, user_id UUID REFERENCES users(id) ON DELETE CASCADE, project_name VARCHAR(200) NOT NULL,