Docker setup done; add-spectator and add-approver now handled from the backend; dashboard created for metrics
This commit is contained in:
parent 18620235d8
commit a6bafa8764

538 API_SIMPLIFIED_FORMAT.md (new file)
@@ -0,0 +1,538 @@
# Simplified Workflow API - Postman Guide

## ✅ Updated Simplified Format

The API has been updated to make workflow creation much simpler. You now only need to provide **email** and **tatHours** for approvers, and **email** for spectators. The backend automatically handles:

- User lookup/creation from Okta/Azure AD
- Fetching user details (name, department, designation)
- Auto-generating level names based on designation/department
- Auto-detecting the final approver (last level)
- Proper validation with clear error messages

---

## Authentication

### Login
```http
POST {{baseUrl}}/auth/login
Content-Type: application/json

{
  "email": "your-email@example.com",
  "password": "your-password"
}
```

**Response:**
```json
{
  "success": true,
  "data": {
    "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
    "user": { "userId": "...", "email": "...", ... }
  }
}
```

---

## Create Workflow - Simplified Format

### Example 1: Simple Workflow (JSON)

**POST** `{{baseUrl}}/workflows`

**Headers:**
```
Content-Type: application/json
Authorization: Bearer <your_token>
```

**Body:**
```json
{
  "templateType": "CUSTOM",
  "title": "Purchase Order Approval - Office Equipment",
  "description": "Approval needed for purchasing new office equipment including laptops and monitors. Total budget: $50,000",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "manager@royalenfield.com",
      "tatHours": 24
    },
    {
      "email": "director@royalenfield.com",
      "tatHours": 48
    },
    {
      "email": "cfo@royalenfield.com",
      "tatHours": 72
    }
  ],
  "spectators": [
    {
      "email": "hr@royalenfield.com"
    },
    {
      "email": "finance@royalenfield.com"
    }
  ]
}
```

---

### Example 2: Express Priority with Final Approver Flag

```json
{
  "templateType": "CUSTOM",
  "title": "Urgent: Server Infrastructure Upgrade",
  "description": "Critical server infrastructure upgrade required immediately",
  "priority": "EXPRESS",
  "approvalLevels": [
    {
      "email": "it-manager@royalenfield.com",
      "tatHours": 8
    },
    {
      "email": "cto@royalenfield.com",
      "tatHours": 16,
      "isFinalApprover": true
    }
  ]
}
```

---

### Example 3: With Custom Level Names

```json
{
  "templateType": "CUSTOM",
  "title": "Vendor Contract Approval",
  "description": "New vendor contract for manufacturing components",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "procurement@royalenfield.com",
      "tatHours": 24,
      "levelName": "Procurement Review"
    },
    {
      "email": "legal@royalenfield.com",
      "tatHours": 48,
      "levelName": "Legal Compliance"
    },
    {
      "email": "vp@royalenfield.com",
      "tatHours": 72,
      "levelName": "Executive Approval",
      "isFinalApprover": true
    }
  ]
}
```

---

### Example 4: Multipart with Files

**POST** `{{baseUrl}}/workflows/multipart`

**Headers:**
```
Authorization: Bearer <your_token>
```

**Body (form-data):**

| Key | Type | Value |
|-----|------|-------|
| `payload` | Text | `{"templateType":"CUSTOM","title":"Budget Request 2025","description":"Annual budget request","priority":"STANDARD","approvalLevels":[{"email":"finance-manager@royalenfield.com","tatHours":48},{"email":"cfo@royalenfield.com","tatHours":72}]}` |
| `files` | File | Select PDF/Excel file(s) |
| `category` | Text | `SUPPORTING` |
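
For quick testing outside Postman, here is a minimal Node 18+ sketch of the same multipart call. The `payload`/`files`/`category` field names match the table above; the base URL, token, and file path are placeholders:

```javascript
// Minimal multipart upload sketch (Node 18+, global fetch/FormData/Blob).
// Field names follow the form-data table above; adjust baseUrl, token, and file path.
const fs = require("node:fs");

async function createWorkflowWithFiles(baseUrl, token) {
  const form = new FormData();
  form.append("payload", JSON.stringify({
    templateType: "CUSTOM",
    title: "Budget Request 2025",
    description: "Annual budget request",
    priority: "STANDARD",
    approvalLevels: [
      { email: "finance-manager@royalenfield.com", tatHours: 48 },
      { email: "cfo@royalenfield.com", tatHours: 72 },
    ],
  }));
  form.append("files", new Blob([fs.readFileSync("./budget.pdf")]), "budget.pdf");
  form.append("category", "SUPPORTING");

  const res = await fetch(`${baseUrl}/workflows/multipart`, {
    method: "POST",
    headers: { Authorization: `Bearer ${token}` }, // fetch sets the multipart boundary itself
    body: form,
  });
  return res.json();
}
```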

---

## Field Reference

### Required Fields

| Field | Type | Description | Example |
|-------|------|-------------|---------|
| `templateType` | string | Workflow type | `"CUSTOM"` or `"TEMPLATE"` |
| `title` | string | Request title (max 500 chars) | `"Purchase Order Approval"` |
| `description` | string | Detailed description | `"Approval needed for..."` |
| `priority` | string | Request priority | `"STANDARD"` or `"EXPRESS"` |
| `approvalLevels` | array | List of approvers (min 1, max 10) | See below |

### Approval Level Fields

| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `email` | string | ✅ Yes | Approver's email address |
| `tatHours` | number | ✅ Yes | Turnaround time in hours (positive number) |
| `isFinalApprover` | boolean | ❌ No | Explicitly mark as final approver (auto-detected if last level) |
| `levelName` | string | ❌ No | Custom level name (auto-generated if not provided) |

**Auto-generated `levelName` logic** (sketched below):
- If the approver has a **designation**: `"{Designation} Approval"` (e.g., "Manager Approval")
- If the approver has a **department**: `"{Department} Approval"` (e.g., "Finance Approval")
- Otherwise: `"Level {N} Approval"` (e.g., "Level 1 Approval")
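
A minimal sketch of that fallback chain. The `designation` and `department` field names follow the description above and are assumptions, not the actual backend code:

```javascript
// Sketch of the levelName auto-generation fallback described above
// (designation → department → level number). Illustrative only.
function generateLevelName(user, levelNumber) {
  if (user.designation) return `${user.designation} Approval`; // e.g. "Manager Approval"
  if (user.department) return `${user.department} Approval`;   // e.g. "Finance Approval"
  return `Level ${levelNumber} Approval`;                      // e.g. "Level 1 Approval"
}
```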

### Spectator Fields

| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `email` | string | ✅ Yes | Spectator's email address |

---

## Validation & Error Handling

The backend automatically validates and provides clear error messages:

### ✅ Successful Response
```json
{
  "success": true,
  "message": "Workflow created successfully",
  "data": {
    "requestId": "uuid",
    "requestNumber": "REQ-2025-12-0001",
    "title": "...",
    "status": "PENDING",
    ...
  }
}
```

### ❌ Error: Invalid Email
```json
{
  "success": false,
  "error": "Failed to create workflow",
  "details": "Approver email 'invalid@example.com' not found in organization directory. Please verify the email address."
}
```

### ❌ Error: Duplicate Email
```json
{
  "success": false,
  "error": "Failed to create workflow",
  "details": "Duplicate approver email found: manager@example.com. Each approver must have a unique email."
}
```

### ❌ Error: Invalid Initiator
```json
{
  "success": false,
  "error": "Failed to create workflow",
  "details": "Invalid initiator: User with ID '...' not found. Please ensure you are logged in with a valid account."
}
```

### ❌ Error: Validation Failed
```json
{
  "success": false,
  "error": "Validation failed",
  "details": "approvalLevels.0.email: Valid email is required; approvalLevels.0.tatHours: TAT hours must be positive"
}
```

---

## What Happens Behind the Scenes

When you create a workflow, the backend:

1. **Validates Initiator**: Ensures the logged-in user exists
2. **Enriches Approval Levels** (lookup flow sketched after this list):
   - Searches for each approver in the local database
   - If not found, fetches from Okta/Azure AD
   - Creates a user record if they exist in AD but not in the DB
   - Extracts: `userId`, `displayName`, `designation`, `department`
   - Auto-generates `levelName` if not provided
   - Auto-detects `isFinalApprover` (last level = true)
3. **Enriches Spectators**:
   - Same lookup/creation process as approvers
   - Sets default permissions (view + comment, no download)
4. **Creates Workflow**:
   - Saves the workflow request
   - Creates approval levels
   - Creates participants
   - Sends notifications
   - Logs activity
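
A hedged sketch of the lookup/creation step in point 2. `findUserByEmail`, `fetchFromDirectory`, and `createUser` are hypothetical helper names standing in for the real DB and Okta/Azure AD calls:

```javascript
// Sketch of the approver-enrichment flow described above.
// findUserByEmail / fetchFromDirectory / createUser are hypothetical helpers,
// not the actual backend API.
async function resolveApprover(email) {
  let user = await findUserByEmail(email);            // 1. local database
  if (!user) {
    const adUser = await fetchFromDirectory(email);   // 2. Okta / Azure AD
    if (!adUser) {
      throw new Error(
        `Approver email '${email}' not found in organization directory.`
      );
    }
    user = await createUser(adUser);                  // 3. create local record
  }
  const { userId, displayName, designation, department } = user;
  return { userId, displayName, designation, department };
}
```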

---

## Migration from Old Format

### ❌ Old Format (No Longer Required)
```json
{
  "approvalLevels": [
    {
      "levelNumber": 1,
      "levelName": "Manager Approval",
      "approverId": "uuid-123",
      "approverEmail": "manager@example.com",
      "approverName": "John Doe",
      "tatHours": 24,
      "isFinalApprover": false
    }
  ]
}
```

### ✅ New Simplified Format
```json
{
  "approvalLevels": [
    {
      "email": "manager@example.com",
      "tatHours": 24
    }
  ]
}
```

**The backend handles everything else automatically!**

---

## Tips & Best Practices

1. **Use Valid Email Addresses**: Ensure all approver/spectator emails exist in your Okta/Azure AD
2. **TAT Hours**: Set realistic turnaround times based on priority:
   - STANDARD: 24-72 hours per level
   - EXPRESS: 8-24 hours per level
3. **Final Approver**: The last level is automatically marked as the final approver (you can override with `isFinalApprover: true` on any level)
4. **Level Names**: Let the system auto-generate names based on designation/department, or provide custom names
5. **Spectators**: Add users who need visibility but not approval authority
6. **Documents**: Use the `/multipart` endpoint for file uploads (see the Node sketch under Example 4)

---

## Testing in Postman

1. **Set Environment Variables**:
   - `baseUrl`: `http://localhost:5000/api/v1`
   - `token`: Your auth token from login
2. **Login First**:
   - Call `POST /auth/login`
   - Copy the `token` from the response
   - Set it as an environment variable (or use the test script after this list)
3. **Create Workflow**:
   - Use the simplified format
   - Only provide email + tatHours
   - The backend handles the rest
4. **Check Response**:
   - Verify a `requestNumber` is generated
   - Check `approvalLevels` are enriched with user data
   - Confirm `participants` includes the spectators
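
To avoid copying the token by hand in step 2, a small script in the login request's **Tests** tab can store it automatically. This assumes the `{ success, data: { token } }` response shape shown in the Authentication section:

```javascript
// Postman "Tests" tab on the login request: capture the JWT into the environment.
// Assumes the response shape documented in the Authentication section above.
const body = pm.response.json();
if (body.success && body.data && body.data.token) {
    pm.environment.set("token", body.data.token);
}
```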

---

## Add Approver/Spectator After Request Creation

These endpoints allow adding approvers or spectators to an existing request. They follow the same simplified pattern - just provide an email, and the backend handles user lookup/creation.

### Add Approver at Specific Level

**POST** `{{baseUrl}}/workflows/:requestId/approvers/at-level`

**Headers:**
```
Authorization: Bearer <your_token>
Content-Type: application/json
```

**Body:**
```json
{
  "email": "newapprover@royalenfield.com",
  "tatHours": 24,
  "level": 2
}
```

**What Happens:**
- ✅ Finds the user by email in the DB, or syncs from Okta/AD if not found
- ✅ Auto-generates a levelName based on designation/department
- ✅ Shifts existing levels if needed (sketched after this list)
- ✅ Updates the final approver flag
- ✅ Sends a notification to the new approver
- ✅ Logs activity
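
A rough sketch of the level-shifting behavior, assuming an in-memory array of pending levels; the real backend presumably does this inside a database transaction:

```javascript
// Sketch only: insert a new approver at newLevel.levelNumber, pushing later
// levels down by one and recomputing the final-approver flag.
// Not the actual backend implementation.
function insertApproverAtLevel(levels, newLevel) {
  for (const lvl of levels) {
    if (lvl.levelNumber >= newLevel.levelNumber) lvl.levelNumber += 1; // shift down
  }
  levels.push(newLevel);
  levels.sort((a, b) => a.levelNumber - b.levelNumber);
  levels.forEach((lvl, i) => { lvl.isFinalApprover = i === levels.length - 1; });
  return levels;
}
```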

**Response:**
```json
{
  "success": true,
  "message": "Approver added successfully",
  "data": {
    "levelId": "uuid",
    "levelNumber": 2,
    "levelName": "Manager Approval",
    "approverId": "uuid",
    "approverEmail": "newapprover@royalenfield.com",
    "approverName": "John Doe",
    "tatHours": 24,
    "status": "PENDING"
  }
}
```

---

### Add Simple Approver (General)

**POST** `{{baseUrl}}/workflows/:requestId/participants/approver`

**Headers:**
```
Authorization: Bearer <your_token>
Content-Type: application/json
```

**Body:**
```json
{
  "email": "approver@royalenfield.com"
}
```

*Note: This adds them as a general approver participant, not at a specific level.*

---

### Add Spectator

**POST** `{{baseUrl}}/workflows/:requestId/participants/spectator`

**Headers:**
```
Authorization: Bearer <your_token>
Content-Type: application/json
```

**Body:**
```json
{
  "email": "spectator@royalenfield.com"
}
```

**What Happens:**
- ✅ Finds the user by email in the DB, or syncs from Okta/AD if not found
- ✅ Sets spectator permissions (view + comment, no download)
- ✅ Sends a notification to the new spectator
- ✅ Logs activity

**Response:**
```json
{
  "success": true,
  "data": {
    "participantId": "uuid",
    "userId": "uuid",
    "userEmail": "spectator@royalenfield.com",
    "userName": "Jane Doe",
    "participantType": "SPECTATOR",
    "canComment": true,
    "canViewDocuments": true,
    "canDownloadDocuments": false
  }
}
```

---

### Error Handling for Add Operations

**❌ User Not Found in AD:**
```json
{
  "success": false,
  "error": "Failed to add approver",
  "details": "Approver email 'invalid@example.com' not found in organization directory. Please verify the email address."
}
```

**❌ User Already a Participant:**
```json
{
  "success": false,
  "error": "Failed to add spectator",
  "details": "User is already a participant in this request"
}
```

**❌ Invalid Level:**
```json
{
  "success": false,
  "error": "Failed to add approver at level",
  "details": "Cannot add approver at level 1 - level has already been completed"
}
```

---

## Complete Flow Example

### 1. Login
```bash
POST /api/v1/auth/login
Body: { "email": "user@example.com", "password": "pass" }
```

### 2. Create Workflow (Simplified)
```bash
POST /api/v1/workflows
Body: {
  "templateType": "CUSTOM",
  "title": "Purchase Order",
  "description": "Office equipment",
  "priority": "STANDARD",
  "approvalLevels": [
    { "email": "manager@example.com", "tatHours": 24 }
  ]
}
```

### 3. Add Additional Approver (After Creation)
```bash
POST /api/v1/workflows/:requestId/approvers/at-level
Body: {
  "email": "director@example.com",
  "tatHours": 48,
  "level": 2
}
```

### 4. Add Spectator
```bash
POST /api/v1/workflows/:requestId/participants/spectator
Body: { "email": "hr@example.com" }
```

---

## Need Help?

If you encounter any issues:
1. Check the error message - it will tell you exactly what's wrong
2. Verify the emails exist in your organization directory
3. Ensure you're logged in with a valid token
4. Check the backend logs for detailed error information

266 POSTMAN_COLLECTION_UPDATES.md (new file)
@@ -0,0 +1,266 @@
# Postman Collection Updates - Simplified API

## ✅ Updated Endpoints

The Postman collection has been updated to use the **simplified API format**. Here's what changed:

---

### **1. Create Workflow (JSON) - Simplified** ✨

**Old Format (REMOVED):**
```json
{
  "requestTitle": "...",
  "requestDescription": "...",
  "requestingDepartment": "IT",
  "requestCategory": "PURCHASE_ORDER",
  "approvers": [
    { "email": "...", "tatHours": 24, "level": 1 }
  ]
}
```

**New Simplified Format:**
```json
{
  "templateType": "CUSTOM",
  "title": "Purchase Order Approval for Office Equipment",
  "description": "Approval needed for purchasing new office equipment...",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "manager@royalenfield.com",
      "tatHours": 24
    },
    {
      "email": "director@royalenfield.com",
      "tatHours": 48
    },
    {
      "email": "cfo@royalenfield.com",
      "tatHours": 72
    }
  ],
  "spectators": [
    {
      "email": "hr@royalenfield.com"
    },
    {
      "email": "finance@royalenfield.com"
    }
  ]
}
```

**What Backend Does Automatically:**
- ✅ Finds/creates users from Okta/AD
- ✅ Generates level names from designation/department
- ✅ Auto-detects the final approver (last level)
- ✅ Sets proper permissions

---

### **2. Create Workflow (Multipart with Files) - Simplified** ✨

**Updated Form Data:**

| Key | Value |
|-----|-------|
| `payload` | `{"templateType":"CUSTOM","title":"...","description":"...","priority":"STANDARD","approvalLevels":[{"email":"manager@royalenfield.com","tatHours":24}],"spectators":[{"email":"hr@royalenfield.com"}]}` |
| `files` | Select file(s) |
| `category` | `SUPPORTING` (optional) |

**Changes:**
- ❌ Removed: `requestTitle`, `requestDescription`, `requestingDepartment`, `requestCategory`
- ❌ Removed: Complex approver format with level numbers
- ✅ Added: Single `payload` field with simplified JSON
- ✅ Simplified: Only `email` and `tatHours` per approver

---

### **3. Add Approver at Level - Simplified** 🆕

**NEW Endpoint Added!**

**Method:** `POST`
**URL:** `{{baseUrl}}/workflows/:id/approvers/at-level`

**Body:**
```json
{
  "email": "newapprover@royalenfield.com",
  "tatHours": 24,
  "level": 2
}
```

**What Backend Does:**
- ✅ Finds/creates the user from Okta/AD
- ✅ Generates a smart level name
- ✅ Shifts existing levels if needed
- ✅ Updates the final approver flag
- ✅ Sends notifications

---

### **4. Add Spectator - Simplified** 🆕

**NEW Endpoint Added!**

**Method:** `POST`
**URL:** `{{baseUrl}}/workflows/:id/participants/spectator`

**Body:**
```json
{
  "email": "spectator@royalenfield.com"
}
```

**What Backend Does:**
- ✅ Finds/creates the user from Okta/AD
- ✅ Sets spectator permissions (view + comment)
- ✅ Sends a notification

---

## 📋 Complete Workflow Example

### Step 1: Login
```http
POST {{baseUrl}}/auth/login
Content-Type: application/json

{
  "email": "user@royalenfield.com",
  "password": "your-password"
}
```

**Response:** Save the `token` from the response

---

### Step 2: Create Workflow (Simplified)
```http
POST {{baseUrl}}/workflows
Authorization: Bearer <token>
Content-Type: application/json

{
  "templateType": "CUSTOM",
  "title": "Purchase Order - Office Equipment",
  "description": "Approval for office equipment purchase",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "manager@royalenfield.com",
      "tatHours": 24
    }
  ]
}
```

**Response:** Save the `requestId` or `requestNumber`

---

### Step 3: Add Additional Approver
```http
POST {{baseUrl}}/workflows/REQ-2024-0001/approvers/at-level
Authorization: Bearer <token>
Content-Type: application/json

{
  "email": "director@royalenfield.com",
  "tatHours": 48,
  "level": 2
}
```

---

### Step 4: Add Spectator
```http
POST {{baseUrl}}/workflows/REQ-2024-0001/participants/spectator
Authorization: Bearer <token>
Content-Type: application/json

{
  "email": "hr@royalenfield.com"
}
```

---

## 🎯 Key Benefits

### Before (Old Format):
- ❌ Required user IDs and names to be supplied manually
- ❌ Complex payload structure
- ❌ Manual level naming
- ❌ Manual final-approver detection

### After (New Simplified Format):
- ✅ Only email required
- ✅ Simple, clean JSON
- ✅ Auto-generated level names
- ✅ Auto-detected final approver
- ✅ Auto user creation from Okta/AD
- ✅ Clear error messages

---

## 🔧 Environment Variables

Make sure to set these in Postman:

| Variable | Value | Example |
|----------|-------|---------|
| `baseUrl` | Backend API URL | `http://localhost:5000/api/v1` |
| `token` | Auth token from login | `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...` |

---

## 📝 Notes

1. **Backward Compatible:** The backend still accepts the old format, but the new format is recommended
2. **Auto User Creation:** If a user exists in Okta/AD but not in the database, they will be created automatically
3. **Smart Level Names:** Level names are generated from:
   - the user's designation (e.g., "Manager Approval")
   - the user's department (e.g., "Finance Approval")
   - fallback: "Level N Approval"
4. **Final Approver:** The last approval level is automatically marked as the final approver
5. **Error Messages:** Clear, actionable error messages for invalid emails or users not found in AD

---

## ❓ Troubleshooting

### Error: "User not found in organization directory"
- **Cause:** The email doesn't exist in Okta/AD
- **Solution:** Verify the email address is correct and the user has an active account

### Error: "Duplicate approver email found"
- **Cause:** The same email was used for multiple approval levels
- **Solution:** Each approver must have a unique email

### Error: "Invalid initiator"
- **Cause:** The auth token is invalid or the user doesn't exist
- **Solution:** Re-login to get a fresh token

---

## 🚀 Quick Start

1. **Import Collection:** Import `Royal_Enfield_API_Collection.postman_collection.json` into Postman
2. **Set Environment:** Configure the `baseUrl` and `token` variables
3. **Login:** Call the login endpoint to get your token
4. **Create Workflow:** Use the "Create Workflow (JSON) - Simplified" endpoint
5. **Test:** Try adding approvers and spectators using the new simplified endpoints

---

**Updated:** December 2, 2025
**Version:** 2.0 - Simplified API Format
@@ -359,7 +359,7 @@
       }
     },
     {
-      "name": "Create Workflow (JSON)",
+      "name": "Create Workflow (JSON) - Simplified",
       "request": {
         "method": "POST",
         "header": [
@@ -370,18 +370,18 @@
         ],
         "body": {
           "mode": "raw",
-          "raw": "{\n  // Request title - brief description\n  \"requestTitle\": \"Purchase Order Approval for Office Equipment\",\n  \n  // Detailed description of the request\n  \"requestDescription\": \"Approval needed for purchasing new office equipment including laptops, monitors, and office furniture. Total budget: $50,000\",\n  \n  // Priority: STANDARD | EXPRESS\n  \"priority\": \"STANDARD\",\n  \n  // Department requesting approval\n  \"requestingDepartment\": \"IT\",\n  \n  // Category of request\n  \"requestCategory\": \"PURCHASE_ORDER\",\n  \n  // Approvers list - array of approval levels\n  \"approvers\": [\n    {\n      // Approver's email\n      \"email\": \"manager@example.com\",\n      \n      // TAT (Turn Around Time) in hours\n      \"tatHours\": 24,\n      \n      // Level number (sequential)\n      \"level\": 1\n    },\n    {\n      \"email\": \"director@example.com\",\n      \"tatHours\": 48,\n      \"level\": 2\n    },\n    {\n      \"email\": \"cfo@example.com\",\n      \"tatHours\": 72,\n      \"level\": 3\n    }\n  ],\n  \n  // Spectators (optional) - users who can view but not approve\n  \"spectators\": [\n    {\n      \"email\": \"hr@example.com\"\n    },\n    {\n      \"email\": \"finance@example.com\"\n    }\n  ],\n  \n  // Document IDs (if documents uploaded separately)\n  \"documentIds\": []\n}"
+          "raw": "{\n  \"templateType\": \"CUSTOM\",\n  \"title\": \"Purchase Order Approval for Office Equipment\",\n  \"description\": \"Approval needed for purchasing new office equipment including laptops, monitors, and office furniture. Total budget: $50,000\",\n  \"priority\": \"STANDARD\",\n  \"approvalLevels\": [\n    {\n      \"email\": \"manager@royalenfield.com\",\n      \"tatHours\": 24\n    },\n    {\n      \"email\": \"director@royalenfield.com\",\n      \"tatHours\": 48\n    },\n    {\n      \"email\": \"cfo@royalenfield.com\",\n      \"tatHours\": 72\n    }\n  ],\n  \"spectators\": [\n    {\n      \"email\": \"hr@royalenfield.com\"\n    },\n    {\n      \"email\": \"finance@royalenfield.com\"\n    }\n  ]\n}"
         },
         "url": {
           "raw": "{{baseUrl}}/workflows",
           "host": ["{{baseUrl}}"],
           "path": ["workflows"]
         },
-        "description": "Create new workflow request with JSON payload"
+        "description": "Create new workflow request with JSON payload. Backend automatically:\n- Finds/creates users from Okta/AD\n- Generates level names from designation/department\n- Auto-detects final approver (last level)\n- Sets proper permissions\n\nOnly email and tatHours required per approver!"
       }
     },
     {
-      "name": "Create Workflow (Multipart with Files)",
+      "name": "Create Workflow (Multipart with Files) - Simplified",
       "request": {
         "method": "POST",
         "header": [],
@@ -389,52 +389,22 @@
           "mode": "formdata",
           "formdata": [
             {
-              "key": "requestTitle",
-              "value": "Purchase Order Approval for Office Equipment",
+              "key": "payload",
+              "value": "{\"templateType\":\"CUSTOM\",\"title\":\"Purchase Order Approval with Documents\",\"description\":\"Approval needed for office equipment purchase with supporting documents\",\"priority\":\"STANDARD\",\"approvalLevels\":[{\"email\":\"manager@royalenfield.com\",\"tatHours\":24},{\"email\":\"director@royalenfield.com\",\"tatHours\":48}],\"spectators\":[{\"email\":\"hr@royalenfield.com\"}]}",
               "type": "text",
-              "description": "Request title"
-            },
-            {
-              "key": "requestDescription",
-              "value": "Approval needed for purchasing new office equipment",
-              "type": "text",
-              "description": "Detailed description"
-            },
-            {
-              "key": "priority",
-              "value": "STANDARD",
-              "type": "text",
-              "description": "STANDARD or EXPRESS"
-            },
-            {
-              "key": "requestingDepartment",
-              "value": "IT",
-              "type": "text",
-              "description": "Department name"
-            },
-            {
-              "key": "requestCategory",
-              "value": "PURCHASE_ORDER",
-              "type": "text",
-              "description": "Category of request"
-            },
-            {
-              "key": "approvers",
-              "value": "[{\"email\":\"manager@example.com\",\"tatHours\":24,\"level\":1},{\"email\":\"director@example.com\",\"tatHours\":48,\"level\":2}]",
-              "type": "text",
-              "description": "JSON array of approvers"
-            },
-            {
-              "key": "spectators",
-              "value": "[{\"email\":\"hr@example.com\"}]",
-              "type": "text",
-              "description": "JSON array of spectators (optional)"
+              "description": "JSON payload with simplified format (email + tatHours only)"
             },
             {
              "key": "files",
              "type": "file",
              "src": [],
              "description": "Upload files (multiple files supported)"
            },
            {
              "key": "category",
              "value": "SUPPORTING",
              "type": "text",
              "description": "Document category: SUPPORTING | APPROVAL | REFERENCE | FINAL | OTHER"
            }
          ]
        },
@@ -443,7 +413,7 @@
           "host": ["{{baseUrl}}"],
           "path": ["workflows", "multipart"]
         },
-        "description": "Create workflow with file uploads using multipart/form-data"
+        "description": "Create workflow with file uploads. Backend automatically:\n- Finds/creates users from Okta/AD\n- Generates level names\n- Auto-detects final approver\n- Uploads and attaches documents\n\nOnly email and tatHours required per approver!"
       }
     },
     {
@@ -572,6 +542,64 @@
         "description": "Submit workflow for approval (changes status from DRAFT to OPEN)"
       }
     },
+    {
+      "name": "Add Approver at Level - Simplified",
+      "request": {
+        "method": "POST",
+        "header": [
+          {
+            "key": "Content-Type",
+            "value": "application/json"
+          }
+        ],
+        "body": {
+          "mode": "raw",
+          "raw": "{\n  \"email\": \"newapprover@royalenfield.com\",\n  \"tatHours\": 24,\n  \"level\": 2\n}"
+        },
+        "url": {
+          "raw": "{{baseUrl}}/workflows/:id/approvers/at-level",
+          "host": ["{{baseUrl}}"],
+          "path": ["workflows", ":id", "approvers", "at-level"],
+          "variable": [
+            {
+              "key": "id",
+              "value": "REQ-2024-0001",
+              "description": "Workflow ID or Request Number"
+            }
+          ]
+        },
+        "description": "Add a new approver at specific level. Backend automatically:\n- Finds/creates user from Okta/AD\n- Generates level name from designation/department\n- Shifts existing levels if needed\n- Updates final approver flag\n- Sends notifications\n\nOnly email, tatHours, and level required!"
+      }
+    },
+    {
+      "name": "Add Spectator - Simplified",
+      "request": {
+        "method": "POST",
+        "header": [
+          {
+            "key": "Content-Type",
+            "value": "application/json"
+          }
+        ],
+        "body": {
+          "mode": "raw",
+          "raw": "{\n  \"email\": \"spectator@royalenfield.com\"\n}"
+        },
+        "url": {
+          "raw": "{{baseUrl}}/workflows/:id/participants/spectator",
+          "host": ["{{baseUrl}}"],
+          "path": ["workflows", ":id", "participants", "spectator"],
+          "variable": [
+            {
+              "key": "id",
+              "value": "REQ-2024-0001",
+              "description": "Workflow ID or Request Number"
+            }
+          ]
+        },
+        "description": "Add a spectator to request. Backend automatically:\n- Finds/creates user from Okta/AD\n- Sets spectator permissions (view + comment, no download)\n- Sends notification\n\nOnly email required!"
+      }
+    },
     {
       "name": "Get Workflow Activity",
       "request": {

228 docker-compose.full.yml (new file)
@@ -0,0 +1,228 @@
# =============================================================================
# RE Workflow - Full Stack Docker Compose
# Includes: Application + Database + Monitoring Stack
# =============================================================================
# Usage:
#   docker-compose -f docker-compose.full.yml up -d
# =============================================================================

version: '3.8'

services:
  # ===========================================================================
  # APPLICATION SERVICES
  # ===========================================================================

  postgres:
    image: postgres:16-alpine
    container_name: re_workflow_db
    environment:
      POSTGRES_USER: ${DB_USER:-laxman}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-Admin@123}
      POSTGRES_DB: ${DB_NAME:-re_workflow_db}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./database/schema:/docker-entrypoint-initdb.d
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-laxman}"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    container_name: re_workflow_redis
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  backend:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: re_workflow_backend
    environment:
      NODE_ENV: development
      DB_HOST: postgres
      DB_PORT: 5432
      DB_USER: ${DB_USER:-laxman}
      DB_PASSWORD: ${DB_PASSWORD:-Admin@123}
      DB_NAME: ${DB_NAME:-re_workflow_db}
      REDIS_URL: redis://redis:6379
      PORT: 5000
      # Loki for logging
      LOKI_HOST: http://loki:3100
    ports:
      - "5000:5000"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    volumes:
      - ./logs:/app/logs
      - ./uploads:/app/uploads
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:5000/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})\""]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ===========================================================================
  # MONITORING SERVICES
  # ===========================================================================

  prometheus:
    image: prom/prometheus:v2.47.2
    container_name: re_prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./monitoring/prometheus/alert.rules.yml:/etc/prometheus/alert.rules.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--storage.tsdb.retention.time=15d'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      - '--web.enable-lifecycle'
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 10s
      retries: 3

  loki:
    image: grafana/loki:2.9.2
    container_name: re_loki
    ports:
      - "3100:3100"
    volumes:
      - ./monitoring/loki/loki-config.yml:/etc/loki/local-config.yaml:ro
      - loki_data:/loki
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

  promtail:
    image: grafana/promtail:2.9.2
    container_name: re_promtail
    volumes:
      - ./monitoring/promtail/promtail-config.yml:/etc/promtail/config.yml:ro
      - ./logs:/var/log/app:ro
      - promtail_data:/tmp/promtail
    command: -config.file=/etc/promtail/config.yml
    depends_on:
      - loki
    networks:
      - re_workflow_network
    restart: unless-stopped

  grafana:
    image: grafana/grafana:10.2.2
    container_name: re_grafana
    ports:
      - "3001:3000"
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=REWorkflow@2024
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_FEATURE_TOGGLES_ENABLE=publicDashboards
      - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,grafana-piechart-panel
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
      - ./monitoring/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
    depends_on:
      - prometheus
      - loki
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  node-exporter:
    image: prom/node-exporter:v1.6.1
    container_name: re_node_exporter
    ports:
      - "9100:9100"
    networks:
      - re_workflow_network
    restart: unless-stopped

  alertmanager:
    image: prom/alertmanager:v0.26.0
    container_name: re_alertmanager
    ports:
      - "9093:9093"
    volumes:
      - ./monitoring/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
      - alertmanager_data:/alertmanager
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
    networks:
      - re_workflow_network
    restart: unless-stopped

# ===========================================================================
# NETWORKS
# ===========================================================================
networks:
  re_workflow_network:
    driver: bridge
    name: re_workflow_network

# ===========================================================================
# VOLUMES
# ===========================================================================
volumes:
  postgres_data:
    name: re_postgres_data
  redis_data:
    name: re_redis_data
  prometheus_data:
    name: re_prometheus_data
  loki_data:
    name: re_loki_data
  promtail_data:
    name: re_promtail_data
  grafana_data:
    name: re_grafana_data
  alertmanager_data:
    name: re_alertmanager_data
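
The backend healthcheck in this compose file probes `GET /health`. A minimal Express sketch of such an endpoint, as a hypothetical illustration; the actual backend route may return richer status information:

```javascript
// Hypothetical /health route matching the compose healthcheck above.
const express = require("express");
const app = express();

app.get("/health", (req, res) => {
  res.status(200).json({ status: "ok", uptime: process.uptime() });
});

app.listen(process.env.PORT || 5000);
```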

326 docs/GCP_STORAGE_SETUP.md (new file)
@@ -0,0 +1,326 @@
# GCP Cloud Storage Setup Guide for RE Workflow

## Project Information

| Item | Value |
|------|-------|
| **Application** | RE Workflow System |
| **Environment** | UAT |
| **Domain** | https://reflow-uat.royalenfield.com |
| **Purpose** | Store workflow documents and attachments |

---

## 1. Requirements Overview

The RE Workflow application needs Google Cloud Storage to store:
- Request documents (uploaded during workflow creation)
- Attachments (added during the approval process)
- Supporting documents

### Folder Structure in Bucket

```
reflow-documents-uat/
├── requests/
│   ├── REQ-2025-12-0001/
│   │   ├── documents/
│   │   │   ├── proposal.pdf
│   │   │   └── specification.docx
│   │   └── attachments/
│   │       ├── approval_note.pdf
│   │       └── signature.png
│   │
│   ├── REQ-2025-12-0002/
│   │   ├── documents/
│   │   │   └── budget_report.xlsx
│   │   └── attachments/
│   │       └── manager_approval.pdf
│   │
│   └── REQ-2025-12-0003/
│       ├── documents/
│       └── attachments/
│
└── temp/
    └── (temporary uploads before processing)
```

---

## 2. GCP Bucket Configuration

### 2.1 Create Bucket

| Setting | Value |
|---------|-------|
| **Bucket Name** | `reflow-documents-uat` (UAT) / `reflow-documents-prod` (Production) |
| **Location Type** | Region |
| **Region** | `asia-south1` (Mumbai) |
| **Storage Class** | Standard |
| **Access Control** | Uniform |
| **Public Access Prevention** | Enforced (block all public access) |

### 2.2 Console Commands (gcloud CLI)

```bash
# Create bucket
gcloud storage buckets create gs://reflow-documents-uat \
  --project=YOUR_PROJECT_ID \
  --location=asia-south1 \
  --uniform-bucket-level-access

# Block public access
gcloud storage buckets update gs://reflow-documents-uat \
  --public-access-prevention
```

---

## 3. Service Account Setup

### 3.1 Create Service Account

| Setting | Value |
|---------|-------|
| **Name** | `reflow-storage-sa` |
| **Description** | Service account for RE Workflow file storage |

```bash
# Create service account
gcloud iam service-accounts create reflow-storage-sa \
  --display-name="RE Workflow Storage Service Account" \
  --project=YOUR_PROJECT_ID
```

### 3.2 Assign Permissions

The service account needs these roles:

| Role | Purpose |
|------|---------|
| `roles/storage.objectCreator` | Upload files |
| `roles/storage.objectViewer` | Download/preview files |
| `roles/storage.objectAdmin` | Delete files |

```bash
# Grant permissions
gcloud projects add-iam-policy-binding YOUR_PROJECT_ID \
  --member="serviceAccount:reflow-storage-sa@YOUR_PROJECT_ID.iam.gserviceaccount.com" \
  --role="roles/storage.objectAdmin"
```

### 3.3 Generate JSON Key

```bash
# Generate key file
gcloud iam service-accounts keys create gcp-key.json \
  --iam-account=reflow-storage-sa@YOUR_PROJECT_ID.iam.gserviceaccount.com
```

⚠️ **Security:** Share this key file securely (not via email). Use a secure file transfer method.

---

## 4. CORS Configuration

Apply this CORS policy to allow browser uploads:

### 4.1 Create `cors-config.json`

```json
[
  {
    "origin": [
      "https://reflow-uat.royalenfield.com",
      "https://reflow.royalenfield.com"
    ],
    "method": ["GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS"],
    "responseHeader": [
      "Content-Type",
      "Content-Disposition",
      "Content-Length",
      "Cache-Control",
      "x-goog-meta-*"
    ],
    "maxAgeSeconds": 3600
  }
]
```

### 4.2 Apply CORS Policy

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --cors-file=cors-config.json
```

---

## 5. Lifecycle Rules (Optional but Recommended)

### 5.1 Auto-delete Temporary Files

Delete files in the `temp/` folder after 24 hours. Save this rule as `lifecycle-config.json` (the file name the command below expects):

```json
{
  "lifecycle": {
    "rule": [
      {
        "action": { "type": "Delete" },
        "condition": {
          "age": 1,
          "matchesPrefix": ["temp/"]
        }
      }
    ]
  }
}
```

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --lifecycle-file=lifecycle-config.json
```

---

## 6. Bucket Versioning (Recommended)

Enable versioning to recover from accidental deletes:

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --versioning
```

---

## 7. Deliverables to Application Team

Please provide the following to the development team:

### 7.1 Environment Variables

| Variable | Value |
|----------|-------|
| `GCP_PROJECT_ID` | `your-gcp-project-id` |
| `GCP_BUCKET_NAME` | `reflow-documents-uat` |
| `GCP_KEY_FILE` | `./config/gcp-key.json` |

### 7.2 Files to Share

| File | Description | How to Share |
|------|-------------|--------------|
| `gcp-key.json` | Service account key | Secure transfer (not email) |
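
With these deliverables, a minimal sketch of how the backend can upload a request document using the official `@google-cloud/storage` client; the destination path follows the folder structure in section 1, and the function/variable names are illustrative:

```javascript
// Sketch: upload a workflow document using the env vars from section 7.1.
// Destination follows the requests/<requestNumber>/documents/ layout from section 1.
const { Storage } = require("@google-cloud/storage");

const storage = new Storage({
  projectId: process.env.GCP_PROJECT_ID,
  keyFilename: process.env.GCP_KEY_FILE,
});

async function uploadRequestDocument(requestNumber, localPath, fileName) {
  const destination = `requests/${requestNumber}/documents/${fileName}`;
  await storage.bucket(process.env.GCP_BUCKET_NAME).upload(localPath, { destination });
  return `gs://${process.env.GCP_BUCKET_NAME}/${destination}`;
}
```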

---

## 8. Verification Steps

After setup, verify with:

```bash
# List bucket contents
gcloud storage ls gs://reflow-documents-uat/

# Test upload
echo "test" > test.txt
gcloud storage cp test.txt gs://reflow-documents-uat/temp/

# Test download
gcloud storage cp gs://reflow-documents-uat/temp/test.txt ./downloaded.txt

# Test delete
gcloud storage rm gs://reflow-documents-uat/temp/test.txt
```

---

## 9. Environment-Specific Buckets

| Environment | Bucket Name | Region |
|-------------|-------------|--------|
| Development | `reflow-documents-dev` | asia-south1 |
| UAT | `reflow-documents-uat` | asia-south1 |
| Production | `reflow-documents-prod` | asia-south1 |

---

## 10. Monitoring & Alerts (Optional)

### 10.1 Enable Logging

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --log-bucket=gs://your-logging-bucket \
  --log-object-prefix=reflow-storage-logs/
```

### 10.2 Storage Alerts

Set up alerts for:
- Storage exceeding 80% of quota
- Unusual download patterns
- Failed access attempts

---

## 11. Cost Estimation

| Item | Estimate (Monthly) |
|------|-------------------|
| Storage (100 GB) | ~$2.00 |
| Operations (10K) | ~$0.05 |
| Network Egress | Varies by usage |

---

## 12. Security Checklist

- [ ] Public access prevention enabled
- [ ] Service account has minimal required permissions
- [ ] JSON key stored securely (not in Git)
- [ ] CORS configured for specific domains only
- [ ] Bucket versioning enabled
- [ ] Lifecycle rules for temp files
- [ ] Access logging enabled

---

## 13. Contact

| Role | Contact |
|------|---------|
| Application Team | [Your Email] |
| DevOps Team | [DevOps Email] |

---

## Appendix: Quick Reference

### GCP Console URLs

- **Buckets:** https://console.cloud.google.com/storage/browser
- **Service Accounts:** https://console.cloud.google.com/iam-admin/serviceaccounts
- **IAM:** https://console.cloud.google.com/iam-admin/iam

### gcloud Commands Summary

```bash
# Create bucket
gcloud storage buckets create gs://BUCKET_NAME --location=asia-south1

# Create service account
gcloud iam service-accounts create SA_NAME

# Generate key
gcloud iam service-accounts keys create key.json --iam-account=SA@PROJECT.iam.gserviceaccount.com

# Set CORS
gcloud storage buckets update gs://BUCKET_NAME --cors-file=cors.json

# Enable versioning
gcloud storage buckets update gs://BUCKET_NAME --versioning
```

726 docs/LOKI_DEPLOYMENT_GUIDE.md (new file)
@@ -0,0 +1,726 @@
# Loki + Grafana Deployment Guide for RE Workflow

## Overview

This guide covers deploying **Loki with Grafana** for log aggregation in the RE Workflow application.

```
┌─────────────────────────┐            ┌─────────────────────────┐
│  RE Workflow Backend    │──────────▶│  Loki                   │
│  (Node.js + Winston)    │   HTTP     │  (Log Storage)          │
└─────────────────────────┘   :3100    └───────────┬─────────────┘
                                                   │
                                       ┌───────────▼─────────────┐
                                       │  Grafana                │
                                       │  monitoring.cloudtopiaa │
                                       │  (Your existing!)       │
                                       └─────────────────────────┘
```

**Why Loki + Grafana?**
- ✅ Lightweight - designed for logs (unlike ELK)
- ✅ Uses your existing Grafana instance
- ✅ Same query-language family as Prometheus (LogQL)
- ✅ Cost-effective - indexes labels, not content

---

# Part 1: Windows Development Setup

## Prerequisites (Windows)

- Docker Desktop for Windows installed
- WSL2 enabled (recommended)
- 4GB+ RAM available for Docker

---

## Step 1: Install Docker Desktop

1. Download from: https://www.docker.com/products/docker-desktop/
2. Run the installer
3. Enable WSL2 integration when prompted
4. Restart the computer

---

## Step 2: Create Project Directory

Open PowerShell as Administrator:

```powershell
# Create directory
mkdir C:\loki
cd C:\loki
```

---

## Step 3: Create Loki Configuration (Windows)

Create the file `C:\loki\loki-config.yaml`:

```powershell
# Using PowerShell
notepad C:\loki\loki-config.yaml
```

**Paste this configuration:**

```yaml
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

limits_config:
  retention_period: 7d
  ingestion_rate_mb: 10
  ingestion_burst_size_mb: 20
```

---

## Step 4: Create Docker Compose (Windows)

Create the file `C:\loki\docker-compose.yml`:

```powershell
notepad C:\loki\docker-compose.yml
```

**Paste this configuration:**

```yaml
version: '3.8'

services:
  loki:
    image: grafana/loki:2.9.2
    container_name: loki
    ports:
      - "3100:3100"
    volumes:
      - ./loki-config.yaml:/etc/loki/local-config.yaml
      - loki-data:/loki
    command: -config.file=/etc/loki/local-config.yaml
    restart: unless-stopped

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    ports:
      - "3001:3000"  # Using 3001 since 3000 is used by the React frontend
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin123
    volumes:
      - grafana-data:/var/lib/grafana
    depends_on:
      - loki
    restart: unless-stopped

volumes:
  loki-data:
  grafana-data:
```

---

## Step 5: Start Services (Windows)

```powershell
cd C:\loki
docker-compose up -d
```

**Wait 30 seconds for the services to initialize.**

---

## Step 6: Verify Services (Windows)

```powershell
# Check containers are running
docker ps

# Test Loki health
Invoke-WebRequest -Uri http://localhost:3100/ready

# Or using curl (if installed)
curl http://localhost:3100/ready
```

---

## Step 7: Configure Grafana (Windows Dev)

1. Open browser: `http://localhost:3001` *(port 3001 to avoid conflict with React on 3000)*
2. Login: `admin` / `admin123`
3. Go to: **Connections → Data Sources → Add data source**
4. Select: **Loki**
5. Configure:
   - URL: `http://loki:3100`
6. Click: **Save & Test**

---

## Step 8: Configure Backend .env (Windows Dev)

```env
# Development - Local Loki
LOKI_HOST=http://localhost:3100
```
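
With `LOKI_HOST` set, the backend can ship logs through the community `winston-loki` transport. A minimal sketch, assuming that package is installed and that the option names below (`host`, `labels`, `json`) match the version in use:

```javascript
// Minimal Winston → Loki wiring sketch using the winston-loki transport.
// Option names follow winston-loki's documented options; verify against the
// installed version.
const winston = require("winston");
const LokiTransport = require("winston-loki");

const logger = winston.createLogger({
  transports: [
    new LokiTransport({
      host: process.env.LOKI_HOST || "http://localhost:3100",
      labels: { app: "re-workflow" },  // matches the {app="re-workflow"} LogQL selector used in Part 3
      json: true,
    }),
    new winston.transports.Console(),
  ],
});

logger.info("Loki transport configured");
```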
|
||||
|
||||
---
|
||||
|
||||
## Windows Commands Reference
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `docker-compose up -d` | Start Loki + Grafana |
|
||||
| `docker-compose down` | Stop services |
|
||||
| `docker-compose logs -f loki` | View Loki logs |
|
||||
| `docker-compose restart` | Restart services |
|
||||
| `docker ps` | Check running containers |
|
||||
|
||||
---
|
||||
|
||||
# Part 2: Linux Production Setup (DevOps)
|
||||
|
||||
## Prerequisites (Linux)
|
||||
|
||||
- Ubuntu 20.04+ / CentOS 7+ / RHEL 8+
|
||||
- Docker & Docker Compose installed
|
||||
- 2GB+ RAM (4GB recommended)
|
||||
- 10GB+ disk space
|
||||
- Grafana running at `http://monitoring.cloudtopiaa.com/`
|
||||
|
||||
---
|
||||
|
||||
## Step 1: Install Docker (if not installed)
|
||||
|
||||
**Ubuntu/Debian:**
|
||||
```bash
|
||||
# Update packages
|
||||
sudo apt update
|
||||
|
||||
# Install Docker
|
||||
sudo apt install -y docker.io docker-compose
|
||||
|
||||
# Start Docker
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker
|
||||
|
||||
# Add user to docker group
|
||||
sudo usermod -aG docker $USER
|
||||
```
|
||||
|
||||
**CentOS/RHEL:**
|
||||
```bash
|
||||
# Install Docker
|
||||
sudo yum install -y docker docker-compose
|
||||
|
||||
# Start Docker
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 2: Create Loki Directory
|
||||
|
||||
```bash
|
||||
sudo mkdir -p /opt/loki
|
||||
cd /opt/loki
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 3: Create Loki Configuration (Linux)
|
||||
|
||||
```bash
|
||||
sudo nano /opt/loki/loki-config.yaml
|
||||
```
|
||||
|
||||
**Paste this configuration:**
|
||||
|
||||
```yaml
|
||||
auth_enabled: false
|
||||
|
||||
server:
|
||||
http_listen_port: 3100
|
||||
grpc_listen_port: 9096
|
||||
|
||||
common:
|
||||
instance_addr: 127.0.0.1
|
||||
path_prefix: /tmp/loki
|
||||
storage:
|
||||
filesystem:
|
||||
chunks_directory: /tmp/loki/chunks
|
||||
rules_directory: /tmp/loki/rules
|
||||
replication_factor: 1
|
||||
ring:
|
||||
kvstore:
|
||||
store: inmemory
|
||||
|
||||
query_range:
|
||||
results_cache:
|
||||
cache:
|
||||
embedded_cache:
|
||||
enabled: true
|
||||
max_size_mb: 100
|
||||
|
||||
schema_config:
|
||||
configs:
|
||||
- from: 2020-10-24
|
||||
store: tsdb
|
||||
object_store: filesystem
|
||||
schema: v13
|
||||
index:
|
||||
prefix: index_
|
||||
period: 24h
|
||||
|
||||
ruler:
|
||||
alertmanager_url: http://localhost:9093
|
||||
|
||||
limits_config:
|
||||
retention_period: 30d
|
||||
ingestion_rate_mb: 10
|
||||
ingestion_burst_size_mb: 20
|
||||
|
||||
# Storage retention
|
||||
compactor:
|
||||
working_directory: /tmp/loki/compactor
|
||||
retention_enabled: true
|
||||
retention_delete_delay: 2h
|
||||
delete_request_store: filesystem
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 4: Create Docker Compose (Linux Production)

```bash
sudo nano /opt/loki/docker-compose.yml
```

**Paste this configuration (Loki only - uses existing Grafana):**

```yaml
version: '3.8'

services:
  loki:
    image: grafana/loki:2.9.2
    container_name: loki
    ports:
      - "3100:3100"
    volumes:
      - ./loki-config.yaml:/etc/loki/local-config.yaml
      - loki-data:/tmp/loki
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - monitoring
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

networks:
  monitoring:
    driver: bridge

volumes:
  loki-data:
    driver: local
```

---
## Step 5: Start Loki (Linux)

```bash
cd /opt/loki
sudo docker-compose up -d
```

**Wait ~30 seconds for Loki to initialize**, or poll the readiness endpoint as shown below.
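If you script this step, a readiness poll is more reliable than a fixed sleep. A minimal sketch:

```bash
# Poll Loki's /ready endpoint for up to ~60 seconds
for i in $(seq 1 30); do
  curl -sf http://localhost:3100/ready && break
  sleep 2
done
```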
---
## Step 6: Verify Loki (Linux)

```bash
# Check container
sudo docker ps | grep loki

# Test Loki health
curl http://localhost:3100/ready

# Test that Loki's query API responds (label list is empty until logs arrive)
curl http://localhost:3100/loki/api/v1/labels
```

**Expected response:**
```json
{"status":"success","data":[]}
```

---
## Step 7: Open Firewall Port (Linux)

**Ubuntu/Debian:**
```bash
sudo ufw allow 3100/tcp
sudo ufw reload
```

**CentOS/RHEL:**
```bash
sudo firewall-cmd --permanent --add-port=3100/tcp
sudo firewall-cmd --reload
```

---
## Step 8: Add Loki to Existing Grafana

1. **Open Grafana:** `http://monitoring.cloudtopiaa.com/`
2. **Login** with admin credentials
3. **Go to:** Connections → Data Sources → Add data source
4. **Select:** Loki
5. **Configure:**

   | Field | Value |
   |-------|-------|
   | Name | `RE-Workflow-Logs` |
   | URL | `http://<loki-server-ip>:3100` |
   | Timeout | `60` |

6. **Click:** Save & Test
7. **Should see:** ✅ "Data source successfully connected"

---
## Step 9: Configure Backend .env (Production)

```env
# Production - Remote Loki
LOKI_HOST=http://<loki-server-ip>:3100
# LOKI_USER=      # Optional: if basic auth enabled
# LOKI_PASSWORD=  # Optional: if basic auth enabled
```
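To confirm the backend host can actually write to Loki (not just reach it), you can push a test line to Loki's push API from that host. A minimal sketch; the `app="re-workflow"` label matches the queries used throughout this guide:

```bash
# Push one test log line to Loki (timestamps must be in nanoseconds)
curl -s -X POST "http://<loki-server-ip>:3100/loki/api/v1/push" \
  -H "Content-Type: application/json" \
  -d "{\"streams\":[{\"stream\":{\"app\":\"re-workflow\"},\"values\":[[\"$(date +%s%N)\",\"connectivity test\"]]}]}"
```

A `204 No Content` response means the line was accepted; it should then appear under the `app` label in Grafana Explore.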
---
## Linux Commands Reference

| Command | Purpose |
|---------|---------|
| `sudo docker-compose up -d` | Start Loki |
| `sudo docker-compose down` | Stop Loki |
| `sudo docker-compose logs -f` | View logs |
| `sudo docker-compose restart` | Restart |
| `sudo docker ps` | Check containers |

---
## Step 10: Enable Basic Auth (Optional - Production)

For added security, put Loki behind a reverse proxy with basic auth:

```bash
# Install apache2-utils for htpasswd
sudo apt install apache2-utils

# Create password file
sudo htpasswd -c /opt/loki/.htpasswd lokiuser

# Update docker-compose.yml to use an nginx reverse proxy with auth (see the sketch below)
```
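One possible shape for that compose change: an nginx sidecar terminates auth on port 3100 while Loki itself is no longer published directly. The service name `loki-auth` and the `nginx.conf` contents below are illustrative assumptions, not part of this commit:

```yaml
# Sketch: addition to /opt/loki/docker-compose.yml under `services:`
  loki-auth:
    image: nginx:1.25
    ports:
      - "3100:80"
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
      - ./.htpasswd:/etc/nginx/.htpasswd:ro
    depends_on:
      - loki
    networks:
      - monitoring
```

with a matching `/opt/loki/nginx.conf`:

```
server {
  listen 80;
  location / {
    auth_basic           "Loki";
    auth_basic_user_file /etc/nginx/.htpasswd;
    proxy_pass           http://loki:3100;
  }
}
```

Remove the `- "3100:3100"` mapping from the `loki` service so only the authenticated proxy is reachable, then set `LOKI_USER`/`LOKI_PASSWORD` in the backend `.env`.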
---
# Part 3: Grafana Dashboard Setup

## Create Dashboard

1. Go to: `http://monitoring.cloudtopiaa.com/dashboards` (or `http://localhost:3001` for dev)
2. Click: **New → New Dashboard**
3. Add panels as described below

---
### Panel 1: Error Count (Stat)

**Query (LogQL):**
```
count_over_time({app="re-workflow"} |= "error" [24h])
```
- Visualization: **Stat**
- Title: "Errors (24h)"

---

### Panel 2: Error Timeline (Time Series)

**Query (LogQL):**
```
sum by (level) (count_over_time({app="re-workflow"} | json | level=~"error|warn" [5m]))
```
- Visualization: **Time Series**
- Title: "Errors Over Time"

---

### Panel 3: Recent Errors (Logs)

**Query (LogQL):**
```
{app="re-workflow"} | json | level="error"
```
- Visualization: **Logs**
- Title: "Recent Errors"

---

### Panel 4: TAT Breaches (Stat)

**Query (LogQL):**
```
count_over_time({app="re-workflow"} | json | tatEvent="breached" [24h])
```
- Visualization: **Stat**
- Title: "TAT Breaches"
- Color: Red

---

### Panel 5: Workflow Events (Pie)

**Query (LogQL):**
```
sum by (workflowEvent) (count_over_time({app="re-workflow"} | json | workflowEvent!="" [24h]))
```
- Visualization: **Pie Chart**
- Title: "Workflow Events"

---

### Panel 6: Auth Failures (Table)

**Query (LogQL):**
```
{app="re-workflow"} | json | authEvent="auth_failure"
```
- Visualization: **Table**
- Title: "Authentication Failures"

---
## Useful LogQL Queries

| Purpose | Query |
|---------|-------|
| All errors | `{app="re-workflow"} \| json \| level="error"` |
| Specific request | `{app="re-workflow"} \| json \| requestId="REQ-2024-001"` |
| User activity | `{app="re-workflow"} \| json \| userId="user-123"` |
| TAT breaches | `{app="re-workflow"} \| json \| tatEvent="breached"` |
| Auth failures | `{app="re-workflow"} \| json \| authEvent="auth_failure"` |
| Workflow created | `{app="re-workflow"} \| json \| workflowEvent="created"` |
| API errors (5xx) | `{app="re-workflow"} \| json \| statusCode>=500` |
| Slow requests | `{app="re-workflow"} \| json \| duration>3000` |
| Error rate | `sum(rate({app="re-workflow"} \| json \| level="error"[5m]))` |
| By department | `{app="re-workflow"} \| json \| department="Engineering"` |
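Any of these can also be run outside Grafana against Loki's HTTP API, which is handy for scripted checks. A minimal sketch using the "all errors" query:

```bash
# Ask Loki directly for error logs from the last hour
curl -s -G "http://<loki-server-ip>:3100/loki/api/v1/query_range" \
  --data-urlencode 'query={app="re-workflow"} | json | level="error"' \
  --data-urlencode "start=$(date -d '1 hour ago' +%s)000000000" \
  --data-urlencode "end=$(date +%s)000000000" \
  --data-urlencode 'limit=100'
```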
---
# Part 4: Alerting Setup

## Alert 1: High Error Rate

1. Go to: **Alerting → Alert Rules → New Alert Rule**
2. Configure:
   - Name: `RE Workflow - High Error Rate`
   - Data source: `RE-Workflow-Logs`
   - Query: `count_over_time({app="re-workflow"} | json | level="error" [5m])`
   - Condition: IS ABOVE 10
3. Add notification (Slack, Email)

## Alert 2: TAT Breach

1. Create a new alert rule
2. Configure:
   - Name: `RE Workflow - TAT Breach`
   - Query: `count_over_time({app="re-workflow"} | json | tatEvent="breached" [15m])`
   - Condition: IS ABOVE 0
3. Add notification

## Alert 3: Auth Attack Detection

1. Create a new alert rule
2. Configure:
   - Name: `RE Workflow - Auth Attack`
   - Query: `count_over_time({app="re-workflow"} | json | authEvent="auth_failure" [5m])`
   - Condition: IS ABOVE 20
3. Add notification to the Security team

---
# Part 5: Troubleshooting

## Windows Issues

### Docker Desktop not starting
```powershell
# Restart Docker Desktop service
Restart-Service docker

# Or restart Docker Desktop from the system tray
```

### Port 3100 already in use
```powershell
# Find the process using the port
netstat -ano | findstr :3100

# Kill the process
taskkill /PID <pid> /F
```

### WSL2 issues
```powershell
# Update WSL
wsl --update

# Restart WSL
wsl --shutdown
```

---
## Linux Issues

### Loki won't start

```bash
# Check logs
sudo docker logs loki

# Common fix - the Loki container runs as UID 10001, so fix ownership
sudo chown -R 10001:10001 /opt/loki
```

### Grafana can't connect to Loki

```bash
# Verify Loki is healthy
curl http://localhost:3100/ready

# Check network reachability from the Grafana server
curl http://loki-server:3100/ready

# Restart Loki
sudo docker-compose restart
```

### Logs not appearing in Grafana

1. Check that the application env has the correct `LOKI_HOST`
2. Verify network connectivity: `curl http://loki:3100/ready`
3. Check labels: `curl http://localhost:3100/loki/api/v1/labels`
4. Wait for the application to send its first logs

### High memory usage

Reduce the retention period in `loki-config.yaml`:

```yaml
limits_config:
  retention_period: 7d  # Reduce from 30d
```

---
# Quick Reference

## Environment Comparison

| Setting | Windows Dev | Linux Production |
|---------|-------------|------------------|
| LOKI_HOST | `http://localhost:3100` | `http://<server-ip>:3100` |
| Grafana URL | `http://localhost:3001` | `http://monitoring.cloudtopiaa.com` |
| Config Path | `C:\loki\` | `/opt/loki/` |
| Retention | 7 days | 30 days |

## Port Reference

| Service | Port | URL |
|---------|------|-----|
| Loki | 3100 | `http://server:3100` |
| Grafana (Dev) | 3001 | `http://localhost:3001` |
| Grafana (Prod) | 80/443 | `http://monitoring.cloudtopiaa.com/` |
| React Frontend | 3000 | `http://localhost:3000` |

---
# Verification Checklist

## Windows Development
- [ ] Docker Desktop running
- [ ] `docker ps` shows loki and grafana containers
- [ ] `http://localhost:3100/ready` returns "ready"
- [ ] `http://localhost:3001` shows Grafana login
- [ ] Loki data source connected in Grafana
- [ ] Backend `.env` has `LOKI_HOST=http://localhost:3100`

## Linux Production
- [ ] Loki container running (`docker ps`)
- [ ] `curl localhost:3100/ready` returns "ready"
- [ ] Firewall port 3100 open
- [ ] Grafana connected to Loki
- [ ] Backend `.env` has correct `LOKI_HOST`
- [ ] Logs appearing in Grafana Explore
- [ ] Dashboard created
- [ ] Alerts configured

---
# Contact

For issues with this setup:
- Backend logs: Check Grafana dashboard
- Infrastructure: Contact DevOps team
@ -52,6 +52,12 @@ GEMINI_MODEL=gemini-2.0-flash-lite

# Logging
LOG_LEVEL=info
LOG_FILE_PATH=./logs
APP_VERSION=1.2.0

# ============ Loki Configuration (Grafana Log Aggregation) ============
LOKI_HOST=      # e.g., http://loki:3100 or http://monitoring.cloudtopiaa.com:3100
LOKI_USER=      # Optional: Basic auth username
LOKI_PASSWORD=  # Optional: Basic auth password

# CORS
CORS_ORIGIN="*"
248
monitoring/README.md
Normal file
@ -0,0 +1,248 @@
# RE Workflow Monitoring Stack

Complete monitoring solution with **Grafana**, **Prometheus**, **Loki**, and **Promtail** for the RE Workflow Management System.

## 🏗️ Architecture

```
┌────────────────────────────────────────────────────────────────────────┐
│                           RE Workflow System                           │
├────────────────────────────────────────────────────────────────────────┤
│                                                                        │
│  ┌─────────────────┐     ┌─────────────────┐     ┌─────────────────┐   │
│  │   Node.js API   │─────│   PostgreSQL    │─────│      Redis      │   │
│  │   (Port 5000)   │     │   (Port 5432)   │     │   (Port 6379)   │   │
│  └────────┬────────┘     └─────────────────┘     └─────────────────┘   │
│           │                                                            │
│           │ /metrics endpoint                                          │
│           │ Log files (./logs/)                                        │
│           ▼                                                            │
│  ┌────────────────────────────────────────────────────────────────┐    │
│  │                        Monitoring Stack                        │    │
│  │ ┌─────────────┐   ┌─────────────┐   ┌─────────────────────────┐│    │
│  │ │ Prometheus  │───│    Loki     │───│        Promtail         ││    │
│  │ │ (Port 9090) │   │ (Port 3100) │   │  (Collects log files)   ││    │
│  │ └──────┬──────┘   └──────┬──────┘   └─────────────────────────┘│    │
│  │        │                 │                                     │    │
│  │        └────────┬────────┘                                     │    │
│  │                 ▼                                              │    │
│  │        ┌─────────────────┐                                     │    │
│  │        │     Grafana     │                                     │    │
│  │        │   (Port 3001)   │◄── Pre-configured Dashboards        │    │
│  │        └─────────────────┘                                     │    │
│  └────────────────────────────────────────────────────────────────┘    │
└────────────────────────────────────────────────────────────────────────┘
```

## 🚀 Quick Start

### Prerequisites

- **Docker Desktop** installed and running
- **WSL2** enabled (recommended for Windows)
- Backend API running on port 5000

### Step 1: Start Monitoring Stack

```powershell
# Navigate to monitoring folder
cd C:\Laxman\Royal_Enfield\Re_Backend\monitoring

# Start all monitoring services
docker-compose -f docker-compose.monitoring.yml up -d

# Check status
docker ps
```

### Step 2: Configure Backend Environment

Add these to your backend `.env` file:

```env
# Loki configuration (for direct log shipping from Winston)
LOKI_HOST=http://localhost:3100

# Optional: Basic auth if enabled
# LOKI_USER=your_username
# LOKI_PASSWORD=your_password
```

### Step 3: Access Dashboards

| Service | URL | Credentials |
|---------|-----|-------------|
| **Grafana** | http://localhost:3001 | admin / REWorkflow@2024 |
| **Prometheus** | http://localhost:9090 | - |
| **Loki** | http://localhost:3100 | - |
| **Alertmanager** | http://localhost:9093 | - |

## 📊 Available Dashboards

### 1. RE Workflow Overview
Pre-configured dashboard with:
- **API Metrics**: Request rate, error rate, latency percentiles
- **Logs Overview**: Error count, warnings, TAT breaches
- **Node.js Runtime**: Memory usage, event loop lag, CPU

### 2. Custom LogQL Queries

| Purpose | Query |
|---------|-------|
| All errors | `{app="re-workflow"} \| json \| level="error"` |
| TAT breaches | `{app="re-workflow"} \| json \| tatEvent="breached"` |
| Auth failures | `{app="re-workflow"} \| json \| authEvent="auth_failure"` |
| Slow requests (>3s) | `{app="re-workflow"} \| json \| duration>3000` |
| By user | `{app="re-workflow"} \| json \| userId="USER-ID"` |
| By request | `{app="re-workflow"} \| json \| requestId="REQ-XXX"` |

### 3. PromQL Queries (Prometheus)

| Purpose | Query |
|---------|-------|
| Request rate | `rate(http_requests_total{job="re-workflow-backend"}[5m])` |
| Error rate | `rate(http_request_errors_total[5m]) / rate(http_requests_total[5m])` |
| P95 latency | `histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))` |
| Memory usage | `process_resident_memory_bytes{job="re-workflow-backend"}` |
| Event loop lag | `nodejs_eventloop_lag_seconds{job="re-workflow-backend"}` |
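These PromQL expressions can also be evaluated outside Grafana through Prometheus's HTTP API, which is useful for quick smoke tests. A minimal sketch:

```bash
# Evaluate the request-rate query against the local Prometheus instance
curl -s "http://localhost:9090/api/v1/query" \
  --data-urlencode 'query=rate(http_requests_total{job="re-workflow-backend"}[5m])'
```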
## 📁 File Structure

```
monitoring/
├── docker-compose.monitoring.yml   # Main compose file
├── prometheus/
│   ├── prometheus.yml              # Prometheus configuration
│   └── alert.rules.yml             # Alert rules
├── loki/
│   └── loki-config.yml             # Loki configuration
├── promtail/
│   └── promtail-config.yml         # Promtail log shipper config
├── alertmanager/
│   └── alertmanager.yml            # Alert notification config
└── grafana/
    ├── provisioning/
    │   ├── datasources/
    │   │   └── datasources.yml     # Auto-configure data sources
    │   └── dashboards/
    │       └── dashboards.yml      # Dashboard provisioning
    └── dashboards/
        └── re-workflow-overview.json  # Pre-built dashboard
```

## 🔧 Configuration

### Prometheus Scrape Targets

Edit `prometheus/prometheus.yml` to add/modify scrape targets:

```yaml
scrape_configs:
  - job_name: 're-workflow-backend'
    static_configs:
      # For local development (backend outside Docker)
      - targets: ['host.docker.internal:5000']
      # For Docker deployment (backend in Docker)
      # - targets: ['re_workflow_backend:5000']
```
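After editing scrape targets you don't have to recreate the container; the compose file starts Prometheus with `--web.enable-lifecycle`, so a reload request is enough:

```bash
# Ask the running Prometheus to re-read prometheus.yml
curl -X POST http://localhost:9090/-/reload
```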
### Log Retention

Edit `loki/loki-config.yml`:

```yaml
limits_config:
  retention_period: 15d  # Adjust retention period
```

### Alert Notifications

Edit `alertmanager/alertmanager.yml` to configure:
- **Email** notifications
- **Slack** webhooks
- **Custom** webhook endpoints

## 🛠️ Common Commands

```powershell
# Start services
docker-compose -f docker-compose.monitoring.yml up -d

# Stop services
docker-compose -f docker-compose.monitoring.yml down

# View logs
docker-compose -f docker-compose.monitoring.yml logs -f

# View specific service logs
docker-compose -f docker-compose.monitoring.yml logs -f grafana

# Restart a service
docker-compose -f docker-compose.monitoring.yml restart prometheus

# Check service health
docker ps

# Remove all data (fresh start)
docker-compose -f docker-compose.monitoring.yml down -v
```

## ⚡ Metrics Exposed by Backend

The backend exposes these metrics at `/metrics`:

### HTTP Metrics
- `http_requests_total` - Total HTTP requests (by method, route, status)
- `http_request_duration_seconds` - Request latency histogram
- `http_request_errors_total` - Error count (4xx, 5xx)
- `http_active_connections` - Current active connections

### Business Metrics
- `tat_breaches_total` - TAT breach events
- `pending_workflows_count` - Pending workflow gauge
- `workflow_operations_total` - Workflow operation count
- `auth_events_total` - Authentication events

### Node.js Runtime
- `nodejs_heap_size_*` - Heap memory metrics
- `nodejs_eventloop_lag_*` - Event loop lag
- `process_cpu_*` - CPU usage
- `process_resident_memory_bytes` - RSS memory
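A quick way to eyeball these without Grafana is to hit the endpoint directly and filter for a metric family (assumes the backend is running locally on port 5000):

```bash
# Dump only the HTTP request counters from the /metrics endpoint
curl -s http://localhost:5000/metrics | grep '^http_requests_total'
```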
## 🔒 Security Notes

1. **Change default passwords** in production
2. **Enable TLS** for external access
3. **Configure firewall** rules to restrict access to monitoring ports
4. **Use a reverse proxy** (nginx) for HTTPS

## 🐛 Troubleshooting

### Prometheus can't scrape backend
1. Ensure the backend is running on port 5000
2. Check the `/metrics` endpoint: `curl http://localhost:5000/metrics`
3. For Docker: use `host.docker.internal:5000`

### Logs not appearing in Loki
1. Check Promtail logs: `docker logs re_promtail`
2. Verify the log file path in `promtail-config.yml`
3. Ensure the backend has `LOKI_HOST` configured

### Grafana dashboards empty
1. Wait 30-60 seconds for data collection
2. Check the data source configuration in Grafana
3. Verify the time range selection

### Docker memory issues
```powershell
# Increase Docker Desktop memory allocation:
# Settings → Resources → Memory → 4GB+
```

## 📞 Support

For issues with the monitoring stack:
1. Check container logs: `docker logs <container_name>`
2. Verify configuration file syntax
3. Ensure Docker Desktop is running with sufficient resources
88
monitoring/alertmanager/alertmanager.yml
Normal file
@ -0,0 +1,88 @@
# =============================================================================
# Alertmanager Configuration for RE Workflow
# =============================================================================

global:
  # Global configuration options
  resolve_timeout: 5m

# Route configuration
route:
  # Default receiver
  receiver: 'default-receiver'

  # Group alerts by these labels
  group_by: ['alertname', 'service', 'severity']

  # Wait before sending grouped notifications
  group_wait: 30s

  # Interval for sending updates for a group
  group_interval: 5m

  # Interval for resending notifications
  repeat_interval: 4h

  # Child routes for specific routing
  routes:
    # Critical alerts - immediate notification
    - match:
        severity: critical
      receiver: 'critical-receiver'
      group_wait: 10s
      repeat_interval: 1h

    # Warning alerts
    - match:
        severity: warning
      receiver: 'warning-receiver'
      group_wait: 1m
      repeat_interval: 4h

# Receivers configuration
receivers:
  # Default receiver (logs to console)
  - name: 'default-receiver'
    # Webhook receiver for testing
    webhook_configs:
      - url: 'http://localhost:5000/api/webhooks/alerts'
        send_resolved: true

  # Critical alerts receiver
  - name: 'critical-receiver'
    # Configure email notifications
    # email_configs:
    #   - to: 'devops@royalenfield.com'
    #     from: 'alerts@royalenfield.com'
    #     smarthost: 'smtp.gmail.com:587'
    #     auth_username: 'alerts@royalenfield.com'
    #     auth_password: 'your-app-password'
    #     send_resolved: true

    # Slack notifications (uncomment and configure)
    # slack_configs:
    #   - api_url: 'https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK'
    #     channel: '#alerts-critical'
    #     send_resolved: true
    #     title: '{{ .Status | toUpper }}: {{ .CommonAnnotations.summary }}'
    #     text: '{{ .CommonAnnotations.description }}'

    webhook_configs:
      - url: 'http://host.docker.internal:5000/api/webhooks/alerts'
        send_resolved: true

  # Warning alerts receiver
  - name: 'warning-receiver'
    webhook_configs:
      - url: 'http://host.docker.internal:5000/api/webhooks/alerts'
        send_resolved: true

# Inhibition rules - prevent duplicate alerts
inhibit_rules:
  # If a critical alert fires, inhibit warning alerts for the same alertname
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'service']
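# A quick way to exercise the routing above is to POST a synthetic alert to
# Alertmanager's v2 API (assumes it is published on localhost:9093 as in the
# compose file):
#
#   curl -X POST http://localhost:9093/api/v2/alerts \
#     -H 'Content-Type: application/json' \
#     -d '[{"labels":{"alertname":"TestAlert","severity":"warning","service":"backend"}}]'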
170
monitoring/docker-compose.monitoring.yml
Normal file
@ -0,0 +1,170 @@
# =============================================================================
# RE Workflow - Complete Monitoring Stack
# Docker Compose for Grafana, Prometheus, Loki, and Promtail
# =============================================================================
# Usage:
#   cd monitoring
#   docker-compose -f docker-compose.monitoring.yml up -d
# =============================================================================

version: '3.8'

services:
  # ===========================================================================
  # PROMETHEUS - Metrics Collection
  # ===========================================================================
  prometheus:
    image: prom/prometheus:v2.47.2
    container_name: re_prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./prometheus/alert.rules.yml:/etc/prometheus/alert.rules.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--storage.tsdb.retention.time=15d'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      - '--web.enable-lifecycle'
    networks:
      - monitoring_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ===========================================================================
  # LOKI - Log Aggregation
  # ===========================================================================
  loki:
    image: grafana/loki:2.9.2
    container_name: re_loki
    ports:
      - "3100:3100"
    volumes:
      - ./loki/loki-config.yml:/etc/loki/local-config.yaml:ro
      - loki_data:/loki
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - monitoring_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

  # ===========================================================================
  # PROMTAIL - Log Shipping Agent
  # ===========================================================================
  promtail:
    image: grafana/promtail:2.9.2
    container_name: re_promtail
    volumes:
      - ./promtail/promtail-config.yml:/etc/promtail/config.yml:ro
      - ../logs:/var/log/app:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - promtail_data:/tmp/promtail
    command: -config.file=/etc/promtail/config.yml
    depends_on:
      - loki
    networks:
      - monitoring_network
    restart: unless-stopped

  # ===========================================================================
  # GRAFANA - Visualization & Dashboards
  # ===========================================================================
  grafana:
    image: grafana/grafana:10.2.2
    container_name: re_grafana
    ports:
      - "3001:3000"  # Using 3001 to avoid conflict with React frontend (3000)
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=REWorkflow@2024
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_FEATURE_TOGGLES_ENABLE=publicDashboards
      - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,grafana-piechart-panel
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
      - ./grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
    depends_on:
      - prometheus
      - loki
    networks:
      - monitoring_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ===========================================================================
  # NODE EXPORTER - Host Metrics (Optional but recommended)
  # ===========================================================================
  node-exporter:
    image: prom/node-exporter:v1.6.1
    container_name: re_node_exporter
    ports:
      - "9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    networks:
      - monitoring_network
    restart: unless-stopped

  # ===========================================================================
  # ALERTMANAGER - Alert Notifications (Optional)
  # ===========================================================================
  alertmanager:
    image: prom/alertmanager:v0.26.0
    container_name: re_alertmanager
    ports:
      - "9093:9093"
    volumes:
      - ./alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
      - alertmanager_data:/alertmanager
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
    networks:
      - monitoring_network
    restart: unless-stopped

# =============================================================================
# NETWORKS
# =============================================================================
networks:
  monitoring_network:
    driver: bridge
    name: re_monitoring_network

# =============================================================================
# VOLUMES
# =============================================================================
volumes:
  prometheus_data:
    name: re_prometheus_data
  loki_data:
    name: re_loki_data
  promtail_data:
    name: re_promtail_data
  grafana_data:
    name: re_grafana_data
  alertmanager_data:
    name: re_alertmanager_data
651
monitoring/grafana/dashboards/re-workflow-overview.json
Normal file
@ -0,0 +1,651 @@
{
  "annotations": { "list": [] },
  "editable": true,
  "fiscalYearStartMonth": 0,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
      "id": 100,
      "panels": [],
      "title": "📊 API Overview",
      "type": "row"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 100 },
              { "color": "red", "value": 500 }
            ]
          },
          "unit": "reqps"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 1 },
      "id": 1,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        { "expr": "sum(rate(http_requests_total{job=\"re-workflow-backend\"}[5m]))", "refId": "A" }
      ],
      "title": "Request Rate",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 0.01 },
              { "color": "red", "value": 0.05 }
            ]
          },
          "unit": "percentunit"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 6, "y": 1 },
      "id": 2,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        { "expr": "sum(rate(http_request_errors_total{job=\"re-workflow-backend\"}[5m])) / sum(rate(http_requests_total{job=\"re-workflow-backend\"}[5m]))", "refId": "A" }
      ],
      "title": "Error Rate",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 0.5 },
              { "color": "red", "value": 1 }
            ]
          },
          "unit": "s"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 12, "y": 1 },
      "id": 3,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        { "expr": "histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))", "refId": "A" }
      ],
      "title": "P95 Latency",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "red", "value": null },
              { "color": "green", "value": 1 }
            ]
          },
          "unit": "short"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 18, "y": 1 },
      "id": 4,
      "options": {
        "colorMode": "value",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        { "expr": "up{job=\"re-workflow-backend\"}", "refId": "A" }
      ],
      "title": "API Status",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto",
            "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth", "lineWidth": 2, "pointSize": 5,
            "scaleDistribution": { "type": "linear" }, "showPoints": "never", "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "reqps"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 5 },
      "id": 5,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        { "expr": "sum(rate(http_requests_total{job=\"re-workflow-backend\"}[5m])) by (method)", "legendFormat": "{{method}}", "refId": "A" }
      ],
      "title": "Request Rate by Method",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto",
            "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth", "lineWidth": 2, "pointSize": 5,
            "scaleDistribution": { "type": "linear" }, "showPoints": "never", "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "s"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 5 },
      "id": 6,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        { "expr": "histogram_quantile(0.50, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))", "legendFormat": "P50", "refId": "A" },
        { "expr": "histogram_quantile(0.90, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))", "legendFormat": "P90", "refId": "B" },
        { "expr": "histogram_quantile(0.99, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))", "legendFormat": "P99", "refId": "C" }
      ],
      "title": "Response Time Percentiles",
      "type": "timeseries"
    },
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 13 },
      "id": 101,
      "panels": [],
      "title": "📝 Logs",
      "type": "row"
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 14 },
      "id": 7,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        { "expr": "count_over_time({app=\"re-workflow\"} | json | level=\"error\" [$__range])", "refId": "A" }
      ],
      "title": "Errors (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 10 },
              { "color": "red", "value": 50 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 6, "y": 14 },
      "id": 8,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        { "expr": "count_over_time({app=\"re-workflow\"} | json | level=\"warn\" [$__range])", "refId": "A" }
      ],
      "title": "Warnings (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 50 },
              { "color": "orange", "value": 200 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 12, "y": 14 },
      "id": 9,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        { "expr": "count_over_time({app=\"re-workflow\"} | json | tatEvent=\"breached\" [$__range])", "refId": "A" }
      ],
      "title": "TAT Breaches (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 5 },
              { "color": "red", "value": 20 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 18, "y": 14 },
      "id": 10,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        { "expr": "count_over_time({app=\"re-workflow\"} | json | authEvent=\"auth_failure\" [$__range])", "refId": "A" }
      ],
      "title": "Auth Failures (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 10 },
              { "color": "red", "value": 50 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 10, "w": 24, "x": 0, "y": 18 },
      "id": 11,
      "options": {
        "dedupStrategy": "none",
        "enableLogDetails": true,
        "prettifyLogMessage": false,
        "showCommonLabels": false,
        "showLabels": false,
        "showTime": true,
        "sortOrder": "Descending",
        "wrapLogMessage": false
      },
      "targets": [
        { "expr": "{app=\"re-workflow\"} | json | level=~\"error|warn\"", "refId": "A" }
      ],
      "title": "Recent Errors & Warnings",
      "type": "logs"
    },
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 28 },
      "id": 102,
      "panels": [],
      "title": "💻 Node.js Runtime",
      "type": "row"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto",
            "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth", "lineWidth": 2, "pointSize": 5,
            "scaleDistribution": { "type": "linear" }, "showPoints": "never", "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "bytes"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 29 },
      "id": 12,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        { "expr": "process_resident_memory_bytes{job=\"re-workflow-backend\"}", "legendFormat": "RSS Memory", "refId": "A" },
        { "expr": "nodejs_heap_size_used_bytes{job=\"re-workflow-backend\"}", "legendFormat": "Heap Used", "refId": "B" },
        { "expr": "nodejs_heap_size_total_bytes{job=\"re-workflow-backend\"}", "legendFormat": "Heap Total", "refId": "C" }
      ],
      "title": "Memory Usage",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto",
            "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth", "lineWidth": 2, "pointSize": 5,
            "scaleDistribution": { "type": "linear" }, "showPoints": "never", "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "s"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 29 },
      "id": 13,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        { "expr": "nodejs_eventloop_lag_seconds{job=\"re-workflow-backend\"}", "legendFormat": "Event Loop Lag", "refId": "A" }
      ],
      "title": "Event Loop Lag",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto",
            "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth", "lineWidth": 2, "pointSize": 5,
            "scaleDistribution": { "type": "linear" }, "showPoints": "never", "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "short"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 37 },
      "id": 14,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        { "expr": "nodejs_active_handles_total{job=\"re-workflow-backend\"}", "legendFormat": "Active Handles", "refId": "A" },
        { "expr": "nodejs_active_requests_total{job=\"re-workflow-backend\"}", "legendFormat": "Active Requests", "refId": "B" }
      ],
      "title": "Active Handles & Requests",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto",
            "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth", "lineWidth": 2, "pointSize": 5,
            "scaleDistribution": { "type": "linear" }, "showPoints": "never", "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "percentunit"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 37 },
      "id": 15,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        { "expr": "rate(process_cpu_seconds_total{job=\"re-workflow-backend\"}[5m])", "legendFormat": "CPU Usage", "refId": "A" }
      ],
      "title": "CPU Usage",
      "type": "timeseries"
    }
  ],
  "refresh": "30s",
  "schemaVersion": 38,
  "style": "dark",
  "tags": ["re-workflow", "backend", "monitoring"],
  "templating": { "list": [] },
  "time": { "from": "now-1h", "to": "now" },
  "timepicker": {},
  "timezone": "browser",
  "title": "RE Workflow - Overview",
  "uid": "re-workflow-overview",
  "version": 1
}
19
monitoring/grafana/provisioning/dashboards/dashboards.yml
Normal file
@ -0,0 +1,19 @@
# =============================================================================
# Grafana Dashboards Provisioning
# Auto-loads dashboards from JSON files
# =============================================================================

apiVersion: 1

providers:
  - name: 'RE Workflow Dashboards'
    orgId: 1
    folder: 'RE Workflow'
    folderUid: 're-workflow'
    type: file
    disableDeletion: false
    updateIntervalSeconds: 30
    allowUiUpdates: true
    options:
      path: /var/lib/grafana/dashboards
43
monitoring/grafana/provisioning/datasources/datasources.yml
Normal file
@ -0,0 +1,43 @@
# =============================================================================
# Grafana Datasources Provisioning
# Auto-configures Prometheus and Loki as data sources
# =============================================================================

apiVersion: 1

datasources:
  # Prometheus - Metrics
  - name: Prometheus
    uid: prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    editable: false
    jsonData:
      httpMethod: POST
      manageAlerts: true
      prometheusType: Prometheus
      prometheusVersion: 2.47.2

  # Loki - Logs
  - name: Loki
    uid: loki
    type: loki
    access: proxy
    url: http://loki:3100
    editable: false
    jsonData:
      maxLines: 1000
      timeout: 60

  # Alertmanager
  - name: Alertmanager
    uid: alertmanager
    type: alertmanager
    access: proxy
    url: http://alertmanager:9093
    editable: false
    jsonData:
      implementation: prometheus
79
monitoring/loki/loki-config.yml
Normal file
@ -0,0 +1,79 @@
# =============================================================================
# Loki Configuration for RE Workflow
# =============================================================================

auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  log_level: info

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

# Query range settings
query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

# Schema configuration
schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

# Ingestion limits
limits_config:
  retention_period: 15d              # Keep logs for 15 days
  ingestion_rate_mb: 10              # 10MB/s ingestion rate
  ingestion_burst_size_mb: 20        # 20MB burst
  max_streams_per_user: 10000        # Max number of streams
  max_line_size: 256kb               # Max log line size
  max_entries_limit_per_query: 5000  # Max entries per query
  max_query_length: 721h             # Max query time range (~30 days)

# Compactor for retention
compactor:
  working_directory: /loki/compactor
  retention_enabled: true
  retention_delete_delay: 2h
  delete_request_store: filesystem
  compaction_interval: 10m

# Ruler configuration (for alerting)
ruler:
  alertmanager_url: http://alertmanager:9093
  storage:
    type: local
    local:
      directory: /loki/rules
  rule_path: /loki/rules-temp
  enable_api: true

# Table manager (for index retention)
table_manager:
  retention_deletes_enabled: true
  retention_period: 360h  # 15 days

# Analytics (optional - disable for privacy)
analytics:
  reporting_enabled: false
150
monitoring/prometheus/alert.rules.yml
Normal file
@ -0,0 +1,150 @@
# =============================================================================
|
||||
# Prometheus Alert Rules for RE Workflow
|
||||
# =============================================================================
|
||||
|
||||
groups:
|
||||
# ===========================================================================
|
||||
# Backend API Alerts
|
||||
# ===========================================================================
|
||||
- name: re-workflow-backend
|
||||
interval: 30s
|
||||
rules:
|
||||
# High Error Rate
|
||||
- alert: HighErrorRate
|
||||
expr: rate(http_request_errors_total{job="re-workflow-backend"}[5m]) > 0.1
|
||||
for: 5m
|
||||
labels:
|
||||
severity: critical
|
||||
service: backend
|
||||
annotations:
|
||||
summary: "High error rate detected in RE Workflow Backend"
|
||||
description: "Error rate is {{ $value | printf \"%.2f\" }} errors/sec for the last 5 minutes."
|
||||
|
||||
# High Request Latency
|
||||
- alert: HighRequestLatency
|
||||
expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket{job="re-workflow-backend"}[5m])) > 2
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
service: backend
|
||||
annotations:
|
||||
summary: "High API latency detected"
|
||||
description: "95th percentile latency is {{ $value | printf \"%.2f\" }}s"
|
||||
|
||||
# API Down
|
||||
- alert: BackendDown
|
||||
expr: up{job="re-workflow-backend"} == 0
|
||||
for: 1m
|
||||
labels:
|
||||
severity: critical
|
||||
service: backend
|
||||
annotations:
|
||||
summary: "RE Workflow Backend is DOWN"
|
||||
description: "Backend API has been unreachable for more than 1 minute."
|
||||
|
||||
# High Memory Usage
|
||||
- alert: HighMemoryUsage
|
||||
expr: process_resident_memory_bytes{job="re-workflow-backend"} / 1024 / 1024 > 500
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
service: backend
|
||||
annotations:
|
||||
summary: "High memory usage in Backend"
|
||||
description: "Memory usage is {{ $value | printf \"%.0f\" }}MB"
|
||||
|
||||
# Event Loop Lag
|
||||
- alert: HighEventLoopLag
|
||||
expr: nodejs_eventloop_lag_seconds{job="re-workflow-backend"} > 0.5
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
service: backend
|
||||
annotations:
|
||||
summary: "High Node.js event loop lag"
|
||||
description: "Event loop lag is {{ $value | printf \"%.3f\" }}s"
|
||||
|
||||
# ===========================================================================
|
||||
# TAT/Workflow Alerts
|
||||
# ===========================================================================
|
||||
- name: re-workflow-tat
|
||||
interval: 1m
|
||||
rules:
|
||||
# TAT Breach Rate
|
||||
- alert: HighTATBreachRate
|
||||
expr: increase(tat_breaches_total[1h]) > 10
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
service: workflow
|
||||
annotations:
|
||||
summary: "High TAT breach rate detected"
|
||||
description: "{{ $value | printf \"%.0f\" }} TAT breaches in the last hour"
|
||||
|
||||
# Pending Workflows Queue
|
||||
- alert: LargePendingQueue
|
||||
expr: pending_workflows_count > 100
|
||||
for: 30m
|
||||
labels:
|
||||
severity: warning
|
||||
service: workflow
|
||||
annotations:
|
||||
summary: "Large number of pending workflows"
|
||||
description: "{{ $value | printf \"%.0f\" }} workflows pending approval"
|
||||
|
||||
# ===========================================================================
|
||||
# Infrastructure Alerts
|
||||
# ===========================================================================
|
||||
- name: infrastructure
|
||||
interval: 30s
|
||||
rules:
|
||||
# High CPU Usage (Node Exporter)
|
||||
- alert: HighCPUUsage
|
||||
expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
service: infrastructure
|
||||
annotations:
|
||||
summary: "High CPU usage on {{ $labels.instance }}"
|
||||
description: "CPU usage is {{ $value | printf \"%.1f\" }}%"
|
||||
|
||||
# High Disk Usage
|
||||
- alert: HighDiskUsage
|
||||
expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes * 100 > 85
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
service: infrastructure
|
||||
annotations:
|
||||
summary: "High disk usage on {{ $labels.instance }}"
|
||||
description: "Disk usage is {{ $value | printf \"%.1f\" }}%"
|
||||
|
||||
# Low Memory
|
||||
- alert: LowMemory
|
||||
expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 < 15
|
||||
for: 5m
|
||||
labels:
|
||||
severity: critical
|
||||
service: infrastructure
|
||||
annotations:
|
||||
summary: "Low memory on {{ $labels.instance }}"
|
||||
description: "Available memory is {{ $value | printf \"%.1f\" }}%"
|
||||
|
||||
# ===========================================================================
|
||||
# Loki/Logging Alerts
|
||||
# ===========================================================================
|
||||
- name: logging
|
||||
interval: 1m
|
||||
rules:
|
||||
# Loki Down
|
||||
- alert: LokiDown
|
||||
expr: up{job="loki"} == 0
|
||||
for: 2m
|
||||
labels:
|
||||
severity: critical
|
||||
service: loki
|
||||
annotations:
|
||||
summary: "Loki is DOWN"
|
||||
description: "Loki has been unreachable for more than 2 minutes."
|
||||
|
||||
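
The TAT rules above alert on `tat_breaches_total` and `pending_workflows_count`, which are application-level metrics the backend has to register itself (the header comment of `src/middlewares/metrics.middleware.ts`, further down in this commit, lists these custom business metrics). As a hedged sketch of how such metrics are typically registered with prom-client — the metric names match the alert expressions above, everything else is illustrative:

```ts
// Illustrative sketch only - the actual registration lives in metrics.middleware.ts.
import client from 'prom-client';

const register = new client.Registry();

// Counter: incremented each time an approval level misses its TAT deadline.
export const tatBreachesTotal = new client.Counter({
  name: 'tat_breaches_total',
  help: 'Total number of TAT breaches',
  registers: [register],
});

// Gauge: set from a periodic DB count of workflows awaiting approval.
export const pendingWorkflowsCount = new client.Gauge({
  name: 'pending_workflows_count',
  help: 'Number of workflows currently pending approval',
  registers: [register],
});

// Example usage inside a hypothetical TAT scheduler:
// tatBreachesTotal.inc();
// pendingWorkflowsCount.set(await countPendingWorkflows());
```
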
61
monitoring/prometheus/prometheus-docker.yml
Normal file
@ -0,0 +1,61 @@
# =============================================================================
# Prometheus Configuration for RE Workflow (Full Docker Stack)
# Use this when running docker-compose.full.yml
# =============================================================================

global:
  scrape_interval: 15s
  evaluation_interval: 15s
  external_labels:
    monitor: 're-workflow-monitor'
    environment: 'docker'

alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

rule_files:
  - /etc/prometheus/alert.rules.yml

scrape_configs:
  # Prometheus Self-Monitoring
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
        labels:
          service: 'prometheus'

  # RE Workflow Backend (running in Docker)
  - job_name: 're-workflow-backend'
    static_configs:
      - targets: ['re_workflow_backend:5000']
        labels:
          service: 'backend'
          environment: 'docker'
    metrics_path: /metrics
    scrape_interval: 10s
    scrape_timeout: 5s

  # Node Exporter
  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']
        labels:
          service: 'node-exporter'

  # Loki
  - job_name: 'loki'
    static_configs:
      - targets: ['loki:3100']
        labels:
          service: 'loki'

  # Grafana
  - job_name: 'grafana'
    static_configs:
      - targets: ['grafana:3000']
        labels:
          service: 'grafana'
100
monitoring/prometheus/prometheus.yml
Normal file
@ -0,0 +1,100 @@
# =============================================================================
# Prometheus Configuration for RE Workflow
# =============================================================================

global:
  scrape_interval: 15s       # How frequently to scrape targets
  evaluation_interval: 15s   # How frequently to evaluate rules
  external_labels:
    monitor: 're-workflow-monitor'
    environment: 'development'

# Alerting configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Rule files
rule_files:
  - /etc/prometheus/alert.rules.yml

# Scrape configurations
scrape_configs:
  # ============================================
  # Prometheus Self-Monitoring
  # ============================================
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
        labels:
          service: 'prometheus'

  # ============================================
  # RE Workflow Backend API Metrics
  # ============================================
  - job_name: 're-workflow-backend'
    static_configs:
      # Option 1: Backend running locally (outside Docker monitoring stack)
      - targets: ['host.docker.internal:5000']
        labels:
          service: 'backend'
          environment: 'development'
          deployment: 'local'
      # Option 2: Backend running in Docker (docker-compose.full.yml)
      # Uncomment below and comment above when using full stack
      # - targets: ['re_workflow_backend:5000']
      #   labels:
      #     service: 'backend'
      #     environment: 'development'
      #     deployment: 'docker'
    metrics_path: /metrics
    scrape_interval: 10s
    scrape_timeout: 5s

  # ============================================
  # Node Exporter - Host Metrics
  # ============================================
  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']
        labels:
          service: 'node-exporter'

  # ============================================
  # PostgreSQL Metrics (if using pg_exporter)
  # ============================================
  # - job_name: 'postgres'
  #   static_configs:
  #     - targets: ['postgres-exporter:9187']
  #       labels:
  #         service: 'postgresql'

  # ============================================
  # Redis Metrics (if using redis_exporter)
  # ============================================
  # - job_name: 'redis'
  #   static_configs:
  #     - targets: ['redis-exporter:9121']
  #       labels:
  #         service: 'redis'

  # ============================================
  # Loki Metrics
  # ============================================
  - job_name: 'loki'
    static_configs:
      - targets: ['loki:3100']
        labels:
          service: 'loki'

  # ============================================
  # Grafana Metrics
  # ============================================
  - job_name: 'grafana'
    static_configs:
      - targets: ['grafana:3000']
        labels:
          service: 'grafana'
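
Because this file defaults to scraping the backend via `host.docker.internal:5000`, a quick sanity check that the backend actually serves Prometheus-format metrics saves a round of debugging in the Prometheus targets page. A minimal, hedged sketch (Node 18+ for the global `fetch`; the URL assumes the local backend on port 5000 as configured above):

```ts
// check-metrics.ts - hypothetical helper script, not part of the repo.
// Verifies the backend exposes Prometheus metrics before wiring up scraping.
async function checkMetricsEndpoint(url = 'http://localhost:5000/metrics'): Promise<void> {
  const res = await fetch(url);
  if (!res.ok) {
    throw new Error(`Metrics endpoint returned ${res.status}`);
  }
  const body = await res.text();
  // Prometheus exposition format declares each metric family with a # TYPE line.
  const families = body.split('\n').filter((line) => line.startsWith('# TYPE'));
  console.log(`OK - ${families.length} metric families exposed`);
}

checkMetricsEndpoint().catch((err) => {
  console.error('Metrics check failed:', err);
  process.exit(1);
});
```
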
129
monitoring/promtail/promtail-config.yml
Normal file
@ -0,0 +1,129 @@
# =============================================================================
# Promtail Configuration for RE Workflow
# Ships logs from application log files to Loki
# =============================================================================

server:
  http_listen_port: 9080
  grpc_listen_port: 0

# Positions file (tracks what's been read)
positions:
  filename: /tmp/promtail/positions.yaml

# Loki client configuration
clients:
  - url: http://loki:3100/loki/api/v1/push
    batchwait: 1s
    batchsize: 1048576  # 1MB
    timeout: 10s

# Scrape configurations
scrape_configs:
  # ============================================
  # RE Workflow Backend Application Logs
  # ============================================
  - job_name: re-workflow-app
    static_configs:
      - targets:
          - localhost
        labels:
          job: re-workflow
          app: re-workflow
          service: backend
          __path__: /var/log/app/*.log

    pipeline_stages:
      # Parse JSON logs
      - json:
          expressions:
            level: level
            message: message
            timestamp: timestamp
            requestId: requestId
            userId: userId
            method: method
            url: url
            statusCode: statusCode
            duration: duration
            workflowEvent: workflowEvent
            tatEvent: tatEvent
            authEvent: authEvent
            error: error

      # Set log level as label
      - labels:
          level:
          requestId:
          workflowEvent:
          tatEvent:
          authEvent:

      # Timestamp parsing
      - timestamp:
          source: timestamp
          format: "2006-01-02 15:04:05"
          fallback_formats:
            - RFC3339

      # Output stage
      - output:
          source: message

  # ============================================
  # Docker Container Logs (if running in Docker)
  # ============================================
  - job_name: docker-containers
    static_configs:
      - targets:
          - localhost
        labels:
          job: docker
          __path__: /var/lib/docker/containers/*/*-json.log

    pipeline_stages:
      # Parse Docker JSON format
      - json:
          expressions:
            output: log
            stream: stream
            time: time

      # Extract container info from path
      - regex:
          source: filename
          expression: '/var/lib/docker/containers/(?P<container_id>[a-f0-9]+)/.*'

      # Add labels
      - labels:
          stream:
          container_id:

      # Parse application JSON from log field
      - json:
          source: output
          expressions:
            level: level
            message: message
            service: service

      # Add level as label if present
      - labels:
          level:
          service:

      # Output the log message
      - output:
          source: output

  # ============================================
  # System Logs (optional - for infrastructure monitoring)
  # ============================================
  # - job_name: system
  #   static_configs:
  #     - targets:
  #         - localhost
  #       labels:
  #         job: system
  #         __path__: /var/log/syslog
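
The `json` pipeline stage above only pays off if the backend emits single-line JSON with those exact field names. The backend's logger is winston (with winston-loki added in package.json below); a hedged sketch of a logger whose output matches the expressions in this config — field names and the log path come from the config, the rest is illustrative and the real logger lives behind `@utils/logger`:

```ts
// Hypothetical logger sketch matching the promtail pipeline above.
import winston from 'winston';

const logger = winston.createLogger({
  level: 'info',
  // One JSON object per line, with the timestamp formatted the way the
  // promtail `timestamp` stage expects ("2006-01-02 15:04:05" in Go layout).
  format: winston.format.combine(
    winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
    winston.format.json()
  ),
  transports: [
    // Writes under the __path__ glob (/var/log/app/*.log) promtail tails.
    new winston.transports.File({ filename: '/var/log/app/app.log' }),
  ],
});

// Fields like requestId and level become promtail labels/extracted fields.
logger.info('Request completed', {
  requestId: 'req-123',
  method: 'GET',
  url: '/api/v1/workflows',
  statusCode: 200,
  duration: 42,
});
```
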
68
monitoring/start-monitoring.bat
Normal file
@ -0,0 +1,68 @@
@echo off
echo ============================================================
echo RE Workflow Monitoring Stack - Startup Script
echo ============================================================
echo.

:: Check if Docker is running
docker info >nul 2>&1
if errorlevel 1 (
    echo [ERROR] Docker is not running. Please start Docker Desktop first.
    pause
    exit /b 1
)

echo [INFO] Docker is running.
echo.

:: Navigate to monitoring directory
cd /d "%~dp0"
echo [INFO] Working directory: %cd%
echo.

:: Start monitoring stack
echo [INFO] Starting monitoring stack...
echo.
docker-compose -f docker-compose.monitoring.yml up -d

if errorlevel 1 (
    echo.
    echo [ERROR] Failed to start monitoring stack.
    pause
    exit /b 1
)

echo.
echo ============================================================
echo Monitoring Stack Started Successfully!
echo ============================================================
echo.
echo Services:
echo ---------------------------------------------------------
echo Grafana: http://localhost:3001
echo Username: admin
echo Password: REWorkflow@2024
echo.
echo Prometheus: http://localhost:9090
echo.
echo Loki: http://localhost:3100
echo.
echo Alertmanager: http://localhost:9093
echo ---------------------------------------------------------
echo.
echo Next Steps:
echo 1. Add LOKI_HOST=http://localhost:3100 to your .env file
echo 2. Restart your backend application
echo 3. Open Grafana at http://localhost:3001
echo 4. Navigate to Dashboards ^> RE Workflow
echo.
echo ============================================================

:: Show container status
echo.
echo [INFO] Container Status:
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
echo.

pause
36
monitoring/stop-monitoring.bat
Normal file
@ -0,0 +1,36 @@
@echo off
echo ============================================================
echo RE Workflow Monitoring Stack - Shutdown Script
echo ============================================================
echo.

:: Navigate to monitoring directory
cd /d "%~dp0"

echo [INFO] Stopping monitoring stack...
echo.

docker-compose -f docker-compose.monitoring.yml down

if errorlevel 1 (
    echo.
    echo [ERROR] Failed to stop monitoring stack.
    pause
    exit /b 1
)

echo.
echo ============================================================
echo Monitoring Stack Stopped Successfully!
echo ============================================================
echo.
echo Note: Data volumes are preserved. Use the following
echo command to remove all data:
echo.
echo docker-compose -f docker-compose.monitoring.yml down -v
echo.
echo ============================================================
echo.

pause
551
package-lock.json
generated
@ -32,11 +32,13 @@
|
||||
"passport-jwt": "^4.0.1",
|
||||
"pg": "^8.13.1",
|
||||
"pg-hstore": "^2.3.4",
|
||||
"prom-client": "^15.1.3",
|
||||
"sequelize": "^6.37.5",
|
||||
"socket.io": "^4.8.1",
|
||||
"uuid": "^8.3.2",
|
||||
"web-push": "^3.6.7",
|
||||
"winston": "^3.17.0",
|
||||
"winston-loki": "^6.1.3",
|
||||
"zod": "^3.24.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
@ -663,6 +665,37 @@
|
||||
"kuler": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@emnapi/core": {
|
||||
"version": "1.7.1",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.7.1.tgz",
|
||||
"integrity": "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==",
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@emnapi/wasi-threads": "1.1.0",
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@emnapi/runtime": {
|
||||
"version": "1.7.1",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.7.1.tgz",
|
||||
"integrity": "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==",
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@emnapi/wasi-threads": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.1.0.tgz",
|
||||
"integrity": "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==",
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@eslint-community/eslint-utils": {
|
||||
"version": "4.9.0",
|
||||
"resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
|
||||
@ -1616,6 +1649,306 @@
|
||||
"win32"
|
||||
]
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-android-arm-eabi": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-android-arm-eabi/-/snappy-android-arm-eabi-7.3.3.tgz",
|
||||
"integrity": "sha512-d4vUFFzNBvazGfB/KU8MnEax6itTIgRWXodPdZDnWKHy9HwVBndpCiedQDcSNHcZNYV36rx034rpn7SAuTL2NA==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"android"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-android-arm64": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-android-arm64/-/snappy-android-arm64-7.3.3.tgz",
|
||||
"integrity": "sha512-Uh+w18dhzjVl85MGhRnojb7OLlX2ErvMsYIunO/7l3Frvc2zQvfqsWsFJanu2dwqlE2YDooeNP84S+ywgN9sxg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"android"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-darwin-arm64": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-darwin-arm64/-/snappy-darwin-arm64-7.3.3.tgz",
|
||||
"integrity": "sha512-AmJn+6yOu/0V0YNHLKmRUNYkn93iv/1wtPayC7O1OHtfY6YqHQ31/MVeeRBiEYtQW9TwVZxXrDirxSB1PxRdtw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-darwin-x64": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-darwin-x64/-/snappy-darwin-x64-7.3.3.tgz",
|
||||
"integrity": "sha512-biLTXBmPjPmO7HIpv+5BaV9Gy/4+QJSUNJW8Pjx1UlWAVnocPy7um+zbvAWStZssTI5sfn/jOClrAegD4w09UA==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-freebsd-x64": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-freebsd-x64/-/snappy-freebsd-x64-7.3.3.tgz",
|
||||
"integrity": "sha512-E3R3ewm8Mrjm0yL2TC3VgnphDsQaCPixNJqBbGiz3NTshVDhlPlOgPKF0NGYqKiKaDGdD9PKtUgOR4vagUtn7g==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"freebsd"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-arm-gnueabihf": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-arm-gnueabihf/-/snappy-linux-arm-gnueabihf-7.3.3.tgz",
|
||||
"integrity": "sha512-ZuNgtmk9j0KyT7TfLyEnvZJxOhbkyNR761nk04F0Q4NTHMICP28wQj0xgEsnCHUsEeA9OXrRL4R7waiLn+rOQA==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-arm64-gnu": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-arm64-gnu/-/snappy-linux-arm64-gnu-7.3.3.tgz",
|
||||
"integrity": "sha512-KIzwtq0dAzshzpqZWjg0Q9lUx93iZN7wCCUzCdLYIQ+mvJZKM10VCdn0RcuQze1R3UJTPwpPLXQIVskNMBYyPA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-arm64-musl": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-arm64-musl/-/snappy-linux-arm64-musl-7.3.3.tgz",
|
||||
"integrity": "sha512-AAED4cQS74xPvktsyVmz5sy8vSxG/+3d7Rq2FDBZzj3Fv6v5vux6uZnECPCAqpALCdTtJ61unqpOyqO7hZCt1Q==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-ppc64-gnu": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-ppc64-gnu/-/snappy-linux-ppc64-gnu-7.3.3.tgz",
|
||||
"integrity": "sha512-pofO5eSLg8ZTBwVae4WHHwJxJGZI8NEb4r5Mppvq12J/1/Hq1HecClXmfY3A7bdT2fsS2Td+Q7CI9VdBOj2sbA==",
|
||||
"cpu": [
|
||||
"ppc64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-riscv64-gnu": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-riscv64-gnu/-/snappy-linux-riscv64-gnu-7.3.3.tgz",
|
||||
"integrity": "sha512-OiHYdeuwj0TVBXADUmmQDQ4lL1TB+8EwmXnFgOutoDVXHaUl0CJFyXLa6tYUXe+gRY8hs1v7eb0vyE97LKY06Q==",
|
||||
"cpu": [
|
||||
"riscv64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-s390x-gnu": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-s390x-gnu/-/snappy-linux-s390x-gnu-7.3.3.tgz",
|
||||
"integrity": "sha512-66QdmuV9CTq/S/xifZXlMy3PsZTviAgkqqpZ+7vPCmLtuP+nqhaeupShOFf/sIDsS0gZePazPosPTeTBbhkLHg==",
|
||||
"cpu": [
|
||||
"s390x"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-x64-gnu": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-x64-gnu/-/snappy-linux-x64-gnu-7.3.3.tgz",
|
||||
"integrity": "sha512-g6KURjOxrgb8yXDEZMuIcHkUr/7TKlDwSiydEQtMtP3n4iI4sNjkcE/WNKlR3+t9bZh1pFGAq7NFRBtouQGHpQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-linux-x64-musl": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-x64-musl/-/snappy-linux-x64-musl-7.3.3.tgz",
|
||||
"integrity": "sha512-6UvOyczHknpaKjrlKKSlX3rwpOrfJwiMG6qA0NRKJFgbcCAEUxmN9A8JvW4inP46DKdQ0bekdOxwRtAhFiTDfg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-openharmony-arm64": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-openharmony-arm64/-/snappy-openharmony-arm64-7.3.3.tgz",
|
||||
"integrity": "sha512-I5mak/5rTprobf7wMCk0vFhClmWOL/QiIJM4XontysnadmP/R9hAcmuFmoMV2GaxC9MblqLA7Z++gy8ou5hJVw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"openharmony"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-wasm32-wasi": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-wasm32-wasi/-/snappy-wasm32-wasi-7.3.3.tgz",
|
||||
"integrity": "sha512-+EroeygVYo9RksOchjF206frhMkfD2PaIun3yH4Zp5j/Y0oIEgs/+VhAYx/f+zHRylQYUIdLzDRclcoepvlR8Q==",
|
||||
"cpu": [
|
||||
"wasm32"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@napi-rs/wasm-runtime": "^1.0.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-win32-arm64-msvc": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-win32-arm64-msvc/-/snappy-win32-arm64-msvc-7.3.3.tgz",
|
||||
"integrity": "sha512-rxqfntBsCfzgOha/OlG8ld2hs6YSMGhpMUbFjeQLyVDbooY041fRXv3S7yk52DfO6H4QQhLT5+p7cW0mYdhyiQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-win32-ia32-msvc": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-win32-ia32-msvc/-/snappy-win32-ia32-msvc-7.3.3.tgz",
|
||||
"integrity": "sha512-joRV16DsRtqjGt0CdSpxGCkO0UlHGeTZ/GqvdscoALpRKbikR2Top4C61dxEchmOd3lSYsXutuwWWGg3Nr++WA==",
|
||||
"cpu": [
|
||||
"ia32"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/snappy-win32-x64-msvc": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-win32-x64-msvc/-/snappy-win32-x64-msvc-7.3.3.tgz",
|
||||
"integrity": "sha512-cEnQwcsdJyOU7HSZODWsHpKuQoSYM4jaqw/hn9pOXYbRN1+02WxYppD3fdMuKN6TOA6YG5KA5PHRNeVilNX86Q==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/wasm-runtime": {
|
||||
"version": "1.0.7",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.0.7.tgz",
|
||||
"integrity": "sha512-SeDnOO0Tk7Okiq6DbXmmBODgOAb9dp9gjlphokTUxmt8U3liIP1ZsozBahH69j/RJv+Rfs6IwUKHTgQYJ/HBAw==",
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@emnapi/core": "^1.5.0",
|
||||
"@emnapi/runtime": "^1.5.0",
|
||||
"@tybys/wasm-util": "^0.10.1"
|
||||
}
|
||||
},
|
||||
"node_modules/@noble/hashes": {
|
||||
"version": "1.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz",
|
||||
@ -1674,6 +2007,15 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@opentelemetry/api": {
|
||||
"version": "1.9.0",
|
||||
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
|
||||
"integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
|
||||
"license": "Apache-2.0",
|
||||
"engines": {
|
||||
"node": ">=8.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@paralleldrive/cuid2": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.3.1.tgz",
|
||||
@ -1695,6 +2037,70 @@
|
||||
"node": ">=14"
|
||||
}
|
||||
},
|
||||
"node_modules/@protobufjs/aspromise": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
|
||||
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/base64": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
|
||||
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/codegen": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
|
||||
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/eventemitter": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
|
||||
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/fetch": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
|
||||
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
|
||||
"license": "BSD-3-Clause",
|
||||
"dependencies": {
|
||||
"@protobufjs/aspromise": "^1.1.1",
|
||||
"@protobufjs/inquire": "^1.1.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@protobufjs/float": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
|
||||
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/inquire": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
|
||||
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/path": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
|
||||
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/pool": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
|
||||
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@protobufjs/utf8": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
|
||||
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/@sinclair/typebox": {
|
||||
"version": "0.27.8",
|
||||
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
|
||||
@ -1775,6 +2181,16 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@tybys/wasm-util": {
|
||||
"version": "0.10.1",
|
||||
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz",
|
||||
"integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==",
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/babel__core": {
|
||||
"version": "7.20.5",
|
||||
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
|
||||
@ -2717,6 +3133,15 @@
|
||||
"integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/async-exit-hook": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/async-exit-hook/-/async-exit-hook-2.0.1.tgz",
|
||||
"integrity": "sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.12.0"
|
||||
}
|
||||
},
|
||||
"node_modules/async-retry": {
|
||||
"version": "1.3.3",
|
||||
"resolved": "https://registry.npmjs.org/async-retry/-/async-retry-1.3.3.tgz",
|
||||
@ -2971,6 +3396,12 @@
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/bintrees": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/bintrees/-/bintrees-1.0.2.tgz",
|
||||
"integrity": "sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/bluebird": {
|
||||
"version": "3.7.2",
|
||||
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
|
||||
@ -3103,6 +3534,18 @@
|
||||
"node-int64": "^0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/btoa": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/btoa/-/btoa-1.2.1.tgz",
|
||||
"integrity": "sha512-SB4/MIGlsiVkMcHmT+pSmIPoNDoHg+7cMzmt3Uxt628MTz2487DKSqK/fuhFBrkuqrYv5UCEnACpF4dTFNKc/g==",
|
||||
"license": "(MIT OR Apache-2.0)",
|
||||
"bin": {
|
||||
"btoa": "bin/btoa.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/buffer-equal-constant-time": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
|
||||
@ -6688,6 +7131,12 @@
|
||||
"node": ">= 12.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/long": {
|
||||
"version": "5.3.2",
|
||||
"resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz",
|
||||
"integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==",
|
||||
"license": "Apache-2.0"
|
||||
},
|
||||
"node_modules/lru-cache": {
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
|
||||
@ -7876,6 +8325,19 @@
|
||||
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/prom-client": {
|
||||
"version": "15.1.3",
|
||||
"resolved": "https://registry.npmjs.org/prom-client/-/prom-client-15.1.3.tgz",
|
||||
"integrity": "sha512-6ZiOBfCywsD4k1BN9IX0uZhF+tJkV8q8llP64G5Hajs4JOeVLPCwpPVcpXy3BwYiUGgyJzsJJQeOIv7+hDSq8g==",
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"@opentelemetry/api": "^1.4.0",
|
||||
"tdigest": "^0.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^16 || ^18 || >=20"
|
||||
}
|
||||
},
|
||||
"node_modules/prompts": {
|
||||
"version": "2.4.2",
|
||||
"resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
|
||||
@ -7897,6 +8359,30 @@
|
||||
"dev": true,
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/protobufjs": {
|
||||
"version": "7.5.4",
|
||||
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz",
|
||||
"integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==",
|
||||
"hasInstallScript": true,
|
||||
"license": "BSD-3-Clause",
|
||||
"dependencies": {
|
||||
"@protobufjs/aspromise": "^1.1.2",
|
||||
"@protobufjs/base64": "^1.1.2",
|
||||
"@protobufjs/codegen": "^2.0.4",
|
||||
"@protobufjs/eventemitter": "^1.1.0",
|
||||
"@protobufjs/fetch": "^1.1.0",
|
||||
"@protobufjs/float": "^1.0.2",
|
||||
"@protobufjs/inquire": "^1.1.0",
|
||||
"@protobufjs/path": "^1.1.2",
|
||||
"@protobufjs/pool": "^1.1.0",
|
||||
"@protobufjs/utf8": "^1.1.0",
|
||||
"@types/node": ">=13.7.0",
|
||||
"long": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/proxy-addr": {
|
||||
"version": "2.0.7",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
||||
@ -8632,6 +9118,40 @@
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/snappy": {
|
||||
"version": "7.3.3",
|
||||
"resolved": "https://registry.npmjs.org/snappy/-/snappy-7.3.3.tgz",
|
||||
"integrity": "sha512-UDJVCunvgblRpfTOjo/uT7pQzfrTsSICJ4yVS4aq7SsGBaUSpJwaVP15nF//jqinSLpN7boe/BqbUmtWMTQ5MQ==",
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
},
|
||||
"funding": {
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/Brooooooklyn"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@napi-rs/snappy-android-arm-eabi": "7.3.3",
|
||||
"@napi-rs/snappy-android-arm64": "7.3.3",
|
||||
"@napi-rs/snappy-darwin-arm64": "7.3.3",
|
||||
"@napi-rs/snappy-darwin-x64": "7.3.3",
|
||||
"@napi-rs/snappy-freebsd-x64": "7.3.3",
|
||||
"@napi-rs/snappy-linux-arm-gnueabihf": "7.3.3",
|
||||
"@napi-rs/snappy-linux-arm64-gnu": "7.3.3",
|
||||
"@napi-rs/snappy-linux-arm64-musl": "7.3.3",
|
||||
"@napi-rs/snappy-linux-ppc64-gnu": "7.3.3",
|
||||
"@napi-rs/snappy-linux-riscv64-gnu": "7.3.3",
|
||||
"@napi-rs/snappy-linux-s390x-gnu": "7.3.3",
|
||||
"@napi-rs/snappy-linux-x64-gnu": "7.3.3",
|
||||
"@napi-rs/snappy-linux-x64-musl": "7.3.3",
|
||||
"@napi-rs/snappy-openharmony-arm64": "7.3.3",
|
||||
"@napi-rs/snappy-wasm32-wasi": "7.3.3",
|
||||
"@napi-rs/snappy-win32-arm64-msvc": "7.3.3",
|
||||
"@napi-rs/snappy-win32-ia32-msvc": "7.3.3",
|
||||
"@napi-rs/snappy-win32-x64-msvc": "7.3.3"
|
||||
}
|
||||
},
|
||||
"node_modules/socket.io": {
|
||||
"version": "4.8.1",
|
||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.1.tgz",
|
||||
@ -9058,6 +9578,15 @@
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/tdigest": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/tdigest/-/tdigest-0.1.2.tgz",
|
||||
"integrity": "sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"bintrees": "1.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/teeny-request": {
|
||||
"version": "9.0.0",
|
||||
"resolved": "https://registry.npmjs.org/teeny-request/-/teeny-request-9.0.0.tgz",
|
||||
@ -9675,6 +10204,12 @@
|
||||
"punycode": "^2.1.0"
|
||||
}
|
||||
},
|
||||
"node_modules/url-polyfill": {
|
||||
"version": "1.1.14",
|
||||
"resolved": "https://registry.npmjs.org/url-polyfill/-/url-polyfill-1.1.14.tgz",
|
||||
"integrity": "sha512-p4f3TTAG6ADVF3mwbXw7hGw+QJyw5CnNGvYh5fCuQQZIiuKUswqcznyV3pGDP9j0TSmC4UvRKm8kl1QsX1diiQ==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/util-deprecate": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||
@ -9822,6 +10357,22 @@
|
||||
"node": ">= 12.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/winston-loki": {
|
||||
"version": "6.1.3",
|
||||
"resolved": "https://registry.npmjs.org/winston-loki/-/winston-loki-6.1.3.tgz",
|
||||
"integrity": "sha512-DjWtJ230xHyYQWr9mZJa93yhwHttn3JEtSYWP8vXZWJOahiQheUhf+88dSIidbGXB3u0oLweV6G1vkL/ouT62Q==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"async-exit-hook": "2.0.1",
|
||||
"btoa": "^1.2.1",
|
||||
"protobufjs": "^7.2.4",
|
||||
"url-polyfill": "^1.1.12",
|
||||
"winston-transport": "^4.3.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"snappy": "^7.2.2"
|
||||
}
|
||||
},
|
||||
"node_modules/winston-transport": {
|
||||
"version": "4.9.0",
|
||||
"resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.9.0.tgz",
|
||||
|
||||

@ -44,11 +44,13 @@
    "passport-jwt": "^4.0.1",
    "pg": "^8.13.1",
    "pg-hstore": "^2.3.4",
    "prom-client": "^15.1.3",
    "sequelize": "^6.37.5",
    "socket.io": "^4.8.1",
    "uuid": "^8.3.2",
    "web-push": "^3.6.7",
    "winston": "^3.17.0",
    "winston-loki": "^6.1.3",
    "zod": "^3.24.1"
  },
  "devDependencies": {

@ -7,6 +7,7 @@ import { UserService } from './services/user.service';
import { SSOUserData } from './types/auth.types';
import { sequelize } from './config/database';
import { corsMiddleware } from './middlewares/cors.middleware';
import { metricsMiddleware, createMetricsRouter } from './middlewares/metrics.middleware';
import routes from './routes/index';
import { ensureUploadDir, UPLOAD_DIR } from './config/storage';
import path from 'path';
@ -101,6 +102,12 @@ app.use(express.urlencoded({ extended: true, limit: '10mb' }));
// Logging middleware
app.use(morgan('combined'));

// Prometheus metrics middleware - collect request metrics
app.use(metricsMiddleware);

// Prometheus metrics endpoint - expose metrics for scraping
app.use(createMetricsRouter());

// Health check endpoint (before API routes)
app.get('/health', (_req: express.Request, res: express.Response) => {
  res.status(200).json({
|
||||
logger.info(`[Conclusion] ✅ Request ${requestId} finalized and closed`);
|
||||
|
||||
// Automatically create summary when request is closed (idempotent - returns existing if already exists)
|
||||
// Since the initiator is finalizing, this should always succeed
|
||||
let summaryId = null;
|
||||
try {
|
||||
const { summaryService } = await import('@services/summary.service');
|
||||
const summary = await summaryService.createSummary(requestId, userId);
|
||||
const userRole = (req as any).user?.role || (req as any).auth?.role;
|
||||
const summary = await summaryService.createSummary(requestId, userId, { userRole });
|
||||
summaryId = (summary as any).summaryId;
|
||||
logger.info(`[Conclusion] ✅ Summary ${summaryId} created automatically for closed request ${requestId}`);
|
||||
} catch (summaryError: any) {
|
||||
// Log error but don't fail the closure if summary creation fails
|
||||
// Frontend can retry summary creation if needed
|
||||
logger.error(`[Conclusion] Failed to create summary for request ${requestId}:`, summaryError);
|
||||
logger.error(`[Conclusion] Failed to create summary for request ${requestId}:`, summaryError.message);
|
||||
}
|
||||
|
||||
// Log activity
|
||||
|
||||

@ -8,6 +8,7 @@ import { activityService } from '@services/activity.service';
import type { AuthenticatedRequest } from '../types/express';
import { getRequestMetadata } from '@utils/requestUtils';
import { getConfigNumber, getConfigValue } from '@services/configReader.service';
import { logDocumentEvent, logWithContext } from '@utils/logger';

export class DocumentController {
  async upload(req: AuthenticatedRequest, res: Response): Promise<void> {
@ -82,6 +83,16 @@ export class DocumentController {
        downloadCount: 0,
      } as any);

      // Log document upload event
      logDocumentEvent('uploaded', doc.documentId, {
        requestId,
        userId,
        fileName: file.originalname,
        fileType: extension,
        fileSize: file.size,
        category,
      });

      // Get user details for activity logging
      const user = await User.findByPk(userId);
      const uploaderName = (user as any)?.displayName || (user as any)?.email || 'User';
@ -108,6 +119,11 @@ export class DocumentController {
      ResponseHandler.success(res, doc, 'File uploaded', 201);
    } catch (error) {
      const message = error instanceof Error ? error.message : 'Unknown error';
      logWithContext('error', 'Document upload failed', {
        userId: req.user?.userId,
        requestId: req.body?.requestId,
        error,
      });
      ResponseHandler.error(res, 'Upload failed', 500, message);
    }
  }

@ -12,7 +12,7 @@ const pauseWorkflowSchema = z.object({
});

const resumeWorkflowSchema = z.object({
  // No body required for resume
  notes: z.string().max(1000, 'Notes must be less than 1000 characters').optional()
});

export class PauseController {
@ -72,13 +72,20 @@ export class PauseController {
        return;
      }

      const result = await pauseService.resumeWorkflow(id, userId);
      // Validate request body (notes is optional)
      const validated = resumeWorkflowSchema.parse(req.body || {});

      const result = await pauseService.resumeWorkflow(id, userId, validated.notes);

      ResponseHandler.success(res, {
        workflow: result.workflow,
        level: result.level
      }, 'Workflow resumed successfully', 200);
    } catch (error: any) {
      if (error instanceof z.ZodError) {
        ResponseHandler.error(res, 'Validation failed', 400, error.errors.map(e => `${e.path.join('.')}: ${e.message}`).join('; '));
        return;
      }
      const errorMessage = error instanceof Error ? error.message : 'Unknown error';
      ResponseHandler.error(res, 'Failed to resume workflow', 400, errorMessage);
    }
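
The resume schema change means an empty body is still valid, while an over-long `notes` is rejected with the same structured message the controller returns. A small standalone check of that behavior, using the schema exactly as defined above:

```ts
// Quick illustration of resumeWorkflowSchema behavior (schema copied from above).
import { z } from 'zod';

const resumeWorkflowSchema = z.object({
  notes: z.string().max(1000, 'Notes must be less than 1000 characters').optional()
});

resumeWorkflowSchema.parse({});                                        // OK - notes is optional
resumeWorkflowSchema.parse({ notes: 'Resuming after vendor reply' }); // OK

try {
  resumeWorkflowSchema.parse({ notes: 'x'.repeat(1001) });
} catch (err) {
  if (err instanceof z.ZodError) {
    // Prints "notes: Notes must be less than 1000 characters",
    // the same shape the controller's 400 response carries.
    console.log(err.errors.map(e => `${e.path.join('.')}: ${e.message}`).join('; '));
  }
}
```
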

@ -2,30 +2,69 @@ import { Request, Response } from 'express';
import { summaryService } from '@services/summary.service';
import { ResponseHandler } from '@utils/responseHandler';
import type { AuthenticatedRequest } from '../types/express';
import logger from '@utils/logger';

export class SummaryController {
  /**
   * Create a summary for a closed request
   * POST /api/v1/summaries
   *
   * Access: Initiator or Admin/Management users
   * Body: { requestId: string, regenerate?: boolean }
   */
  async createSummary(req: AuthenticatedRequest, res: Response): Promise<void> {
    try {
      const userId = (req as any).user?.userId || (req as any).user?.id || (req as any).auth?.userId;
      const { requestId } = req.body;
      const userRole = (req as any).user?.role || (req as any).auth?.role;
      const { requestId, regenerate } = req.body;

      if (!requestId) {
        ResponseHandler.error(res, 'requestId is required', 400);
        return;
      }

      const summary = await summaryService.createSummary(requestId, userId);
      ResponseHandler.success(res, summary, 'Summary created successfully', 201);
      const summary = await summaryService.createSummary(requestId, userId, {
        userRole,
        regenerate: regenerate === true
      });

      const message = regenerate ? 'Summary regenerated successfully' : 'Summary created successfully';
      ResponseHandler.success(res, summary, message, 201);
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'Unknown error';
      ResponseHandler.error(res, 'Failed to create summary', 400, errorMessage);
    }
  }

  /**
   * Regenerate summary for a closed request (deletes existing and creates new)
   * POST /api/v1/summaries/:requestId/regenerate
   *
   * Access: Initiator or Admin/Management users
   */
  async regenerateSummary(req: AuthenticatedRequest, res: Response): Promise<void> {
    try {
      const userId = (req as any).user?.userId || (req as any).user?.id || (req as any).auth?.userId;
      const userRole = (req as any).user?.role || (req as any).auth?.role;
      const { requestId } = req.params;

      if (!requestId) {
        ResponseHandler.error(res, 'requestId is required', 400);
        return;
      }

      const summary = await summaryService.createSummary(requestId, userId, {
        userRole,
        regenerate: true
      });

      ResponseHandler.success(res, summary, 'Summary regenerated successfully', 201);
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'Unknown error';
      ResponseHandler.error(res, 'Failed to regenerate summary', 400, errorMessage);
    }
  }

  /**
   * Get summary details
   * GET /api/v1/summaries/:summaryId
@ -35,21 +74,27 @@ export class SummaryController {
      const userId = (req as any).user?.userId || (req as any).user?.id || (req as any).auth?.userId;
      const { summaryId } = req.params;

      // Check if this is a sharedSummaryId (UUID format) - if it starts with a shared summary pattern, try that first
      // For now, we'll check if it's a shared summary by trying to get it
      // If it fails, fall back to regular summary lookup
      // The ID can be either a sharedSummaryId or a summaryId
      // Try shared summary first (for SharedSummaryDetail component)
      // If not found, try regular summary (for SummaryTab component)
      try {
        const summary = await summaryService.getSummaryDetailsBySharedId(summaryId, userId);
        ResponseHandler.success(res, summary, 'Summary retrieved successfully');
        return;
      } catch (sharedError) {
        // If it's not a shared summary, try regular summary lookup
      } catch (sharedError: any) {
        // Only log error if it's not "not found" (other errors are real issues)
        if (!sharedError.message?.includes('not found')) {
          logger.error('[Summary] Error getting summary by shared ID:', sharedError);
        }

        // Try as regular summaryId (the service checks for both initiator and shared access)
        const summary = await summaryService.getSummaryDetails(summaryId, userId);
        ResponseHandler.success(res, summary, 'Summary retrieved successfully');
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'Unknown error';
      const statusCode = errorMessage.includes('not found') || errorMessage.includes('Access denied') ? 404 : 500;
      const statusCode = errorMessage.includes('not found') ? 404 :
                         errorMessage.includes('Access denied') ? 403 : 500;
      ResponseHandler.error(res, 'Failed to get summary details', statusCode, errorMessage);
    }
  }
@ -11,6 +11,8 @@ import fs from 'fs';
|
||||
import path from 'path';
|
||||
import crypto from 'crypto';
|
||||
import { getRequestMetadata } from '@utils/requestUtils';
|
||||
import { enrichApprovalLevels, enrichSpectators, validateInitiator } from '@services/userEnrichment.service';
|
||||
import logger from '@utils/logger';
|
||||
|
||||
const workflowService = new WorkflowService();
|
||||
|
||||
@ -18,11 +20,88 @@ export class WorkflowController {
|
||||
async createWorkflow(req: AuthenticatedRequest, res: Response): Promise<void> {
|
||||
try {
|
||||
const validatedData = validateCreateWorkflow(req.body);
|
||||
|
||||
// Validate initiator exists
|
||||
await validateInitiator(req.user.userId);
|
||||
|
||||
// Handle frontend format: map 'approvers' -> 'approvalLevels' for backward compatibility
|
||||
let approvalLevels = validatedData.approvalLevels || [];
|
||||
if (!approvalLevels.length && (req.body as any).approvers) {
|
||||
const approvers = (req.body as any).approvers || [];
|
||||
approvalLevels = approvers.map((a: any, index: number) => ({
|
||||
levelNumber: index + 1,
|
||||
email: a.email || a.approverEmail,
|
||||
tatHours: a.tatType === 'days' ? (a.tat || 0) * 24 : (a.tat || a.tatHours || 24),
|
||||
isFinalApprover: index === approvers.length - 1,
|
||||
}));
|
||||
}
|
||||
|
||||
// Normalize approval levels: map approverEmail -> email for backward compatibility
|
||||
const normalizedApprovalLevels = approvalLevels.map((level: any) => ({
|
||||
...level,
|
||||
email: level.email || level.approverEmail, // Support both formats
|
||||
}));
|
||||
|
||||
// Enrich approval levels with user data (auto-lookup from AD if not in DB)
|
||||
logger.info(`[WorkflowController] Enriching ${normalizedApprovalLevels.length} approval levels`);
|
||||
const enrichedApprovalLevels = await enrichApprovalLevels(normalizedApprovalLevels as any);
|
||||
|
||||
// Enrich spectators if provided
|
||||
// Normalize spectators: map userEmail -> email for backward compatibility
|
||||
// Filter participants to only include SPECTATOR type (exclude INITIATOR and APPROVER)
|
||||
const allParticipants = validatedData.spectators || validatedData.participants || [];
|
||||
const spectators = allParticipants.filter((p: any) =>
|
||||
!p.participantType || p.participantType === 'SPECTATOR'
|
||||
);
|
||||
const normalizedSpectators = spectators.map((spec: any) => ({
|
||||
...spec,
|
||||
email: spec.email || spec.userEmail, // Support both formats
|
||||
})).filter((spec: any) => spec.email); // Only include entries with email
|
||||
const enrichedSpectators = normalizedSpectators.length > 0
|
||||
? await enrichSpectators(normalizedSpectators as any)
|
||||
: [];
|
||||
|
||||
// Build complete participants array automatically
|
||||
// This includes: INITIATOR + all APPROVERs + all SPECTATORs
|
||||
const initiator = await User.findByPk(req.user.userId);
|
||||
const initiatorEmail = (initiator as any).email;
|
||||
const initiatorName = (initiator as any).displayName || (initiator as any).email;
|
||||
|
||||
const autoGeneratedParticipants = [
|
||||
// Add initiator
|
||||
{
|
||||
userId: req.user.userId,
|
||||
userEmail: initiatorEmail,
|
||||
userName: initiatorName,
|
||||
participantType: 'INITIATOR' as const,
|
||||
canComment: true,
|
||||
canViewDocuments: true,
|
||||
canDownloadDocuments: true,
|
||||
notificationEnabled: true,
|
||||
},
|
||||
// Add all approvers from approval levels
|
||||
...enrichedApprovalLevels.map((level: any) => ({
|
||||
userId: level.approverId,
|
||||
userEmail: level.approverEmail,
|
||||
userName: level.approverName,
|
||||
participantType: 'APPROVER' as const,
|
||||
canComment: true,
|
||||
canViewDocuments: true,
|
||||
canDownloadDocuments: true,
|
||||
notificationEnabled: true,
|
||||
})),
|
||||
// Add all spectators
|
||||
...enrichedSpectators,
|
||||
];
|
||||
|
||||
// Convert string literal priority to enum
const workflowData = {
  ...validatedData,
  priority: validatedData.priority as Priority
  priority: validatedData.priority as Priority,
  approvalLevels: enrichedApprovalLevels,
  participants: autoGeneratedParticipants,
};

const requestMeta = getRequestMetadata(req);
const workflow = await workflowService.createWorkflow(req.user.userId, workflowData, {
  ipAddress: requestMeta.ipAddress,
@@ -32,6 +111,7 @@ export class WorkflowController {
  ResponseHandler.success(res, workflow, 'Workflow created successfully', 201);
} catch (error) {
  const errorMessage = error instanceof Error ? error.message : 'Unknown error';
  logger.error('[WorkflowController] Failed to create workflow:', error);
  ResponseHandler.error(res, 'Failed to create workflow', 400, errorMessage);
}
}
@@ -59,6 +139,18 @@ export class WorkflowController {
  return;
}

// Transform frontend format to backend format BEFORE validation
// Map 'approvers' -> 'approvalLevels' for backward compatibility
if (!parsed.approvalLevels && parsed.approvers) {
  const approvers = parsed.approvers || [];
  parsed.approvalLevels = approvers.map((a: any, index: number) => ({
    levelNumber: index + 1,
    email: a.email || a.approverEmail,
    tatHours: a.tatType === 'days' ? (a.tat || 0) * 24 : (a.tat || a.tatHours || 24),
    isFinalApprover: index === approvers.length - 1,
  }));
}

let validated;
try {
  validated = validateCreateWorkflow(parsed);
@@ -67,11 +159,81 @@ export class WorkflowController {
  const errorMessage = validationError?.errors
    ? validationError.errors.map((e: any) => `${e.path.join('.')}: ${e.message}`).join('; ')
    : (validationError instanceof Error ? validationError.message : 'Validation failed');
  logger.error(`[WorkflowController] Validation failed:`, errorMessage);
  ResponseHandler.error(res, 'Validation failed', 400, errorMessage);
  return;
}

const workflowData = { ...validated, priority: validated.priority as Priority } as any;
// Validate initiator exists
await validateInitiator(userId);

// Use the approval levels from validation (already transformed above)
let approvalLevels = validated.approvalLevels || [];

// Normalize approval levels: map approverEmail -> email for backward compatibility
const normalizedApprovalLevels = approvalLevels.map((level: any) => ({
  ...level,
  email: level.email || level.approverEmail, // Support both formats
}));

// Enrich approval levels with user data (auto-lookup from AD if not in DB)
logger.info(`[WorkflowController] Enriching ${normalizedApprovalLevels.length} approval levels`);
const enrichedApprovalLevels = await enrichApprovalLevels(normalizedApprovalLevels as any);

// Enrich spectators if provided
// Normalize spectators: map userEmail -> email for backward compatibility
// Filter participants to only include SPECTATOR type (exclude INITIATOR and APPROVER)
const allParticipants = validated.spectators || validated.participants || [];
const spectators = allParticipants.filter((p: any) =>
  !p.participantType || p.participantType === 'SPECTATOR'
);
const normalizedSpectators = spectators.map((spec: any) => ({
  ...spec,
  email: spec.email || spec.userEmail, // Support both formats
})).filter((spec: any) => spec.email); // Only include entries with email
const enrichedSpectators = normalizedSpectators.length > 0
  ? await enrichSpectators(normalizedSpectators as any)
  : [];

// Build complete participants array automatically
// This includes: INITIATOR + all APPROVERs + all SPECTATORs
const initiator = await User.findByPk(userId);
const initiatorEmail = (initiator as any).email;
const initiatorName = (initiator as any).displayName || (initiator as any).email;

const autoGeneratedParticipants = [
  // Add initiator
  {
    userId: userId,
    userEmail: initiatorEmail,
    userName: initiatorName,
    participantType: 'INITIATOR' as const,
    canComment: true,
    canViewDocuments: true,
    canDownloadDocuments: true,
    notificationEnabled: true,
  },
  // Add all approvers from approval levels
  ...enrichedApprovalLevels.map((level: any) => ({
    userId: level.approverId,
    userEmail: level.approverEmail,
    userName: level.approverName,
    participantType: 'APPROVER' as const,
    canComment: true,
    canViewDocuments: true,
    canDownloadDocuments: true,
    notificationEnabled: true,
  })),
  // Add all spectators
  ...enrichedSpectators,
];

const workflowData = {
  ...validated,
  priority: validated.priority as Priority,
  approvalLevels: enrichedApprovalLevels,
  participants: autoGeneratedParticipants,
} as any;

const requestMeta = getRequestMetadata(req);
const workflow = await workflowService.createWorkflow(userId, workflowData, {
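The `approvers` → `approvalLevels` transform above is what makes the simplified payload format work. A minimal standalone sketch of that mapping (the function name and payload type are illustrative, not from the codebase):

```typescript
type FrontendApprover = { email?: string; approverEmail?: string; tat?: number; tatType?: 'hours' | 'days'; tatHours?: number };

function toApprovalLevels(approvers: FrontendApprover[]) {
  return approvers.map((a, index) => ({
    levelNumber: index + 1,
    email: a.email || a.approverEmail,
    // Days are converted to hours; otherwise fall back to tat, tatHours, or a 24h default
    tatHours: a.tatType === 'days' ? (a.tat || 0) * 24 : (a.tat || a.tatHours || 24),
    isFinalApprover: index === approvers.length - 1, // last level auto-detected
  }));
}

// [{ levelNumber: 1, email: 'manager@royalenfield.com', tatHours: 48, isFinalApprover: true }]
console.log(toApprovalLevels([{ approverEmail: 'manager@royalenfield.com', tat: 2, tatType: 'days' }]));
```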
291 src/middlewares/metrics.middleware.ts Normal file
@@ -0,0 +1,291 @@
/**
 * Prometheus Metrics Middleware
 * Exposes application metrics for monitoring with Prometheus/Grafana
 *
 * Metrics exposed:
 * - http_requests_total: Total number of HTTP requests
 * - http_request_duration_seconds: HTTP request latency histogram
 * - http_request_errors_total: Total number of HTTP errors
 * - nodejs_*: Node.js runtime metrics (memory, event loop, etc.)
 * - Custom business metrics (TAT breaches, workflow counts, etc.)
 */

import { Request, Response, NextFunction, Router } from 'express';
import client from 'prom-client';

// ============================================================================
// REGISTRY SETUP
// ============================================================================

// Create a custom registry to avoid conflicts with default metrics
const register = new client.Registry();

// Add default Node.js metrics (memory, CPU, event loop, GC, etc.)
client.collectDefaultMetrics({
  register,
  prefix: 'nodejs_',
  labels: { app: 're-workflow', service: 'backend' },
});

// ============================================================================
// HTTP METRICS
// ============================================================================

// Total HTTP requests counter
const httpRequestsTotal = new client.Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status_code'],
  registers: [register],
});

// HTTP request duration histogram
const httpRequestDuration = new client.Histogram({
  name: 'http_request_duration_seconds',
  help: 'HTTP request latency in seconds',
  labelNames: ['method', 'route', 'status_code'],
  buckets: [0.01, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10],
  registers: [register],
});

// HTTP errors counter
const httpRequestErrors = new client.Counter({
  name: 'http_request_errors_total',
  help: 'Total number of HTTP errors (4xx and 5xx)',
  labelNames: ['method', 'route', 'status_code', 'error_type'],
  registers: [register],
});

// Active HTTP connections gauge
const activeConnections = new client.Gauge({
  name: 'http_active_connections',
  help: 'Number of active HTTP connections',
  registers: [register],
});

// ============================================================================
// BUSINESS METRICS
// ============================================================================

// TAT breaches counter
export const tatBreachesTotal = new client.Counter({
  name: 'tat_breaches_total',
  help: 'Total number of TAT breaches',
  labelNames: ['department', 'workflow_type', 'breach_level'],
  registers: [register],
});

// Pending workflows gauge
export const pendingWorkflowsCount = new client.Gauge({
  name: 'pending_workflows_count',
  help: 'Current number of pending workflows',
  labelNames: ['department', 'status'],
  registers: [register],
});

// Workflow operations counter
export const workflowOperationsTotal = new client.Counter({
  name: 'workflow_operations_total',
  help: 'Total number of workflow operations',
  labelNames: ['operation', 'status'],
  registers: [register],
});

// User authentication events counter
export const authEventsTotal = new client.Counter({
  name: 'auth_events_total',
  help: 'Total number of authentication events',
  labelNames: ['event_type', 'success'],
  registers: [register],
});

// Database query duration histogram
export const dbQueryDuration = new client.Histogram({
  name: 'db_query_duration_seconds',
  help: 'Database query latency in seconds',
  labelNames: ['operation', 'table'],
  buckets: [0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5],
  registers: [register],
});

// Redis operations counter
export const redisOperationsTotal = new client.Counter({
  name: 'redis_operations_total',
  help: 'Total number of Redis operations',
  labelNames: ['operation', 'status'],
  registers: [register],
});

// AI service calls
export const aiServiceCalls = new client.Counter({
  name: 'ai_service_calls_total',
  help: 'Total number of AI service calls',
  labelNames: ['provider', 'operation', 'status'],
  registers: [register],
});

export const aiServiceDuration = new client.Histogram({
  name: 'ai_service_duration_seconds',
  help: 'AI service call latency in seconds',
  labelNames: ['provider', 'operation'],
  buckets: [0.5, 1, 2, 5, 10, 30, 60],
  registers: [register],
});

// ============================================================================
// MIDDLEWARE
// ============================================================================

/**
 * Normalize route path for metrics labels
 * Replaces dynamic segments like UUIDs and IDs with placeholders
 */
function normalizeRoutePath(path: string): string {
  return path
    // Replace UUIDs
    .replace(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/gi, ':id')
    // Replace numeric IDs
    .replace(/\/\d+/g, '/:id')
    // Replace request IDs (REQ-XXXX-XXX format)
    .replace(/REQ-\d+-\d+/gi, ':requestId')
    // Clean up multiple slashes
    .replace(/\/+/g, '/');
}
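// Illustrative calls (not part of the original file) showing how the
// normalizer above keeps metric labels low-cardinality:
//   normalizeRoutePath('/workflows/3f2b8c1a-9d4e-4f6a-8b2c-1a2b3c4d5e6f/pause')
//     -> '/workflows/:id/pause'
//   normalizeRoutePath('/requests/REQ-2024-001')
//     -> '/requests/:requestId'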
/**
 * HTTP metrics middleware
 * Tracks request counts, durations, and errors
 */
export function metricsMiddleware(req: Request, res: Response, next: NextFunction): void {
  // Skip metrics endpoint itself
  if (req.path === '/metrics') {
    return next();
  }

  const startTime = Date.now();
  activeConnections.inc();

  // Capture response on finish
  res.on('finish', () => {
    const duration = (Date.now() - startTime) / 1000; // Convert to seconds
    const route = normalizeRoutePath(req.route?.path || req.path);
    const statusCode = res.statusCode.toString();
    const method = req.method;

    // Record request count
    httpRequestsTotal.inc({ method, route, status_code: statusCode });

    // Record request duration
    httpRequestDuration.observe(
      { method, route, status_code: statusCode },
      duration
    );

    // Record errors (4xx and 5xx)
    if (res.statusCode >= 400) {
      const errorType = res.statusCode >= 500 ? 'server_error' : 'client_error';
      httpRequestErrors.inc({
        method,
        route,
        status_code: statusCode,
        error_type: errorType,
      });
    }

    activeConnections.dec();
  });

  // Handle connection errors
  res.on('error', () => {
    activeConnections.dec();
  });

  next();
}

/**
 * Metrics endpoint handler
 * Returns Prometheus-formatted metrics
 */
export async function metricsHandler(_req: Request, res: Response): Promise<void> {
  try {
    res.set('Content-Type', register.contentType);
    const metrics = await register.metrics();
    res.end(metrics);
  } catch (error) {
    res.status(500).end('Error collecting metrics');
  }
}

/**
 * Create metrics router
 * Sets up the /metrics endpoint
 */
export function createMetricsRouter(): Router {
  const router = Router();

  // Metrics endpoint (GET /metrics)
  router.get('/metrics', metricsHandler);

  return router;
}

// ============================================================================
// HELPER FUNCTIONS FOR RECORDING METRICS
// ============================================================================

/**
 * Record a TAT breach event
 */
export function recordTATBreach(department: string, workflowType: string, breachLevel: string = 'warning'): void {
  tatBreachesTotal.inc({ department, workflow_type: workflowType, breach_level: breachLevel });
}

/**
 * Update pending workflows count
 */
export function updatePendingWorkflows(department: string, status: string, count: number): void {
  pendingWorkflowsCount.set({ department, status }, count);
}

/**
 * Record a workflow operation
 */
export function recordWorkflowOperation(operation: string, success: boolean): void {
  workflowOperationsTotal.inc({ operation, status: success ? 'success' : 'failure' });
}

/**
 * Record an authentication event
 */
export function recordAuthEvent(eventType: string, success: boolean): void {
  authEventsTotal.inc({ event_type: eventType, success: success ? 'true' : 'false' });
}

/**
 * Record a database query duration
 */
export function recordDBQuery(operation: string, table: string, durationMs: number): void {
  dbQueryDuration.observe({ operation, table }, durationMs / 1000);
}

/**
 * Record a Redis operation
 */
export function recordRedisOperation(operation: string, success: boolean): void {
  redisOperationsTotal.inc({ operation, status: success ? 'success' : 'failure' });
}

/**
 * Record an AI service call
 */
export function recordAIServiceCall(provider: string, operation: string, success: boolean, durationMs?: number): void {
  aiServiceCalls.inc({ provider, operation, status: success ? 'success' : 'failure' });
  if (durationMs !== undefined) {
    aiServiceDuration.observe({ provider, operation }, durationMs / 1000);
  }
}

// Export the registry for advanced use cases
export { register };
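Wiring this into the app is two `app.use` calls plus explicit business-metric calls from service code. A minimal sketch (the import path is an assumption from the file location above):

```typescript
import express from 'express';
import { metricsMiddleware, createMetricsRouter, recordWorkflowOperation } from './middlewares/metrics.middleware';

const app = express();
app.use(metricsMiddleware);      // must run before the routes it should time
app.use(createMetricsRouter());  // serves GET /metrics for the Prometheus scraper

// Business metrics are recorded explicitly from service code:
recordWorkflowOperation('create', true); // increments workflow_operations_total{operation="create",status="success"}
```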
@@ -38,6 +38,13 @@ router.get(
  asyncHandler(summaryController.getSummaryByRequestId.bind(summaryController))
);

// Regenerate summary for a request (MUST come before /:summaryId)
// Access: Initiator or Admin/Management users
router.post(
  '/request/:requestId/regenerate',
  asyncHandler(summaryController.regenerateSummary.bind(summaryController))
);

// Share summary with users (MUST come before /:summaryId)
router.post(
  '/:summaryId/share',
@@ -460,18 +460,18 @@ router.post('/:id/approvers/at-level',
);

// Pause workflow routes
// POST /workflows/:id/pause - Pause a workflow (approver only)
// POST /workflows/:id/pause - Pause a workflow (approver or initiator)
router.post('/:id/pause',
  authenticateToken,
  requireParticipantTypes(['APPROVER']), // Only approvers can pause
  requireParticipantTypes(['APPROVER', 'INITIATOR']), // Both approvers and initiators can pause
  validateParams(workflowParamsSchema),
  asyncHandler(pauseController.pauseWorkflow.bind(pauseController))
);

// POST /workflows/:id/resume - Resume a paused workflow (approver who paused or initiator)
// POST /workflows/:id/resume - Resume a paused workflow (approver or initiator)
router.post('/:id/resume',
  authenticateToken,
  requireParticipantTypes(['APPROVER', 'INITIATOR']),
  requireParticipantTypes(['APPROVER', 'INITIATOR']), // Both approvers and initiators can resume
  validateParams(workflowParamsSchema),
  asyncHandler(pauseController.resumeWorkflow.bind(pauseController))
);
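The guard middleware itself is not part of this diff. A hypothetical sketch of the shape `requireParticipantTypes` likely has, assuming an earlier middleware attaches the caller's participant record:

```typescript
import { Request, Response, NextFunction } from 'express';

function requireParticipantTypes(allowed: Array<'APPROVER' | 'INITIATOR' | 'SPECTATOR'>) {
  return (req: Request, res: Response, next: NextFunction): void => {
    // Assumption: authentication/participant-lookup middleware populated this
    const participantType = (req as any).participant?.participantType;
    if (!participantType || !allowed.includes(participantType)) {
      res.status(403).json({ success: false, message: 'Not permitted for this participant type' });
      return;
    }
    next();
  };
}
```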
@@ -5,7 +5,7 @@ export const SYSTEM_EVENT_REQUEST_ID = '00000000-0000-0000-0000-000000000001';

export type ActivityEntry = {
  requestId: string;
  type: 'created' | 'submitted' | 'assignment' | 'approval' | 'rejection' | 'status_change' | 'comment' | 'reminder' | 'document_added' | 'sla_warning' | 'ai_conclusion_generated' | 'closed' | 'login' | 'paused' | 'resumed' | 'pause_retriggered';
  type: 'created' | 'submitted' | 'assignment' | 'approval' | 'rejection' | 'status_change' | 'comment' | 'reminder' | 'document_added' | 'sla_warning' | 'ai_conclusion_generated' | 'summary_generated' | 'closed' | 'login' | 'paused' | 'resumed' | 'pause_retriggered';
  user?: { userId: string; name?: string; email?: string };
  timestamp: string;
  action: string;
@@ -1,4 +1,4 @@
import logger from '@utils/logger';
import logger, { logAIEvent } from '@utils/logger';
import { getAIProviderConfig } from './configReader.service';

// Provider-specific interfaces
@@ -45,7 +45,7 @@ class ClaudeProvider implements AIProvider {
  async generateText(prompt: string): Promise<string> {
    if (!this.client) throw new Error('Claude client not initialized');

    logger.info(`[AI Service] Generating with Claude model: ${this.model}`);
    logAIEvent('request', { provider: 'claude', model: this.model });

    const response = await this.client.messages.create({
      model: this.model,
@@ -103,7 +103,7 @@ class OpenAIProvider implements AIProvider {
  async generateText(prompt: string): Promise<string> {
    if (!this.client) throw new Error('OpenAI client not initialized');

    logger.info(`[AI Service] Generating with OpenAI model: ${this.model}`);
    logAIEvent('request', { provider: 'openai', model: this.model });

    const response = await this.client.chat.completions.create({
      model: this.model,
@@ -160,7 +160,7 @@ class GeminiProvider implements AIProvider {
  async generateText(prompt: string): Promise<string> {
    if (!this.client) throw new Error('Gemini client not initialized');

    logger.info(`[AI Service] Generating with Gemini model: ${this.model}`);
    logAIEvent('request', { provider: 'gemini', model: this.model });

    const model = this.client.getGenerativeModel({ model: this.model });
    const result = await model.generateContent(prompt);
@@ -548,13 +548,33 @@ ${isRejected
- Be concise and direct - every word must add value
- No time-specific words like "today", "now", "currently", "recently"
- No corporate jargon or buzzwords
- No emojis or excessive formatting
- No emojis
- Write like a professional documenting a completed process
- Focus on facts: what was requested, who ${isRejected ? 'rejected' : 'approved'}, what was decided
- Use past tense for completed actions
- Use short sentences and avoid filler words

Write the conclusion now. STRICT LIMIT: ${maxLength} characters maximum. Prioritize and condense if needed:`;
**FORMAT REQUIREMENT - HTML Rich Text:**
- Generate content in HTML format for rich text editor display
- Use proper HTML tags for structure and formatting:
  * <p>...</p> for paragraphs
  * <strong>...</strong> for important text/headings
  * <ul><li>...</li></ul> for bullet points
  * <ol><li>...</li></ol> for numbered lists
  * <br> for line breaks only when necessary
- Use semantic HTML to make the content readable and well-structured
- Example format:
  <p><strong>Request Summary:</strong> [Brief description]</p>
  <p><strong>Approval Decision:</strong> [Decision details]</p>
  <ul>
    <li>Key point 1</li>
    <li>Key point 2</li>
  </ul>
  <p><strong>Outcome:</strong> [Final outcome]</p>
- Keep HTML clean and minimal - no inline styles, no divs, no classes
- The HTML should render nicely in a rich text editor

Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters maximum (including HTML tags). Prioritize and condense if needed:`;

  return prompt;
}
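Each provider now emits a structured event alongside the human-readable log line. The diff only shows `logAIEvent` being imported, not defined; a minimal sketch of what it might look like inside `@utils/logger` (assumption — winston is a stand-in for whatever logger the project uses):

```typescript
import winston from 'winston';

const logger = winston.createLogger({ transports: [new winston.transports.Console()] });

export function logAIEvent(event: 'request' | 'response' | 'error', meta: Record<string, unknown>): void {
  // One structured record per AI call, so provider traffic can be filtered and
  // aggregated in the metrics dashboard
  logger.info(`[AI Event] ${event}`, { category: 'ai', event, ...meta });
}
```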
@@ -6,11 +6,12 @@ import { ApprovalAction } from '../types/approval.types';
import { ApprovalStatus, WorkflowStatus } from '../types/common.types';
import { calculateTATPercentage } from '@utils/helpers';
import { calculateElapsedWorkingHours } from '@utils/tatTimeUtils';
import logger from '@utils/logger';
import logger, { logWorkflowEvent, logAIEvent } from '@utils/logger';
import { Op } from 'sequelize';
import { notificationService } from './notification.service';
import { activityService } from './activity.service';
import { tatSchedulerService } from './tatScheduler.service';
import { emitToRequestRoom } from '../realtime/socket';

export class ApprovalService {
  async approveLevel(levelId: string, action: ApprovalAction, _userId: string, requestMetadata?: { ipAddress?: string | null; userAgent?: string | null }): Promise<ApprovalLevel | null> {
@@ -121,7 +122,11 @@ export class ApprovalService {
  },
  { where: { requestId: level.requestId } }
);
logger.info(`Final approver approved. Workflow ${level.requestId} closed as APPROVED`);
logWorkflowEvent('approved', level.requestId, {
  level: level.levelNumber,
  isFinalApproval: true,
  status: 'APPROVED',
});

// Log final approval activity first (so it's included in AI context)
activityService.log({
@@ -152,7 +157,10 @@ export class ApprovalService {
const remarkGenerationEnabled = (await getConfigValue('AI_REMARK_GENERATION_ENABLED', 'true'))?.toLowerCase() === 'true';

if (aiEnabled && remarkGenerationEnabled && aiService.isAvailable()) {
  logger.info(`[Approval] 🔄 Starting background AI conclusion generation for ${level.requestId}...`);
  logAIEvent('request', {
    requestId: level.requestId,
    action: 'conclusion_generation_started',
  });

  // Gather context for AI generation
  const approvalLevels = await ApprovalLevel.findAll({
@@ -243,7 +251,10 @@ export class ApprovalService {
    finalizedAt: null
  } as any);

  logger.info(`[Approval] ✅ Background AI conclusion completed for ${level.requestId}`);
  logAIEvent('response', {
    requestId: level.requestId,
    action: 'conclusion_generation_completed',
  });

  // Log activity
  activityService.log({
@@ -266,9 +277,50 @@ export class ApprovalService {
  logger.warn(`[Approval] AI service unavailable for ${level.requestId}, skipping conclusion generation`);
  }
}

// Auto-generate RequestSummary after final approval (system-level generation)
// This makes the summary immediately available when user views the approved request
try {
  const { summaryService } = await import('./summary.service');
  const summary = await summaryService.createSummary(level.requestId, 'system', {
    isSystemGeneration: true
  });
  logger.info(`[Approval] ✅ Auto-generated summary ${(summary as any).summaryId} for approved request ${level.requestId}`);

  // Log summary generation activity
  activityService.log({
    requestId: level.requestId,
    type: 'summary_generated',
    user: { userId: 'system', name: 'System' },
    timestamp: new Date().toISOString(),
    action: 'Summary Auto-Generated',
    details: 'Request summary auto-generated after final approval',
    ipAddress: undefined,
    userAgent: undefined
  });
} catch (summaryError: any) {
  // Log but don't fail - initiator can regenerate later
  logger.error(`[Approval] Failed to auto-generate summary for ${level.requestId}:`, summaryError.message);
}

} catch (aiError) {
  logger.error(`[Approval] Background AI generation failed for ${level.requestId}:`, aiError);
  logAIEvent('error', {
    requestId: level.requestId,
    action: 'conclusion_generation_failed',
    error: aiError,
  });
  // Silent failure - initiator can write manually

  // Still try to generate summary even if AI conclusion failed
  try {
    const { summaryService } = await import('./summary.service');
    const summary = await summaryService.createSummary(level.requestId, 'system', {
      isSystemGeneration: true
    });
    logger.info(`[Approval] ✅ Auto-generated summary ${(summary as any).summaryId} for approved request ${level.requestId} (without AI conclusion)`);
  } catch (summaryError: any) {
    logger.error(`[Approval] Failed to auto-generate summary for ${level.requestId}:`, summaryError.message);
  }
}
})().catch(err => {
  // Catch any unhandled promise rejections
@@ -448,7 +500,11 @@ export class ApprovalService {
  }
);

logger.info(`Level ${level.levelNumber} rejected. Workflow ${level.requestId} marked as REJECTED. Awaiting closure from initiator.`);
logWorkflowEvent('rejected', level.requestId, {
  level: level.levelNumber,
  status: 'REJECTED',
  message: 'Awaiting closure from initiator',
});

// Log rejection activity first (so it's included in AI context)
if (wf) {
@@ -621,6 +677,16 @@ export class ApprovalService {
}

logger.info(`Approval level ${levelId} ${action.action.toLowerCase()}ed`);

// Emit real-time update to all users viewing this request
emitToRequestRoom(level.requestId, 'request:updated', {
  requestId: level.requestId,
  requestNumber: (wf as any)?.requestNumber,
  action: action.action,
  levelNumber: level.levelNumber,
  timestamp: now.toISOString()
});

return updatedLevel;
} catch (error) {
  logger.error(`Failed to ${action.action.toLowerCase()} level ${levelId}:`, error);
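The AI conclusion and summary generation run inside a fire-and-forget async IIFE: the approval response returns immediately while the slow work continues, and failures are logged rather than surfaced. A standalone reduction of that shape (the function and callback names are illustrative, not from the codebase):

```typescript
function startBackgroundConclusion(requestId: string, generate: (id: string) => Promise<void>): void {
  (async () => {
    try {
      await generate(requestId); // slow AI + summary work, off the request path
    } catch (err) {
      // Silent failure: the initiator can still write remarks manually
      console.error(`Background generation failed for ${requestId}:`, err);
    }
  })().catch(err => {
    // Safety net for rejections that escape the inner try/catch
    console.error(`Unhandled rejection in background task for ${requestId}:`, err);
  });
}
```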
@@ -3,7 +3,7 @@ import { SSOUserData, ssoConfig } from '../config/sso';
import jwt, { SignOptions } from 'jsonwebtoken';
import type { StringValue } from 'ms';
import { LoginResponse } from '../types/auth.types';
import logger from '../utils/logger';
import logger, { logAuthEvent } from '../utils/logger';
import axios from 'axios';

export class AuthService {
@@ -71,9 +71,9 @@ export class AuthService {
// Reload to get updated data
user = await user.reload();

logger.info(`User updated via SSO`, {
logAuthEvent('sso_callback', user.userId, {
  email: userData.email,
  oktaSub: userData.oktaSub,
  action: 'user_updated',
  updatedFields: Object.keys(userUpdateData),
});
} else {
@@ -93,10 +93,9 @@ export class AuthService {
  lastLogin: new Date()
});

logger.info(`New user created via SSO`, {
logAuthEvent('sso_callback', user.userId, {
  email: userData.email,
  oktaSub: userData.oktaSub,
  employeeId: userData.employeeId || 'not provided',
  action: 'user_created',
  displayName,
  hasDepartment: !!userData.department,
  hasDesignation: !!userData.designation,
@@ -123,9 +122,9 @@ export class AuthService {
  refreshToken
};
} catch (error) {
  logger.error(`SSO callback failed`, {
  logAuthEvent('auth_failure', undefined, {
    email: userData.email,
    oktaSub: userData.oktaSub,
    action: 'sso_callback_failed',
    error: error instanceof Error ? error.message : 'Unknown error',
  });
  const errorMessage = error instanceof Error ? error.message : 'Unknown error';
@@ -204,7 +203,10 @@ export class AuthService {

  return this.generateAccessToken(user);
} catch (error) {
  logger.error('Token refresh failed:', error);
  logAuthEvent('auth_failure', undefined, {
    action: 'token_refresh_failed',
    error,
  });
  throw new Error('Token refresh failed');
}
}
@@ -447,14 +449,13 @@ export class AuthService {
  oktaIdToken: id_token, // Include id_token for proper Okta logout
};
} catch (error: any) {
  logger.error('Token exchange with Okta failed:', {
    message: error.message,
    response: error.response?.data,
  logAuthEvent('auth_failure', undefined, {
    action: 'okta_token_exchange_failed',
    errorMessage: error.message,
    status: error.response?.status,
    statusText: error.response?.statusText,
    headers: error.response?.headers,
    code: error.code,
    stack: error.stack,
    oktaError: error.response?.data?.error,
    oktaErrorDescription: error.response?.data?.error_description,
  });

  // Provide a more user-friendly error message
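The `oktaError`/`oktaErrorDescription` fields come from axios's error shape: Okta's OAuth error body surfaces on `error.response.data`. A hedged sketch of that probe (the domain and handler are hypothetical; `invalid_grant` is a standard OAuth error code):

```typescript
import axios from 'axios';

async function exchangeCode(code: string): Promise<void> {
  try {
    await axios.post('https://example.okta.com/oauth2/v1/token',
      new URLSearchParams({ grant_type: 'authorization_code', code }));
  } catch (error) {
    if (axios.isAxiosError(error)) {
      console.log(error.response?.status);                  // e.g. 400
      console.log(error.response?.data?.error);             // e.g. 'invalid_grant'
      console.log(error.response?.data?.error_description); // human-readable detail
    }
  }
}
```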
@@ -12,6 +12,30 @@ let configCache: Map<string, string> = new Map();
let cacheExpiry: Date | null = null;
const CACHE_DURATION_MS = 5 * 60 * 1000; // 5 minutes

// Sensitive config keys that should be masked in logs
const SENSITIVE_CONFIG_PATTERNS = [
  'API_KEY', 'SECRET', 'PASSWORD', 'TOKEN', 'CREDENTIAL',
  'PRIVATE', 'AUTH', 'KEY', 'VAPID'
];

/**
 * Check if a config key contains sensitive data
 */
function isSensitiveConfig(configKey: string): boolean {
  const upperKey = configKey.toUpperCase();
  return SENSITIVE_CONFIG_PATTERNS.some(pattern => upperKey.includes(pattern));
}

/**
 * Mask sensitive value for logging (show first 4 and last 2 chars)
 */
function maskSensitiveValue(value: string): string {
  if (!value || value.length <= 8) {
    return '***REDACTED***';
  }
  return `${value.substring(0, 4)}****${value.substring(value.length - 2)}`;
}

/**
 * Get a configuration value from database (with caching)
 */
@@ -40,12 +64,16 @@ export async function getConfigValue(configKey: string, defaultValue: string = '
// Always update cache expiry when loading from database
cacheExpiry = new Date(Date.now() + CACHE_DURATION_MS);

logger.info(`[ConfigReader] Loaded config '${configKey}' = '${value}' from database (cached for 5min)`);
// Mask sensitive values in logs for security
const logValue = isSensitiveConfig(configKey) ? maskSensitiveValue(value) : value;
logger.info(`[ConfigReader] Loaded config '${configKey}' = '${logValue}' from database (cached for 5min)`);

return value;
}

logger.warn(`[ConfigReader] Config key '${configKey}' not found, using default: ${defaultValue}`);
// Mask sensitive default values in logs for security
const logDefault = isSensitiveConfig(configKey) ? maskSensitiveValue(defaultValue) : defaultValue;
logger.warn(`[ConfigReader] Config key '${configKey}' not found, using default: ${logDefault}`);
return defaultValue;
} catch (error) {
  logger.error(`[ConfigReader] Error reading config '${configKey}':`, error);
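Concretely, the two helpers above behave like this (illustrative values only; a hypothetical key is used for the masked example):

```typescript
maskSensitiveValue('sk-abc123def456ghi789');        // 'sk-a****89'
maskSensitiveValue('short');                        // '***REDACTED***' (8 chars or fewer)
isSensitiveConfig('AI_REMARK_GENERATION_ENABLED');  // false — value is logged in full
isSensitiveConfig('VAPID_PRIVATE_KEY');             // true  — matches 'VAPID', 'PRIVATE', and 'KEY'
```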
@@ -1,5 +1,5 @@
import webpush from 'web-push';
import logger from '@utils/logger';
import logger, { logNotificationEvent } from '@utils/logger';
import { Subscription } from '@models/Subscription';
import { Notification } from '@models/Notification';

@@ -151,14 +151,25 @@ class NotificationService {
try {
  await webpush.sendNotification(sub, message);
  await notification.update({ pushSent: true });
  logger.info(`[Notification] Push sent to user ${userId}`);
  logNotificationEvent('sent', {
    userId,
    channel: 'push',
    type: payload.type,
    requestId: payload.requestId,
  });
} catch (err: any) {
  // Check if subscription is expired/invalid
  if (this.isExpiredSubscriptionError(err)) {
    logger.warn(`[Notification] Expired subscription detected for user ${userId}, removing...`);
    await this.removeExpiredSubscription(userId, sub.endpoint);
  } else {
    logger.error(`[Notification] Failed to send push to user ${userId}:`, err);
    logNotificationEvent('failed', {
      userId,
      channel: 'push',
      type: payload.type,
      requestId: payload.requestId,
      error: err,
    });
  }
}
}
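`isExpiredSubscriptionError` is not shown in this diff. A plausible sketch: web-push rejects with a `WebPushError` carrying the push service's HTTP status, and 404/410 are the standard "subscription no longer valid" responses:

```typescript
import { WebPushError } from 'web-push';

function isExpiredSubscriptionError(err: unknown): boolean {
  // 410 Gone (and sometimes 404) means the browser revoked the subscription
  return err instanceof WebPushError && (err.statusCode === 404 || err.statusCode === 410);
}
```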
@@ -9,6 +9,7 @@ import { calculateElapsedWorkingHours } from '@utils/tatTimeUtils';
import { notificationService } from './notification.service';
import { activityService } from './activity.service';
import dayjs from 'dayjs';
import { emitToRequestRoom } from '../realtime/socket';

export class PauseService {
  /**
@@ -70,9 +71,12 @@ export class PauseService {
  throw new Error('No active approval level found to pause');
}

// Verify user is the approver for this level
if ((level as any).approverId !== userId) {
  throw new Error('Only the assigned approver can pause this workflow');
// Verify user is either the approver for this level OR the initiator
const isApprover = (level as any).approverId === userId;
const isInitiator = (workflow as any).initiatorId === userId;

if (!isApprover && !isInitiator) {
  throw new Error('Only the assigned approver or the initiator can pause this workflow');
}

// Check if level is already paused
@@ -167,7 +171,9 @@ export class PauseService {
const requestNumber = (workflow as any).requestNumber;
const title = (workflow as any).title;

// Notify initiator
// Notify initiator only if someone else (approver) paused the request
// Skip notification if initiator paused their own request
if (!isInitiator) {
  await notificationService.sendToUsers([(workflow as any).initiatorId], {
    title: 'Workflow Paused',
    body: `Your request "${title}" has been paused by ${userName}. Reason: ${reason}. Will resume on ${dayjs(resumeDate).format('MMM DD, YYYY')}.`,
@@ -178,8 +184,9 @@ export class PauseService {
    priority: 'HIGH',
    actionRequired: false
  });
}

// Notify approver (self)
// Notify the user who paused (confirmation)
await notificationService.sendToUsers([userId], {
  title: 'Workflow Paused Successfully',
  body: `You have paused request "${title}". It will automatically resume on ${dayjs(resumeDate).format('MMM DD, YYYY')}.`,
@@ -191,6 +198,22 @@ export class PauseService {
  actionRequired: false
});

// If initiator paused, notify the current approver
if (isInitiator && (level as any).approverId) {
  const approver = await User.findByPk((level as any).approverId);
  const approverUserId = (level as any).approverId;
  await notificationService.sendToUsers([approverUserId], {
    title: 'Workflow Paused by Initiator',
    body: `Request "${title}" has been paused by the initiator (${userName}). Reason: ${reason}. Will resume on ${dayjs(resumeDate).format('MMM DD, YYYY')}.`,
    requestId,
    requestNumber,
    url: `/request/${requestNumber}`,
    type: 'workflow_paused',
    priority: 'HIGH',
    actionRequired: false
  });
}

// Log activity
await activityService.log({
  requestId,
@@ -208,6 +231,15 @@ export class PauseService {

logger.info(`[Pause] Workflow ${requestId} paused at level ${(level as any).levelNumber} by ${userId}`);

// Emit real-time update to all users viewing this request
emitToRequestRoom(requestId, 'request:updated', {
  requestId,
  requestNumber: (workflow as any).requestNumber,
  action: 'PAUSE',
  levelNumber: (level as any).levelNumber,
  timestamp: now.toISOString()
});

return { workflow, level };
} catch (error: any) {
  logger.error(`[Pause] Failed to pause workflow:`, error);
@@ -219,8 +251,9 @@ export class PauseService {
 * Resume a paused workflow
 * @param requestId - The workflow request ID
 * @param userId - The user ID who is resuming (optional, for manual resume)
 * @param notes - Optional notes for the resume action
 */
async resumeWorkflow(requestId: string, userId?: string): Promise<{ workflow: WorkflowRequest; level: ApprovalLevel | null }> {
async resumeWorkflow(requestId: string, userId?: string, notes?: string): Promise<{ workflow: WorkflowRequest; level: ApprovalLevel | null }> {
  try {
    const now = new Date();

@@ -249,14 +282,13 @@ export class PauseService {
}

// Verify user has permission (if manual resume)
// Note: Initiators cannot resume directly - they must use retrigger to request approver to resume
// Exception: When skipping approver (requirement 3.7), initiator can cancel pause
// Both initiator and current approver can resume the workflow
if (userId) {
  const pausedBy = (workflow as any).pausedBy;
  if (pausedBy !== userId) {
    // Only the approver who paused can resume directly
    // Initiators should use retrigger to request resume (requirement 3.5)
    throw new Error('Only the approver who paused this workflow can resume it. Initiators should use the retrigger option to request the approver to resume.');
  const isApprover = (level as any).approverId === userId;
  const isInitiator = (workflow as any).initiatorId === userId;

  if (!isApprover && !isInitiator) {
    throw new Error('Only the assigned approver or the initiator can resume this workflow');
  }
}

@@ -345,9 +377,15 @@ export class PauseService {

const requestNumber = (workflow as any).requestNumber;
const title = (workflow as any).title;
const initiatorId = (workflow as any).initiatorId;
const approverId = (level as any).approverId;
const isResumedByInitiator = userId === initiatorId;
const isResumedByApprover = userId === approverId;

// Notify initiator
await notificationService.sendToUsers([(workflow as any).initiatorId], {
// Notify initiator only if someone else resumed (or auto-resume)
// Skip if initiator resumed their own request
if (!isResumedByInitiator) {
  await notificationService.sendToUsers([initiatorId], {
    title: 'Workflow Resumed',
    body: `Your request "${title}" has been resumed ${userId ? `by ${resumeUserName}` : 'automatically'}.`,
    requestId,
@@ -357,9 +395,12 @@ export class PauseService {
    priority: 'HIGH',
    actionRequired: false
  });
}

// Notify approver
await notificationService.sendToUsers([(level as any).approverId], {
// Notify approver only if someone else resumed (or auto-resume)
// Skip if approver resumed the request themselves
if (!isResumedByApprover && approverId) {
  await notificationService.sendToUsers([approverId], {
    title: 'Workflow Resumed',
    body: `Request "${title}" has been resumed ${userId ? `by ${resumeUserName}` : 'automatically'}. Please continue with your review.`,
    requestId,
@@ -369,24 +410,53 @@ export class PauseService {
    priority: 'HIGH',
    actionRequired: true
  });
}

// Send confirmation to the user who resumed (if manual resume)
if (userId) {
  await notificationService.sendToUsers([userId], {
    title: 'Workflow Resumed Successfully',
    body: `You have resumed request "${title}". ${isResumedByApprover ? 'Please continue with your review.' : ''}`,
    requestId,
    requestNumber,
    url: `/request/${requestNumber}`,
    type: 'workflow_resumed',
    priority: 'MEDIUM',
    actionRequired: isResumedByApprover
  });
}

// Log activity with notes
const resumeDetails = notes
  ? `Workflow resumed ${userId ? `by ${resumeUserName}` : 'automatically'} at level ${(level as any).levelNumber}. Notes: ${notes}`
  : `Workflow resumed ${userId ? `by ${resumeUserName}` : 'automatically'} at level ${(level as any).levelNumber}.`;

// Log activity
await activityService.log({
  requestId,
  type: 'resumed',
  user: userId ? { userId, name: resumeUserName } : undefined,
  timestamp: now.toISOString(),
  action: 'Workflow Resumed',
  details: `Workflow resumed ${userId ? `by ${resumeUserName}` : 'automatically'} at level ${(level as any).levelNumber}.`,
  details: resumeDetails,
  metadata: {
    levelId: (level as any).levelId,
    levelNumber: (level as any).levelNumber,
    wasAutoResume: !userId
    wasAutoResume: !userId,
    notes: notes || null
  }
});

logger.info(`[Pause] Workflow ${requestId} resumed ${userId ? `by ${userId}` : 'automatically'}`);

// Emit real-time update to all users viewing this request
emitToRequestRoom(requestId, 'request:updated', {
  requestId,
  requestNumber: (workflow as any).requestNumber,
  action: 'RESUME',
  levelNumber: (level as any).levelNumber,
  timestamp: now.toISOString()
});

return { workflow, level };
} catch (error: any) {
  logger.error(`[Pause] Failed to resume workflow:`, error);
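Both the pause and resume paths now apply the same permission rule. Reduced to one predicate (a sketch of the logic above, not a new API):

```typescript
// Pause/resume is allowed for the current level's approver or the workflow's initiator
function canPauseOrResume(userId: string, level: { approverId: string }, workflow: { initiatorId: string }): boolean {
  return level.approverId === userId || workflow.initiatorId === userId;
}
```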
@@ -1,4 +1,4 @@
import { RequestSummary, SharedSummary, WorkflowRequest, ApprovalLevel, User, ConclusionRemark } from '@models/index';
import { RequestSummary, SharedSummary, WorkflowRequest, ApprovalLevel, User, ConclusionRemark, Participant } from '@models/index';
import '@models/index'; // Ensure associations are loaded
import { Op } from 'sequelize';
import logger from '@utils/logger';
@@ -8,9 +8,27 @@ export class SummaryService {
  /**
   * Create a summary for a closed request
   * Pulls data from workflow_requests, approval_levels, and conclusion_remarks
   *
   * Access Control:
   * - 'system': Allows system-level auto-generation on final approval
   * - initiator: The request initiator can create/regenerate
   * - admin/management: Admin or management role users can create/regenerate via API
   *
   * @param requestId - The workflow request ID
   * @param userId - The user ID requesting the summary (or 'system' for auto-generation)
   * @param options - Optional parameters
   * @param options.isSystemGeneration - Set to true for system-level auto-generation
   * @param options.userRole - The role of the user (for admin access check)
   * @param options.regenerate - Set to true to regenerate (delete existing and create new)
   */
  async createSummary(requestId: string, initiatorId: string): Promise<RequestSummary> {
  async createSummary(
    requestId: string,
    userId: string,
    options?: { isSystemGeneration?: boolean; userRole?: string; regenerate?: boolean }
  ): Promise<RequestSummary> {
    try {
      const { isSystemGeneration = false, userRole, regenerate = false } = options || {};

      // Check if request exists and is closed
      const workflow = await WorkflowRequest.findByPk(requestId, {
        include: [
@@ -22,29 +40,36 @@ export class SummaryService {
  throw new Error('Workflow request not found');
}

// Verify request is closed
// Verify request is closed (APPROVED, REJECTED, or CLOSED)
const status = (workflow as any).status?.toUpperCase();
if (status !== 'APPROVED' && status !== 'REJECTED' && status !== 'CLOSED') {
  throw new Error('Request must be closed (APPROVED, REJECTED, or CLOSED) before creating summary');
}

// Verify initiator owns the request
if ((workflow as any).initiatorId !== initiatorId) {
  throw new Error('Only the initiator can create a summary for this request');
const initiatorId = (workflow as any).initiatorId;
const isInitiator = initiatorId === userId;
const isAdmin = userRole && ['admin', 'super_admin', 'management'].includes(userRole.toLowerCase());

// Access control: Allow system generation, initiator, or admin users
if (!isSystemGeneration && !isInitiator && !isAdmin) {
  throw new Error('Only the initiator or admin users can create a summary for this request');
}

// Check if summary already exists - return it if it does (idempotent behavior)
// Check if summary already exists
const existingSummary = await RequestSummary.findOne({
  where: { requestId }
});

if (existingSummary) {
  // Verify the existing summary belongs to the current initiator
  if ((existingSummary as any).initiatorId !== initiatorId) {
    throw new Error('Only the initiator can create a summary for this request');
  // If regenerate is requested by initiator or admin, delete existing and create new
  if (regenerate && (isInitiator || isAdmin)) {
    logger.info(`[Summary] Regenerating summary for request ${requestId}`);
    await existingSummary.destroy();
  } else {
    // Return existing summary (idempotent behavior)
    logger.info(`Summary already exists for request ${requestId}, returning existing summary`);
    return existingSummary as RequestSummary;
  }
  logger.info(`Summary already exists for request ${requestId}, returning existing summary`);
  return existingSummary as RequestSummary;
}

// Get conclusion remarks
@@ -81,10 +106,10 @@ export class SummaryService {
  isAiGenerated = false;
}

// Create summary
// Create summary - always use the actual initiator from the workflow
const summary = await RequestSummary.create({
  requestId,
  initiatorId,
  initiatorId: initiatorId, // Use workflow's initiator, not the requesting user
  title: (workflow as any).title || '',
  description: (workflow as any).description || null,
  closingRemarks,
@@ -92,7 +117,8 @@ export class SummaryService {
  conclusionId
});

logger.info(`[Summary] Created summary ${(summary as any).summaryId} for request ${requestId}`);
const generationType = isSystemGeneration ? 'system' : (isAdmin ? 'admin' : 'initiator');
logger.info(`[Summary] Created summary ${(summary as any).summaryId} for request ${requestId} (generated by: ${generationType})`);
return summary;
} catch (error) {
  logger.error(`[Summary] Failed to create summary for request ${requestId}:`, error);
@@ -216,15 +242,50 @@ export class SummaryService {
const initiator = (request as any).initiator || {};
const initiatorTimestamp = (request as any).submissionDate || (request as any).createdAt;

// Get conclusion remark if available
let conclusionRemark = (summary as any).ConclusionRemark || (summary as any).conclusionRemark;

// If not loaded and we have conclusionId, fetch by conclusionId
if (!conclusionRemark && (summary as any).conclusionId) {
  conclusionRemark = await ConclusionRemark.findByPk((summary as any).conclusionId);
}

// If still not found, fetch by requestId (summary may have been created before conclusion)
if (!conclusionRemark) {
  conclusionRemark = await ConclusionRemark.findOne({
    where: { requestId: (request as any).requestId }
  });
}

// Determine effective final remark:
// - If user edited: use finalRemark
// - If user closed without editing: use aiGeneratedRemark (becomes final)
// - Otherwise: use closingRemarks from summary snapshot
const effectiveFinalRemark = conclusionRemark?.finalRemark ||
  conclusionRemark?.aiGeneratedRemark ||
  (summary as any).closingRemarks ||
  '—';

logger.info(`[Summary] SharedSummary ${sharedSummaryId}: Effective final remark length: ${effectiveFinalRemark?.length || 0} chars (isEdited: ${conclusionRemark?.isEdited}, hasAI: ${!!conclusionRemark?.aiGeneratedRemark}, hasFinal: ${!!conclusionRemark?.finalRemark})`);

return {
  summaryId: (summary as any).summaryId,
  requestId: (request as any).requestId,
  requestNumber: (request as any).requestNumber || 'N/A',
  title: (summary as any).title || (request as any).title || '',
  description: (summary as any).description || (request as any).description || '',
  closingRemarks: (summary as any).closingRemarks || '—',
  closingRemarks: effectiveFinalRemark, // ✅ Effective final remark (edited or AI)
  isAiGenerated: (summary as any).isAiGenerated || false,
  createdAt: (summary as any).createdAt,
  // Include conclusion remark data for detailed view
  conclusionRemark: conclusionRemark ? {
    aiGeneratedRemark: conclusionRemark.aiGeneratedRemark,
    finalRemark: conclusionRemark.finalRemark,
    effectiveFinalRemark: effectiveFinalRemark, // ✅ Computed field for convenience
    isEdited: conclusionRemark.isEdited,
    generatedAt: conclusionRemark.generatedAt,
    finalizedAt: conclusionRemark.finalizedAt
  } : null,
  initiator: {
    name: initiator.displayName || 'Unknown',
    designation: initiator.designation || 'N/A',
@@ -239,7 +300,8 @@ export class SummaryService {
    priority: (request as any).priority || 'STANDARD',
    status: (request as any).status || 'CLOSED',
    submissionDate: (request as any).submissionDate,
    closureDate: (request as any).closureDate
    closureDate: (request as any).closureDate,
    conclusionRemark: effectiveFinalRemark // ✅ Use effective final remark
  }
};
} catch (error) {
@@ -262,8 +324,20 @@ export class SummaryService {
  return null;
}

// Check access: user must be initiator or have been shared with
// Check access: initiator, participants, management, or explicitly shared users
const isInitiator = (summary as any).initiatorId === userId;

// Check if user is a participant (approver or spectator)
const isParticipant = await Participant.findOne({
  where: { requestId, userId }
});

// Check if user has management/admin role
const currentUser = await User.findByPk(userId);
const userRole = (currentUser as any)?.role?.toUpperCase();
const isManagement = userRole && ['ADMIN', 'SUPER_ADMIN', 'MANAGEMENT'].includes(userRole);

// Check if explicitly shared
const isShared = await SharedSummary.findOne({
  where: {
    summaryId: (summary as any).summaryId,
@@ -271,7 +345,7 @@ export class SummaryService {
  }
});

if (!isInitiator && !isShared) {
if (!isInitiator && !isParticipant && !isManagement && !isShared) {
  return null; // No access, return null instead of throwing error
}

@@ -321,8 +395,23 @@ export class SummaryService {
  throw new Error('Associated workflow request not found');
}

// Check access: user must be initiator or have been shared with
// Check access: initiator, participants, management, or explicitly shared users
const isInitiator = (summary as any).initiatorId === userId;

// Check if user is a participant (approver or spectator) in the request
const isParticipant = await Participant.findOne({
  where: {
    requestId: (request as any).requestId,
    userId
  }
});

// Check if user has management/admin role
const currentUser = await User.findByPk(userId);
const userRole = (currentUser as any)?.role?.toUpperCase();
const isManagement = userRole && ['ADMIN', 'SUPER_ADMIN', 'MANAGEMENT'].includes(userRole);

// Check if explicitly shared
const isShared = await SharedSummary.findOne({
  where: {
    summaryId,
@@ -330,7 +419,7 @@ export class SummaryService {
  }
});

if (!isInitiator && !isShared) {
if (!isInitiator && !isParticipant && !isManagement && !isShared) {
  throw new Error('Access denied: You do not have permission to view this summary');
}

@@ -389,15 +478,50 @@ export class SummaryService {
const initiator = (request as any).initiator || {};
const initiatorTimestamp = (request as any).submissionDate || (request as any).createdAt;

// Get conclusion remark if available
let conclusionRemark = (summary as any).ConclusionRemark || (summary as any).conclusionRemark;

// If not loaded and we have conclusionId, fetch by conclusionId
if (!conclusionRemark && (summary as any).conclusionId) {
  conclusionRemark = await ConclusionRemark.findByPk((summary as any).conclusionId);
}

// If still not found, fetch by requestId (summary may have been created before conclusion)
if (!conclusionRemark) {
  conclusionRemark = await ConclusionRemark.findOne({
    where: { requestId: (request as any).requestId }
  });
}

// Determine effective final remark:
// - If user edited: use finalRemark
// - If user closed without editing: use aiGeneratedRemark (becomes final)
// - Otherwise: use closingRemarks from summary snapshot
const effectiveFinalRemark = conclusionRemark?.finalRemark ||
  conclusionRemark?.aiGeneratedRemark ||
  (summary as any).closingRemarks ||
  '—';

logger.info(`[Summary] Summary ${summaryId}: Effective final remark length: ${effectiveFinalRemark?.length || 0} chars (isEdited: ${conclusionRemark?.isEdited}, hasAI: ${!!conclusionRemark?.aiGeneratedRemark}, hasFinal: ${!!conclusionRemark?.finalRemark})`);

return {
  summaryId: (summary as any).summaryId,
  requestId: (request as any).requestId,
  requestNumber: (request as any).requestNumber || 'N/A',
  title: (summary as any).title || (request as any).title || '',
  description: (summary as any).description || (request as any).description || '',
  closingRemarks: (summary as any).closingRemarks || '—',
  closingRemarks: effectiveFinalRemark, // ✅ Effective final remark (edited or AI)
  isAiGenerated: (summary as any).isAiGenerated || false,
  createdAt: (summary as any).createdAt,
  // Include conclusion remark data for detailed view
  conclusionRemark: conclusionRemark ? {
    aiGeneratedRemark: conclusionRemark.aiGeneratedRemark,
    finalRemark: conclusionRemark.finalRemark,
    effectiveFinalRemark: effectiveFinalRemark, // ✅ Computed field: finalRemark || aiGeneratedRemark
    isEdited: conclusionRemark.isEdited,
    generatedAt: conclusionRemark.generatedAt,
    finalizedAt: conclusionRemark.finalizedAt
  } : null,
  initiator: {
    name: initiator.displayName || 'Unknown',
    designation: initiator.designation || 'N/A',
@@ -412,7 +536,8 @@ export class SummaryService {
    priority: (request as any).priority || 'STANDARD',
    status: (request as any).status || 'CLOSED',
    submissionDate: (request as any).submissionDate,
    closureDate: (request as any).closureDate
    closureDate: (request as any).closureDate,
    conclusionRemark: effectiveFinalRemark // ✅ Use effective final remark
  }
};
} catch (error) {
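Both summary read paths apply the same remark fallback chain. Isolated as one function (a sketch of the logic above): prefer the user's edited remark, then the AI draft, then the snapshot taken at summary creation.

```typescript
function effectiveRemark(
  c: { finalRemark?: string; aiGeneratedRemark?: string } | null,
  snapshot?: string
): string {
  return c?.finalRemark || c?.aiGeneratedRemark || snapshot || '—';
}
```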
@@ -2,7 +2,7 @@ import { tatQueue } from '../queues/tatQueue';
import { calculateDelay, addWorkingHours, addWorkingHoursExpress } from '@utils/tatTimeUtils';
import { getTatThresholds } from './configReader.service';
import dayjs from 'dayjs';
import logger from '@utils/logger';
import logger, { logTATEvent } from '@utils/logger';
import { Priority } from '../types/common.types';

export class TatSchedulerService {
@@ -140,7 +140,12 @@ export class TatSchedulerService {
  jobIndex++;
}

logger.info(`[TAT Scheduler] ✅ TAT jobs scheduled for request ${requestId}`);
logTATEvent('warning', requestId, {
  level: parseInt(levelId.split('-').pop() || '1'),
  tatHours: tatDurationHours,
  priority,
  message: 'TAT jobs scheduled',
});
} catch (error) {
  logger.error(`[TAT Scheduler] Failed to schedule TAT jobs:`, error);
  throw error;
@@ -220,61 +220,134 @@ export class UserService {
  }
}

/**
 * Fetch user from Okta by email
 */
async fetchUserFromOktaByEmail(email: string): Promise<OktaUser | null> {
  try {
    const oktaDomain = process.env.OKTA_DOMAIN;
    const oktaApiToken = process.env.OKTA_API_TOKEN;

    if (!oktaDomain || !oktaApiToken) {
      return null;
    }

    // Search Okta users by email (exact match)
    const response = await axios.get(`${oktaDomain}/api/v1/users`, {
      params: { search: `profile.email eq "${email}"`, limit: 1 },
      headers: {
        'Authorization': `SSWS ${oktaApiToken}`,
        'Accept': 'application/json'
      },
      timeout: 5000
    });

    const users: OktaUser[] = response.data || [];
    return users.length > 0 ? users[0] : null;
  } catch (error: any) {
    console.error(`Failed to fetch user from Okta by email ${email}:`, error.message);
    return null;
  }
}
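// OktaUser's shape as consumed in this file (illustrative — the real interface
// is declared elsewhere in user.service.ts and may carry more fields):
//
// interface OktaUser {
//   id: string;
//   status: string; // e.g. 'ACTIVE'
//   profile: {
//     email?: string;
//     firstName?: string;
//     lastName?: string;
//     displayName?: string;
//     department?: string;
//     mobilePhone?: string;
//   };
// }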
  /**
   * Ensure user exists in database (create if not exists)
   * Used when tagging users from Okta search results
   * Used when tagging users from Okta search results or when only email is provided
   *
   * @param oktaUserData - Can be just { email } or full user data
   */
  async ensureUserExists(oktaUserData: {
    userId: string;
    userId?: string;
    email: string;
    displayName?: string;
    firstName?: string;
    lastName?: string;
    department?: string;
    phone?: string;
    designation?: string;
    jobTitle?: string;
    manager?: string;
    employeeId?: string;
    employeeNumber?: string;
    secondEmail?: string;
    mobilePhone?: string;
    location?: string;
  }): Promise<UserModel> {
    const email = oktaUserData.email.toLowerCase();

    // Check if user already exists
    // Check if user already exists in database
    let user = await UserModel.findOne({
      where: {
        [Op.or]: [
          { email },
          { oktaSub: oktaUserData.userId }
          ...(oktaUserData.userId ? [{ oktaSub: oktaUserData.userId }] : [])
        ]
      }
    });

    if (user) {
      // Update existing user with latest info from Okta
      await user.update({
        oktaSub: oktaUserData.userId,
      // Update existing user with latest info from Okta (if provided)
      const updateData: any = {
        email,
        firstName: oktaUserData.firstName || user.firstName,
        lastName: oktaUserData.lastName || user.lastName,
        displayName: oktaUserData.displayName || user.displayName,
        department: oktaUserData.department || user.department,
        phone: oktaUserData.phone || user.phone,
        isActive: true,
        updatedAt: new Date()
      });
      };

      if (oktaUserData.userId) updateData.oktaSub = oktaUserData.userId;
      if (oktaUserData.firstName) updateData.firstName = oktaUserData.firstName;
      if (oktaUserData.lastName) updateData.lastName = oktaUserData.lastName;
      if (oktaUserData.displayName) updateData.displayName = oktaUserData.displayName;
      if (oktaUserData.department) updateData.department = oktaUserData.department;
      if (oktaUserData.phone) updateData.phone = oktaUserData.phone;
      if (oktaUserData.designation) updateData.designation = oktaUserData.designation;
      if (oktaUserData.employeeId) updateData.employeeId = oktaUserData.employeeId;

      await user.update(updateData);
      return user;
    }

    // Create new user
    // User not found in DB - try to fetch from Okta
    if (!oktaUserData.userId) {
      const oktaUser = await this.fetchUserFromOktaByEmail(email);
      if (oktaUser) {
        // Found in Okta - create with Okta data
        user = await UserModel.create({
          oktaSub: oktaUser.id,
          email,
          employeeId: null,
          firstName: oktaUser.profile.firstName || null,
          lastName: oktaUser.profile.lastName || null,
          displayName: oktaUser.profile.displayName || `${oktaUser.profile.firstName || ''} ${oktaUser.profile.lastName || ''}`.trim() || email.split('@')[0],
          department: oktaUser.profile.department || null,
          designation: null,
          phone: oktaUser.profile.mobilePhone || null,
          isActive: oktaUser.status === 'ACTIVE',
          role: 'USER',
          lastLogin: undefined,
          createdAt: new Date(),
          updatedAt: new Date()
        });
        return user;
      } else {
        // Not found in Okta either
        throw new Error(`User with email '${email}' not found in organization directory`);
      }
    }

    // Create new user with provided data
    user = await UserModel.create({
      oktaSub: oktaUserData.userId,
      email,
      employeeId: null, // Will be updated on first login
      employeeId: oktaUserData.employeeId || null,
      firstName: oktaUserData.firstName || null,
      lastName: oktaUserData.lastName || null,
      displayName: oktaUserData.displayName || email.split('@')[0],
      department: oktaUserData.department || null,
      designation: null,
      phone: oktaUserData.phone || null,
      designation: oktaUserData.designation || oktaUserData.jobTitle || null,
      phone: oktaUserData.phone || oktaUserData.mobilePhone || null,
      isActive: true,
      role: 'USER',
      lastLogin: undefined, // Not logged in yet, just created for tagging
      lastLogin: undefined,
      createdAt: new Date(),
      updatedAt: new Date()
    });
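The net effect is an email-only entry point: the service resolves the address against the local users table first, then against Okta, and only fails if neither source knows it. A minimal usage sketch (the email address is illustrative):

```typescript
// Email-only call path introduced by this change; throws if the address
// is in neither the database nor the Okta directory.
const userService = new UserService();
const approver = await userService.ensureUserExists({ email: 'approver@example.com' });
```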
207 src/services/userEnrichment.service.ts Normal file
@@ -0,0 +1,207 @@
/**
 * User Enrichment Service
 *
 * Handles automatic user lookup/creation and data enrichment for workflow creation
 */

import { User } from '@models/User';
import logger from '@utils/logger';
import { UserService } from './user.service';

const userService = new UserService();

interface SimplifiedApprovalLevel {
  email: string;
  tatHours: number;
  isFinalApprover?: boolean;
  levelNumber?: number;
  levelName?: string;
  approverId?: string;
  approverEmail?: string;
  approverName?: string;
}

interface EnrichedApprovalLevel {
  levelNumber: number;
  levelName: string;
  approverId: string;
  approverEmail: string;
  approverName: string;
  tatHours: number;
  isFinalApprover: boolean;
}

interface SimplifiedSpectator {
  email: string;
  userId?: string;
  userEmail?: string;
  userName?: string;
}

interface EnrichedSpectator {
  userId: string;
  userEmail: string;
  userName: string;
  participantType: 'SPECTATOR';
  canComment: boolean;
  canViewDocuments: boolean;
  canDownloadDocuments: boolean;
  notificationEnabled: boolean;
}
/**
 * Enrich approval levels with user data from database/AD
 * @param approvalLevels - Simplified approval levels (only email + tatHours required)
 * @returns Enriched approval levels with full user data
 */
export async function enrichApprovalLevels(
  approvalLevels: SimplifiedApprovalLevel[]
): Promise<EnrichedApprovalLevel[]> {
  const enriched: EnrichedApprovalLevel[] = [];
  const processedEmails = new Set<string>();

  for (let i = 0; i < approvalLevels.length; i++) {
    const level = approvalLevels[i];
    const email = level.email.toLowerCase();

    // Check for duplicate emails
    if (processedEmails.has(email)) {
      throw new Error(`Duplicate approver email found: ${email}. Each approver must have a unique email.`);
    }
    processedEmails.add(email);

    try {
      // Find or create user from AD
      let user = await User.findOne({ where: { email } });

      if (!user) {
        logger.info(`[UserEnrichment] User not found in DB, attempting to sync from AD: ${email}`);
        // Try to fetch and create user from AD
        try {
          user = await userService.ensureUserExists({ email }) as any;
        } catch (adError: any) {
          logger.error(`[UserEnrichment] Failed to sync user from AD: ${email}`, adError);
          throw new Error(`Approver email '${email}' not found in organization directory. Please verify the email address.`);
        }
      }

      const userId = (user as any).userId;
      const displayName = (user as any).displayName || (user as any).email;
      const designation = (user as any).designation || (user as any).jobTitle;
      const department = (user as any).department;

      // Auto-generate level name
      let levelName = level.levelName;
      if (!levelName) {
        if (designation) {
          levelName = `${designation} Approval`;
        } else if (department) {
          levelName = `${department} Approval`;
        } else {
          levelName = `Level ${i + 1} Approval`;
        }
      }

      // Auto-detect final approver (last level)
      const isFinalApprover = level.isFinalApprover !== undefined
        ? level.isFinalApprover
        : (i === approvalLevels.length - 1);

      enriched.push({
        levelNumber: level.levelNumber || (i + 1),
        levelName,
        approverId: userId,
        approverEmail: email,
        approverName: displayName,
        tatHours: level.tatHours,
        isFinalApprover,
      });

      logger.info(`[UserEnrichment] Enriched approval level ${i + 1}: ${email} -> ${displayName} (${levelName})`);
    } catch (error: any) {
      logger.error(`[UserEnrichment] Failed to enrich approval level for ${email}:`, error);
      throw error;
    }
  }

  return enriched;
}
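A sketch of what the enrichment produces for a simplified two-level payload; the IDs, names, and generated level names depend on directory data, so the values shown are illustrative:

```typescript
const levels = await enrichApprovalLevels([
  { email: 'manager@example.com', tatHours: 24 },
  { email: 'director@example.com', tatHours: 48 },
]);
// Illustrative result:
// [
//   { levelNumber: 1, levelName: 'Manager Approval',  approverEmail: 'manager@example.com',  isFinalApprover: false, ... },
//   { levelNumber: 2, levelName: 'Director Approval', approverEmail: 'director@example.com', isFinalApprover: true,  ... }
// ]
// The last level is auto-detected as final because isFinalApprover was not supplied.
```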
/**
 * Enrich spectators with user data from database/AD
 * @param spectators - Simplified spectators (only email required)
 * @returns Enriched spectators with full user data
 */
export async function enrichSpectators(
  spectators: SimplifiedSpectator[]
): Promise<EnrichedSpectator[]> {
  if (!spectators || spectators.length === 0) {
    return [];
  }

  const enriched: EnrichedSpectator[] = [];
  const processedEmails = new Set<string>();

  for (const spectator of spectators) {
    const email = spectator.email.toLowerCase();

    // Check for duplicate emails
    if (processedEmails.has(email)) {
      throw new Error(`Duplicate spectator email found: ${email}. Each spectator must have a unique email.`);
    }
    processedEmails.add(email);

    try {
      // Find or create user from AD
      let user = await User.findOne({ where: { email } });

      if (!user) {
        logger.info(`[UserEnrichment] User not found in DB, attempting to sync from AD: ${email}`);
        try {
          user = await userService.ensureUserExists({ email }) as any;
        } catch (adError: any) {
          logger.error(`[UserEnrichment] Failed to sync user from AD: ${email}`, adError);
          throw new Error(`Spectator email '${email}' not found in organization directory. Please verify the email address.`);
        }
      }

      const userId = (user as any).userId;
      const displayName = (user as any).displayName || (user as any).email;

      enriched.push({
        userId,
        userEmail: email,
        userName: displayName,
        participantType: 'SPECTATOR',
        canComment: true,
        canViewDocuments: true,
        canDownloadDocuments: false,
        notificationEnabled: true,
      });

      logger.info(`[UserEnrichment] Enriched spectator: ${email} -> ${displayName}`);
    } catch (error: any) {
      logger.error(`[UserEnrichment] Failed to enrich spectator ${email}:`, error);
      throw error;
    }
  }

  return enriched;
}
/**
 * Validate and ensure initiator exists in database
 * @param initiatorId - User ID of the initiator
 * @returns User object if valid
 * @throws Error if initiator not found or invalid
 */
export async function validateInitiator(initiatorId: string): Promise<any> {
  const user = await User.findByPk(initiatorId);

  if (!user) {
    throw new Error(`Invalid initiator: User with ID '${initiatorId}' not found. Please ensure you are logged in with a valid account.`);
  }

  return user;
}
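Taken together, the three exports give workflow creation a small pre-flight pipeline. A sketch of the assumed call order (the controller wiring itself is not part of this diff; `req` and `body` names are illustrative):

```typescript
// Assumed controller-side usage of this service.
const initiator = await validateInitiator(req.user.userId);
const approvalLevels = await enrichApprovalLevels(body.approvalLevels);
const spectators = await enrichSpectators(body.spectators || []);
// ...hand the enriched structures to the workflow service for creation...
```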
@@ -8,7 +8,7 @@ import { Document } from '@models/Document';
import '@models/index';
import { CreateWorkflowRequest, UpdateWorkflowRequest } from '../types/workflow.types';
import { generateRequestNumber, calculateTATDays } from '@utils/helpers';
import logger from '@utils/logger';
import logger, { logWorkflowEvent, logWithContext } from '@utils/logger';
import { WorkflowStatus, ParticipantType, ApprovalStatus } from '../types/common.types';
import { Op, QueryTypes } from 'sequelize';
import { sequelize } from '@config/database';
@@ -18,6 +18,7 @@ import dayjs from 'dayjs';
import { notificationService } from './notification.service';
import { activityService } from './activity.service';
import { tatSchedulerService } from './tatScheduler.service';
import { emitToRequestRoom } from '../realtime/socket';

export class WorkflowService {
  /**
@@ -40,13 +41,24 @@ export class WorkflowService {

  /**
   * Add a new approver to an existing workflow
   * Auto-creates user from Okta/AD if not in database
   */
  async addApprover(requestId: string, email: string, addedBy: string): Promise<any> {
    try {
      // Find user by email
      const user = await User.findOne({ where: { email: email.toLowerCase() } });
      const emailLower = email.toLowerCase();

      // Find or create user from AD
      let user = await User.findOne({ where: { email: emailLower } });
      if (!user) {
        throw new Error('User not found with this email');
        logger.info(`[Workflow] User not found in DB, syncing from AD: ${emailLower}`);
        const { UserService } = await import('./user.service');
        const userService = new UserService();
        try {
          user = await userService.ensureUserExists({ email: emailLower }) as any;
        } catch (adError: any) {
          logger.error(`[Workflow] Failed to sync user from AD: ${emailLower}`, adError);
          throw new Error(`Approver email '${email}' not found in organization directory. Please verify the email address.`);
        }
      }

      const userId = (user as any).userId;
@@ -143,52 +155,9 @@ export class WorkflowService {
        throw new Error('Cannot skip future approval levels');
      }

      // Cancel pause if workflow is paused (requirement 3.7)
      // When initiator skips a paused approver, the pause is negated and workflow resumes automatically
      // Block skip if workflow is paused - must resume first
      if ((workflow as any).isPaused || (workflow as any).status === 'PAUSED') {
        try {
          // Get the paused level (should be the level being skipped)
          const pausedLevel = await ApprovalLevel.findOne({
            where: {
              requestId,
              isPaused: true
            }
          });

          // Cancel pause on the workflow (the level will be marked as skipped below)
          const previousStatus = (workflow as any).pauseTatSnapshot?.previousStatus || WorkflowStatus.PENDING;
          await workflow.update({
            isPaused: false,
            pausedAt: null as any,
            pausedBy: null as any,
            pauseReason: null as any,
            pauseResumeDate: null as any,
            pauseTatSnapshot: null as any,
            status: previousStatus // Restore previous status (should be PENDING)
          });

          // If the paused level is the one being skipped, clear its pause fields
          // (it will be marked as SKIPPED below, so no need to restore to PENDING)
          if (pausedLevel && (pausedLevel as any).levelId === levelId) {
            await pausedLevel.update({
              isPaused: false,
              pausedAt: null as any,
              pausedBy: null as any,
              pauseReason: null as any,
              pauseResumeDate: null as any,
              pauseTatStartTime: null as any,
              pauseElapsedHours: null as any
            });
          }

          logger.info(`[Workflow] Pause cancelled and workflow resumed when approver was skipped for request ${requestId}`);

          // Reload workflow to get updated state after resume
          await workflow.reload();
        } catch (pauseError) {
          logger.warn(`[Workflow] Failed to cancel pause when skipping approver:`, pauseError);
          // Continue with skip even if pause cancellation fails
        }
        throw new Error('Cannot skip approver while workflow is paused. Please resume the workflow first before skipping.');
      }

      // Mark as skipped
@@ -277,6 +246,17 @@ export class WorkflowService {
      });

      logger.info(`[Workflow] Skipped approver at level ${levelNumber} for request ${requestId}`);

      // Emit real-time update to all users viewing this request
      const wfForEmit = await WorkflowRequest.findByPk(requestId);
      emitToRequestRoom(requestId, 'request:updated', {
        requestId,
        requestNumber: (wfForEmit as any)?.requestNumber,
        action: 'SKIP',
        levelNumber: levelNumber,
        timestamp: new Date().toISOString()
      });

      return level;
    } catch (error) {
      logger.error(`[Workflow] Failed to skip approver:`, error);
@@ -286,6 +266,7 @@ export class WorkflowService {

  /**
   * Add a new approver at specific level (with level shifting)
   * Auto-creates user from Okta/AD if not in database
   */
  async addApproverAtLevel(
    requestId: string,
@@ -295,14 +276,26 @@
    addedBy: string
  ): Promise<any> {
    try {
      // Find user by email
      const user = await User.findOne({ where: { email: email.toLowerCase() } });
      const emailLower = email.toLowerCase();

      // Find or create user from AD
      let user = await User.findOne({ where: { email: emailLower } });
      if (!user) {
        throw new Error('User not found with this email');
        logger.info(`[Workflow] User not found in DB, syncing from AD: ${emailLower}`);
        const { UserService } = await import('./user.service');
        const userService = new UserService();
        try {
          user = await userService.ensureUserExists({ email: emailLower }) as any;
        } catch (adError: any) {
          logger.error(`[Workflow] Failed to sync user from AD: ${emailLower}`, adError);
          throw new Error(`Approver email '${email}' not found in organization directory. Please verify the email address.`);
        }
      }

      const userId = (user as any).userId;
      const userName = (user as any).displayName || (user as any).email;
      const designation = (user as any).designation || (user as any).jobTitle;
      const department = (user as any).department;

      // Check if user is already a participant
      const existing = await Participant.findOne({
@@ -338,27 +331,39 @@ export class WorkflowService {
      }

      // Shift existing levels at and after target level
      const levelsToShift = allLevels.filter(l => (l as any).levelNumber >= targetLevel);
      // IMPORTANT: Shift in REVERSE order to avoid unique constraint violations
      const levelsToShift = allLevels
        .filter(l => (l as any).levelNumber >= targetLevel)
        .sort((a, b) => (b as any).levelNumber - (a as any).levelNumber); // Sort descending

      for (const levelToShift of levelsToShift) {
        const newLevelNumber = (levelToShift as any).levelNumber + 1;
        const oldLevelNumber = (levelToShift as any).levelNumber;
        const newLevelNumber = oldLevelNumber + 1;
        await levelToShift.update({
          levelNumber: newLevelNumber,
          levelName: `Level ${newLevelNumber}`
        });
        logger.info(`[Workflow] Shifted level ${(levelToShift as any).levelNumber - 1} → ${newLevelNumber}`);
        logger.info(`[Workflow] Shifted level ${oldLevelNumber} → ${newLevelNumber}`);
      }

      // Update total levels in workflow
      await workflow.update({ totalLevels: allLevels.length + 1 });

      // Auto-generate smart level name
      let levelName = `Level ${targetLevel}`;
      if (designation) {
        levelName = `${designation} Approval`;
      } else if (department) {
        levelName = `${department} Approval`;
      }

      // Create new approval level at target position
      const newLevel = await ApprovalLevel.create({
        requestId,
        levelNumber: targetLevel,
        levelName: `Level ${targetLevel}`,
        levelName,
        approverId: userId,
        approverEmail: email.toLowerCase(),
        approverEmail: emailLower,
        approverName: userName,
        tatHours,
        // tatDays is auto-calculated by database as a generated column
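The descending sort matters because approval levels appear to be unique per (requestId, levelNumber), as the new comment states; shifting upward in ascending order would momentarily collide with the neighbour that has not moved yet. A hypothetical walk-through, assuming such a unique constraint:

```typescript
// Hypothetical illustration, assuming UNIQUE(requestId, levelNumber):
// inserting at level 2 while levels [2, 3, 4] exist.
//   ascending:  2 -> 3  fails, level 3 is still occupied
//   descending: 4 -> 5, 3 -> 4, 2 -> 3  succeeds, each target number is free
```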
@@ -437,13 +442,24 @@ export class WorkflowService {

  /**
   * Add a new spectator to an existing workflow
   * Auto-creates user from Okta/AD if not in database
   */
  async addSpectator(requestId: string, email: string, addedBy: string): Promise<any> {
    try {
      // Find user by email
      const user = await User.findOne({ where: { email: email.toLowerCase() } });
      const emailLower = email.toLowerCase();

      // Find or create user from AD
      let user = await User.findOne({ where: { email: emailLower } });
      if (!user) {
        throw new Error('User not found with this email');
        logger.info(`[Workflow] User not found in DB, syncing from AD: ${emailLower}`);
        const { UserService } = await import('./user.service');
        const userService = new UserService();
        try {
          user = await userService.ensureUserExists({ email: emailLower }) as any;
        } catch (adError: any) {
          logger.error(`[Workflow] Failed to sync user from AD: ${emailLower}`, adError);
          throw new Error(`Spectator email '${email}' not found in organization directory. Please verify the email address.`);
        }
      }

      const userId = (user as any).userId;
@@ -2271,7 +2287,12 @@ export class WorkflowService {
      }
    }

    logger.info(`Workflow created: ${requestNumber}`);
    logWorkflowEvent('created', workflow.requestId, {
      requestNumber,
      priority: workflowData.priority,
      userId: initiatorId,
      status: workflow.status,
    });

    // Get initiator details
    const initiator = await User.findByPk(initiatorId);
@@ -2326,7 +2347,11 @@

    return workflow;
  } catch (error) {
    logger.error('Failed to create workflow:', error);
    logWithContext('error', 'Failed to create workflow', {
      userId: initiatorId,
      priority: workflowData.priority,
      error,
    });
    throw new Error('Failed to create workflow');
  }
}
@@ -1,50 +1,442 @@
import winston from 'winston';
import path from 'path';
import os from 'os';

const logDir = process.env.LOG_FILE_PATH || './logs';
const isProduction = process.env.NODE_ENV === 'production';

// Create logger instance
const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: winston.format.combine(
    winston.format.timestamp({
      format: 'YYYY-MM-DD HH:mm:ss',
    }),
    winston.format.errors({ stack: true }),
    winston.format.json()
  ),
  defaultMeta: { service: 're-workflow-backend' },
  transports: [
    // Write all logs with level 'error' and below to error.log
    new winston.transports.File({
      filename: path.join(logDir, 'error.log'),
      level: 'error',
      maxsize: 5242880, // 5MB
      maxFiles: 5,
    }),
    // Write all logs with level 'info' and below to combined.log
    new winston.transports.File({
      filename: path.join(logDir, 'combined.log'),
      maxsize: 5242880, // 5MB
      maxFiles: 5,
    }),
  ],
});
// ============ SENSITIVE DATA PATTERNS ============
const SENSITIVE_KEYS = [
  'password', 'secret', 'token', 'key', 'apikey', 'api_key', 'api-key',
  'authorization', 'auth', 'credential', 'private', 'access_token',
  'refresh_token', 'jwt', 'bearer', 'session', 'cookie', 'csrf',
  'vapid', 'smtp_password', 'db_password', 'redis_url', 'connection_string'
];

// If we're not in production, log to the console as well
if (process.env.NODE_ENV !== 'production') {
  logger.add(
const SENSITIVE_PATTERN = new RegExp(
  `(${SENSITIVE_KEYS.join('|')})\\s*[=:]\\s*['"]?([^'\"\\s,}\\]]+)['"]?`,
  'gi'
);

/**
 * Mask sensitive values in strings (API keys, passwords, tokens)
 */
const maskSensitiveData = (value: any): any => {
  if (typeof value === 'string') {
    // Mask patterns like "API_KEY = abc123" or "password: secret"
    let masked = value.replace(SENSITIVE_PATTERN, (match, key, val) => {
      if (val && val.length > 0) {
        const maskedVal = val.length > 4
          ? val.substring(0, 2) + '***' + val.substring(val.length - 2)
          : '***';
        return `${key}=${maskedVal}`;
      }
      return match;
    });

    // Mask standalone tokens/keys (long alphanumeric strings that look like secrets)
    // e.g., "sk-abc123xyz789..." or "ghp_xxxx..."
    masked = masked.replace(
      /\b(sk-|ghp_|gho_|github_pat_|xox[baprs]-|Bearer\s+)([A-Za-z0-9_-]{20,})/gi,
      (match, prefix, token) => `${prefix}${'*'.repeat(8)}...`
    );

    return masked;
  }

  if (Array.isArray(value)) {
    return value.map(maskSensitiveData);
  }

  if (value && typeof value === 'object') {
    const masked: any = {};
    for (const [k, v] of Object.entries(value)) {
      const keyLower = k.toLowerCase();
      // Check if key itself is sensitive
      if (SENSITIVE_KEYS.some(sk => keyLower.includes(sk))) {
        masked[k] = typeof v === 'string' && v.length > 0 ? '***REDACTED***' : v;
      } else {
        masked[k] = maskSensitiveData(v);
      }
    }
    return masked;
  }

  return value;
};
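A quick sketch of the masking behaviour, traced from the two regexes and the object branch above (inputs and outputs are illustrative):

```typescript
maskSensitiveData('api_key: abcd1234efgh');
// -> 'api_key=ab***gh'            (key/value pattern; middle of the value masked)

maskSensitiveData('Bearer eyJhbGciOiJIUzI1NiIsIkp');
// -> 'Bearer ********...'         (token-prefix pattern for long secrets)

maskSensitiveData({ password: 'hunter2', userId: '42' });
// -> { password: '***REDACTED***', userId: '42' }  (sensitive object key redacted)
```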
// ============ COMMON LABELS/METADATA ============
const appMeta = {
  app: 're-workflow',
  service: 'backend',
  environment: process.env.NODE_ENV || 'development',
  version: process.env.APP_VERSION || '1.2.0',
};

// ============ TRANSPORTS ============
const transports: winston.transport[] = [
  // Local file transport - Error logs
  new winston.transports.File({
    filename: path.join(logDir, 'error.log'),
    level: 'error',
    maxsize: 10 * 1024 * 1024, // 10MB
    maxFiles: 10,
    tailable: true,
  }),
  // Local file transport - Combined logs
  new winston.transports.File({
    filename: path.join(logDir, 'combined.log'),
    maxsize: 10 * 1024 * 1024, // 10MB
    maxFiles: 10,
    tailable: true,
  }),
];

// ============ LOKI TRANSPORT (Grafana) ============
if (process.env.LOKI_HOST) {
  try {
    const LokiTransport = require('winston-loki');

    const lokiTransportOptions: any = {
      host: process.env.LOKI_HOST,
      labels: appMeta,
      json: true,
      format: winston.format.combine(
        winston.format.timestamp(),
        winston.format.json()
      ),
      replaceTimestamp: true,
      onConnectionError: (err: Error) => {
        console.error('[Loki] Connection error:', err.message);
      },
      batching: true,
      interval: 5,
    };

    if (process.env.LOKI_USER && process.env.LOKI_PASSWORD) {
      lokiTransportOptions.basicAuth = `${process.env.LOKI_USER}:${process.env.LOKI_PASSWORD}`;
    }

    transports.push(new LokiTransport(lokiTransportOptions));
    console.log(`[Logger] ✅ Loki transport enabled: ${process.env.LOKI_HOST}`);
  } catch (error) {
    console.warn('[Logger] ⚠️ Failed to initialize Loki transport:', (error as Error).message);
  }
}
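The transport is driven entirely by environment variables; a sketch of the expected entries (the variable names come from the code above, the values are placeholders):

```typescript
// Expected .env entries (values illustrative):
//   LOKI_HOST=http://loki:3100   (enables the transport when set)
//   LOKI_USER=grafana            (optional; with LOKI_PASSWORD enables basic auth)
//   LOKI_PASSWORD=<secret>
//   LOG_LEVEL=info               (consumed by the logger created below)
```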
// ============ CONSOLE TRANSPORT (Development) ============
if (!isProduction) {
  transports.push(
    new winston.transports.Console({
      format: winston.format.combine(
        winston.format.colorize(),
        winston.format.simple()
        winston.format.printf(({ level, message, timestamp, ...meta }) => {
          const metaStr = Object.keys(meta).length && !meta.service
            ? ` ${JSON.stringify(meta)}`
            : '';
          return `${timestamp} [${level}]: ${message}${metaStr}`;
        })
      ),
    })
  );
}

// ============ ERROR SANITIZER ============
/**
 * Sanitize error objects for logging - prevents huge Axios error dumps
 */
const sanitizeError = (error: any): object => {
  // Handle Axios errors specifically
  if (error?.isAxiosError || error?.name === 'AxiosError') {
    return {
      name: error.name,
      message: error.message,
      code: error.code,
      status: error.response?.status,
      statusText: error.response?.statusText,
      url: error.config?.url,
      method: error.config?.method,
      responseData: error.response?.data,
    };
  }

  // Handle standard errors
  if (error instanceof Error) {
    return {
      name: error.name,
      message: error.message,
      stack: error.stack,
      ...(error as any).statusCode && { statusCode: (error as any).statusCode },
    };
  }

  // Fallback for unknown error types
  return {
    message: String(error),
    type: typeof error,
  };
};

// Custom format to sanitize errors and mask sensitive data before logging
const sanitizeFormat = winston.format((info) => {
  // Sanitize error objects
  if (info.error && typeof info.error === 'object') {
    info.error = sanitizeError(info.error);
  }

  // If message is an error object, sanitize it
  if (info.message && typeof info.message === 'object' && (info.message as any).stack) {
    info.error = sanitizeError(info.message);
    info.message = (info.message as Error).message;
  }

  // Mask sensitive data in message
  if (typeof info.message === 'string') {
    info.message = maskSensitiveData(info.message);
  }

  // Mask sensitive data in all metadata
  for (const key of Object.keys(info)) {
    if (key !== 'level' && key !== 'timestamp' && key !== 'service') {
      info[key] = maskSensitiveData(info[key]);
    }
  }

  return info;
});

// ============ CREATE LOGGER ============
const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || (isProduction ? 'info' : 'debug'),
  format: winston.format.combine(
    winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
    winston.format.errors({ stack: true }),
    sanitizeFormat(),
    winston.format.json()
  ),
  defaultMeta: {
    service: 're-workflow-backend',
    hostname: os.hostname(),
  },
  transports,
});

// ============ HELPER METHODS FOR STRUCTURED LOGGING ============

/**
 * Log with additional context labels (will appear in Grafana)
 */
export const logWithContext = (
  level: 'info' | 'warn' | 'error' | 'debug',
  message: string,
  context: {
    // Domain labels
    requestId?: string;
    userId?: string;
    priority?: 'STANDARD' | 'EXPRESS';
    status?: string;
    department?: string;
    // API labels
    endpoint?: string;
    method?: string;
    statusCode?: number;
    duration?: number;
    // Error context
    errorType?: string;
    error?: any;
    stack?: string;
    // Custom data
    [key: string]: any;
  }
) => {
  // Sanitize error if present
  const sanitizedContext = { ...context };
  if (sanitizedContext.error) {
    sanitizedContext.error = sanitizeError(sanitizedContext.error);
  }

  logger.log(level, message, sanitizedContext);
};
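A representative call, using the typed label keys above (the request ID format is illustrative):

```typescript
logWithContext('warn', 'Approval level nearing TAT breach', {
  requestId: 'REQ-2025-0001', // illustrative ID
  priority: 'EXPRESS',
  status: 'PENDING',
  level: 2,
});
```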
/**
 * Log API request (use in middleware or controllers)
 */
export const logApiRequest = (
  method: string,
  endpoint: string,
  statusCode: number,
  duration: number,
  userId?: string,
  error?: string
) => {
  const level = statusCode >= 500 ? 'error' : statusCode >= 400 ? 'warn' : 'info';
  logger.log(level, `${method} ${endpoint} ${statusCode} ${duration}ms`, {
    endpoint,
    method,
    statusCode,
    duration,
    userId,
    ...(error && { error }),
  });
};
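The natural call site is HTTP middleware; a minimal Express sketch (this wiring is assumed, not part of the diff):

```typescript
import type { Request, Response, NextFunction } from 'express';

// Assumed middleware: measures duration and logs once the response finishes.
export function httpLogger(req: Request, res: Response, next: NextFunction) {
  const start = Date.now();
  res.on('finish', () => {
    logApiRequest(req.method, req.path, res.statusCode, Date.now() - start,
      (req as any).user?.userId);
  });
  next();
}
```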
/**
 * Log workflow events
 */
export const logWorkflowEvent = (
  event: 'created' | 'submitted' | 'approved' | 'rejected' | 'closed' | 'paused' | 'resumed' | 'updated',
  requestId: string,
  details: {
    priority?: string;
    status?: string;
    department?: string;
    userId?: string;
    userName?: string;
    message?: string;
    level?: number;
    [key: string]: any;
  } = {}
) => {
  logger.info(`Workflow ${event}: ${requestId}`, {
    workflowEvent: event,
    requestId,
    ...details,
  });
};

/**
 * Log TAT/SLA events
 */
export const logTATEvent = (
  event: 'approaching' | 'breached' | 'resolved' | 'warning',
  requestId: string,
  details: {
    priority?: string;
    threshold?: number;
    elapsedHours?: number;
    tatHours?: number;
    level?: number;
    [key: string]: any;
  } = {}
) => {
  const level = event === 'breached' ? 'error' : event === 'approaching' || event === 'warning' ? 'warn' : 'info';
  logger.log(level, `TAT ${event}: ${requestId}`, {
    tatEvent: event,
    requestId,
    ...details,
  });
};
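A breach, for example, is logged at error level by the mapping above:

```typescript
// Example call; the numbers are illustrative.
logTATEvent('breached', requestId, {
  priority: 'STANDARD',
  tatHours: 48,
  elapsedHours: 53,
  level: 2,
});
```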
/**
 * Log authentication events
 */
export const logAuthEvent = (
  event: 'login' | 'logout' | 'token_refresh' | 'token_exchange' | 'auth_failure' | 'sso_callback',
  userId: string | undefined,
  details: {
    email?: string;
    role?: string;
    ip?: string;
    userAgent?: string;
    error?: any;
    [key: string]: any;
  } = {}
) => {
  const level = event === 'auth_failure' ? 'warn' : 'info';

  // Sanitize error if present
  const sanitizedDetails = { ...details };
  if (sanitizedDetails.error) {
    sanitizedDetails.error = sanitizeError(sanitizedDetails.error);
  }

  logger.log(level, `Auth ${event}${userId ? `: ${userId}` : ''}`, {
    authEvent: event,
    userId,
    ...sanitizedDetails,
  });
};

/**
 * Log document events
 */
export const logDocumentEvent = (
  event: 'uploaded' | 'downloaded' | 'deleted' | 'previewed',
  documentId: string,
  details: {
    requestId?: string;
    userId?: string;
    fileName?: string;
    fileType?: string;
    fileSize?: number;
    [key: string]: any;
  } = {}
) => {
  logger.info(`Document ${event}: ${documentId}`, {
    documentEvent: event,
    documentId,
    ...details,
  });
};

/**
 * Log notification events
 */
export const logNotificationEvent = (
  event: 'sent' | 'failed' | 'queued',
  details: {
    type?: string;
    userId?: string;
    requestId?: string;
    channel?: 'push' | 'email' | 'in-app';
    error?: any;
    [key: string]: any;
  } = {}
) => {
  const level = event === 'failed' ? 'error' : 'info';

  // Sanitize error if present
  const sanitizedDetails = { ...details };
  if (sanitizedDetails.error) {
    sanitizedDetails.error = sanitizeError(sanitizedDetails.error);
  }

  logger.log(level, `Notification ${event}`, {
    notificationEvent: event,
    ...sanitizedDetails,
  });
};

/**
 * Log AI service events
 */
export const logAIEvent = (
  event: 'request' | 'response' | 'error' | 'fallback',
  details: {
    provider?: string;
    model?: string;
    requestId?: string;
    duration?: number;
    error?: any;
    [key: string]: any;
  } = {}
) => {
  const level = event === 'error' ? 'error' : 'info';

  // Sanitize error if present
  const sanitizedDetails = { ...details };
  if (sanitizedDetails.error) {
    sanitizedDetails.error = sanitizeError(sanitizedDetails.error);
  }

  logger.log(level, `AI ${event}`, {
    aiEvent: event,
    ...sanitizedDetails,
  });
};

// ============ MORGAN STREAM ============
// Create a stream object for Morgan HTTP logging
// Use type assertion to bypass TypeScript's strict checking for the stream property
const loggerWithStream = logger as any;
loggerWithStream.stream = {
  write: (message: string) => {
@@ -52,4 +444,6 @@ loggerWithStream.stream = {
  },
};

// Export helper functions and logger
export { sanitizeError };
export default loggerWithStream as winston.Logger;
@@ -1,29 +1,49 @@
import { z } from 'zod';

// Simplified approval level schema - only requires email and tatHours
// Backend will enrich with user details (approverId, approverName, levelName)
const simplifiedApprovalLevelSchema = z.object({
  email: z.string().email('Valid email is required'),
  tatHours: z.number().positive('TAT hours must be positive'),
  isFinalApprover: z.boolean().optional(),
  // Optional fields that backend will auto-populate if not provided
  levelNumber: z.number().int().min(1).max(10).optional(),
  levelName: z.string().optional(),
  approverId: z.string().uuid().optional(),
  approverEmail: z.string().email().optional(),
  approverName: z.string().optional(),
});

// Simplified spectator schema - only requires email
const simplifiedSpectatorSchema = z.object({
  email: z.string().email('Valid email is required').optional(),
  // Optional fields that backend will auto-populate if not provided
  userId: z.string().uuid().optional(),
  userEmail: z.string().email().optional(),
  userName: z.string().optional(),
  participantType: z.enum(['INITIATOR', 'APPROVER', 'SPECTATOR'] as const).optional(),
  canComment: z.boolean().optional(),
  canViewDocuments: z.boolean().optional(),
  canDownloadDocuments: z.boolean().optional(),
  notificationEnabled: z.boolean().optional(),
});

export const createWorkflowSchema = z.object({
  templateType: z.enum(['CUSTOM', 'TEMPLATE']),
  title: z.string().min(1, 'Title is required').max(500, 'Title too long'),
  description: z.string().min(1, 'Description is required'),
  priority: z.enum(['STANDARD', 'EXPRESS'] as const),
  approvalLevels: z.array(z.object({
    levelNumber: z.number().int().min(1).max(10),
    levelName: z.string().optional(),
    approverId: z.string().uuid(),
    approverEmail: z.string().email(),
    approverName: z.string().min(1),
    tatHours: z.number().positive(),
    isFinalApprover: z.boolean().optional(),
  })).min(1, 'At least one approval level is required').max(10, 'Maximum 10 approval levels allowed'),
  participants: z.array(z.object({
    userId: z.string().uuid(),
    userEmail: z.string().email(),
    userName: z.string().min(1),
    participantType: z.enum(['INITIATOR', 'APPROVER', 'SPECTATOR'] as const),
    canComment: z.boolean().optional(),
    canViewDocuments: z.boolean().optional(),
    canDownloadDocuments: z.boolean().optional(),
    notificationEnabled: z.boolean().optional(),
  })).optional(),
  approvalLevels: z.array(simplifiedApprovalLevelSchema)
    .min(1, 'At least one approval level is required')
    .max(10, 'Maximum 10 approval levels allowed'),
  participants: z.array(simplifiedSpectatorSchema).optional(),
  spectators: z.array(simplifiedSpectatorSchema).optional(), // Alias for participants
  // Additional frontend compatibility fields
  approverCount: z.number().optional(),
  approvers: z.array(z.any()).optional(),
  priorityUi: z.string().optional(),
  templateId: z.string().optional(),
  ccList: z.array(z.any()).optional(),
});
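End to end, the new schema accepts exactly the simplified email-plus-tatHours payload; a validation sketch using zod's standard safeParse (field values illustrative):

```typescript
const result = createWorkflowSchema.safeParse({
  templateType: 'CUSTOM',
  title: 'Laptop purchase approval',
  description: 'Approval for replacing developer laptops',
  priority: 'STANDARD',
  approvalLevels: [{ email: 'manager@example.com', tatHours: 24 }],
  spectators: [{ email: 'finance@example.com' }],
});
if (!result.success) {
  console.error(result.error.flatten()); // per-field error messages
}
```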
export const updateWorkflowSchema = z.object({