Compare commits: 18620235d8 ... 134b7d547d (2 commits: 134b7d547d, a6bafa8764)
API_SIMPLIFIED_FORMAT.md (new file, 538 lines)
@@ -0,0 +1,538 @@

# Simplified Workflow API - Postman Guide

## ✅ Updated Simplified Format

The API has been updated to make workflow creation much simpler. You now only need to provide **email** and **tatHours** for approvers, and **email** for spectators. The backend automatically handles:

- User lookup/creation from Okta/Azure AD
- Fetching user details (name, department, designation)
- Auto-generating level names based on designation/department
- Auto-detecting final approver (last level)
- Proper validation with clear error messages

---

## Authentication

### Login

```http
POST {{baseUrl}}/auth/login
Content-Type: application/json

{
  "email": "your-email@example.com",
  "password": "your-password"
}
```

**Response:**
```json
{
  "success": true,
  "data": {
    "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
    "user": { "userId": "...", "email": "...", ... }
  }
}
```
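
Outside Postman, the same login call can be made from code. A minimal TypeScript `fetch` sketch (not part of the codebase; `baseUrl` is the value this guide configures for Postman):

```typescript
const baseUrl = 'http://localhost:5000/api/v1';

// Logs in and returns the bearer token from data.token.
async function login(email: string, password: string): Promise<string> {
  const res = await fetch(`${baseUrl}/auth/login`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ email, password }),
  });
  const json = await res.json();
  if (!json.success) {
    throw new Error('Login failed');
  }
  return json.data.token; // use as: Authorization: Bearer <token>
}
```
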
---

## Create Workflow - Simplified Format

### Example 1: Simple Workflow (JSON)

**POST** `{{baseUrl}}/workflows`

**Headers:**
```
Content-Type: application/json
Authorization: Bearer <your_token>
```

**Body:**
```json
{
  "templateType": "CUSTOM",
  "title": "Purchase Order Approval - Office Equipment",
  "description": "Approval needed for purchasing new office equipment including laptops and monitors. Total budget: $50,000",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "manager@royalenfield.com",
      "tatHours": 24
    },
    {
      "email": "director@royalenfield.com",
      "tatHours": 48
    },
    {
      "email": "cfo@royalenfield.com",
      "tatHours": 72
    }
  ],
  "spectators": [
    {
      "email": "hr@royalenfield.com"
    },
    {
      "email": "finance@royalenfield.com"
    }
  ]
}
```

---

### Example 2: Express Priority with Final Approver Flag

```json
{
  "templateType": "CUSTOM",
  "title": "Urgent: Server Infrastructure Upgrade",
  "description": "Critical server infrastructure upgrade required immediately",
  "priority": "EXPRESS",
  "approvalLevels": [
    {
      "email": "it-manager@royalenfield.com",
      "tatHours": 8
    },
    {
      "email": "cto@royalenfield.com",
      "tatHours": 16,
      "isFinalApprover": true
    }
  ]
}
```

---

### Example 3: With Custom Level Names

```json
{
  "templateType": "CUSTOM",
  "title": "Vendor Contract Approval",
  "description": "New vendor contract for manufacturing components",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "procurement@royalenfield.com",
      "tatHours": 24,
      "levelName": "Procurement Review"
    },
    {
      "email": "legal@royalenfield.com",
      "tatHours": 48,
      "levelName": "Legal Compliance"
    },
    {
      "email": "vp@royalenfield.com",
      "tatHours": 72,
      "levelName": "Executive Approval",
      "isFinalApprover": true
    }
  ]
}
```

---

### Example 4: Multipart with Files

**POST** `{{baseUrl}}/workflows/multipart`

**Headers:**
```
Authorization: Bearer <your_token>
```

**Body (form-data):**

| Key | Type | Value |
|-----|------|-------|
| `payload` | Text | `{"templateType":"CUSTOM","title":"Budget Request 2025","description":"Annual budget request","priority":"STANDARD","approvalLevels":[{"email":"finance-manager@royalenfield.com","tatHours":48},{"email":"cfo@royalenfield.com","tatHours":72}]}` |
| `files` | File | Select PDF/Excel file(s) |
| `category` | Text | `SUPPORTING` |
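
The same multipart request can also be sent from code. A minimal TypeScript sketch using `fetch` + `FormData` (illustrative only; `createWorkflowWithFiles` is a hypothetical helper, and the payload mirrors the table above):

```typescript
const baseUrl = 'http://localhost:5000/api/v1';

async function createWorkflowWithFiles(token: string, file: Blob, fileName: string) {
  const payload = {
    templateType: 'CUSTOM',
    title: 'Budget Request 2025',
    description: 'Annual budget request',
    priority: 'STANDARD',
    approvalLevels: [
      { email: 'finance-manager@royalenfield.com', tatHours: 48 },
      { email: 'cfo@royalenfield.com', tatHours: 72 },
    ],
  };

  const form = new FormData();
  form.append('payload', JSON.stringify(payload)); // single JSON text field
  form.append('files', file, fileName);            // repeat for multiple files
  form.append('category', 'SUPPORTING');

  const res = await fetch(`${baseUrl}/workflows/multipart`, {
    method: 'POST',
    // No Content-Type header: fetch sets the multipart boundary itself
    headers: { Authorization: `Bearer ${token}` },
    body: form,
  });
  return (await res.json()).data;
}
```
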
---

## Field Reference

### Required Fields

| Field | Type | Description | Example |
|-------|------|-------------|---------|
| `templateType` | string | Workflow type | `"CUSTOM"` or `"TEMPLATE"` |
| `title` | string | Request title (max 500 chars) | `"Purchase Order Approval"` |
| `description` | string | Detailed description | `"Approval needed for..."` |
| `priority` | string | Request priority | `"STANDARD"` or `"EXPRESS"` |
| `approvalLevels` | array | List of approvers (min 1, max 10) | See below |

### Approval Level Fields

| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `email` | string | ✅ Yes | Approver's email address |
| `tatHours` | number | ✅ Yes | Turn-around time in hours (positive number) |
| `isFinalApprover` | boolean | ❌ No | Explicitly mark as final approver (auto-detected if last level) |
| `levelName` | string | ❌ No | Custom level name (auto-generated if not provided) |

**Auto-generated `levelName` logic:**
- If approver has **designation**: `"{Designation} Approval"` (e.g., "Manager Approval")
- If approver has **department**: `"{Department} Approval"` (e.g., "Finance Approval")
- Otherwise: `"Level {N} Approval"` (e.g., "Level 1 Approval")
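
A minimal TypeScript sketch of that fallback chain (illustrative only; the optional fields follow the user details listed earlier):

```typescript
interface DirectoryUser {
  designation?: string;
  department?: string;
}

function generateLevelName(user: DirectoryUser, levelNumber: number): string {
  if (user.designation) return `${user.designation} Approval`; // e.g. "Manager Approval"
  if (user.department) return `${user.department} Approval`;   // e.g. "Finance Approval"
  return `Level ${levelNumber} Approval`;                      // e.g. "Level 1 Approval"
}
```
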
### Spectator Fields

| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `email` | string | ✅ Yes | Spectator's email address |
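
For quick reference, the request shape from the tables above can be expressed as TypeScript types (a sketch, not copied from the backend source):

```typescript
interface ApprovalLevelInput {
  email: string;             // required
  tatHours: number;          // required, positive
  isFinalApprover?: boolean; // optional; auto-detected for the last level
  levelName?: string;        // optional; auto-generated if omitted
}

interface SpectatorInput {
  email: string; // required
}

interface CreateWorkflowRequest {
  templateType: 'CUSTOM' | 'TEMPLATE';
  title: string;       // max 500 chars
  description: string;
  priority: 'STANDARD' | 'EXPRESS';
  approvalLevels: ApprovalLevelInput[]; // min 1, max 10
  spectators?: SpectatorInput[];
}
```
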
---

## Validation & Error Handling

The backend automatically validates and provides clear error messages:

### ✅ Successful Response
```json
{
  "success": true,
  "message": "Workflow created successfully",
  "data": {
    "requestId": "uuid",
    "requestNumber": "REQ-2025-12-0001",
    "title": "...",
    "status": "PENDING",
    ...
  }
}
```

### ❌ Error: Invalid Email
```json
{
  "success": false,
  "error": "Failed to create workflow",
  "details": "Approver email 'invalid@example.com' not found in organization directory. Please verify the email address."
}
```

### ❌ Error: Duplicate Email
```json
{
  "success": false,
  "error": "Failed to create workflow",
  "details": "Duplicate approver email found: manager@example.com. Each approver must have a unique email."
}
```

### ❌ Error: Invalid Initiator
```json
{
  "success": false,
  "error": "Failed to create workflow",
  "details": "Invalid initiator: User with ID '...' not found. Please ensure you are logged in with a valid account."
}
```

### ❌ Error: Validation Failed
```json
{
  "success": false,
  "error": "Validation failed",
  "details": "approvalLevels.0.email: Valid email is required; approvalLevels.0.tatHours: TAT hours must be positive"
}
```

---

## What Happens Behind the Scenes

When you create a workflow, the backend:

1. **Validates Initiator**: Ensures the logged-in user exists
2. **Enriches Approval Levels** (sketched after this list):
   - Searches for each approver in the local database
   - If not found, fetches from Okta/Azure AD
   - Creates a user record if they exist in AD but not in the DB
   - Extracts: `userId`, `displayName`, `designation`, `department`
   - Auto-generates `levelName` if not provided
   - Auto-detects `isFinalApprover` (last level = true)
3. **Enriches Spectators**:
   - Same lookup/creation process as approvers
   - Sets default permissions (view + comment, no download)
4. **Creates Workflow**:
   - Saves the workflow request
   - Creates approval levels
   - Creates participants
   - Sends notifications
   - Logs activity
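
The enrichment step (2) can be sketched roughly as follows. `findUserByEmail` and `syncUserFromOkta` are hypothetical stand-ins for the real lookups, and `generateLevelName` is the fallback sketched in the Field Reference section:

```typescript
interface DirectoryUser {
  userId: string;
  displayName: string;
  designation?: string;
  department?: string;
}

// Hypothetical stand-ins for the real DB and Okta/Azure AD lookups:
declare function findUserByEmail(email: string): Promise<DirectoryUser | null>;
declare function syncUserFromOkta(email: string): Promise<DirectoryUser | null>;
declare function generateLevelName(user: DirectoryUser, levelNumber: number): string;

interface LevelInput {
  email: string;
  tatHours: number;
  levelName?: string;
  isFinalApprover?: boolean;
}

async function enrichApprovalLevels(inputs: LevelInput[]) {
  return Promise.all(inputs.map(async (input, i) => {
    let user = await findUserByEmail(input.email);         // 1) local database
    if (!user) user = await syncUserFromOkta(input.email); // 2) directory; creates the DB record if found
    if (!user) {
      throw new Error(`Approver email '${input.email}' not found in organization directory.`);
    }
    return {
      levelNumber: i + 1,
      approverId: user.userId,
      approverEmail: input.email,
      approverName: user.displayName,
      tatHours: input.tatHours,
      levelName: input.levelName ?? generateLevelName(user, i + 1),
      isFinalApprover: input.isFinalApprover ?? i === inputs.length - 1, // last level by default
    };
  }));
}
```
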
---

## Migration from Old Format

### ❌ Old Format (No Longer Required)
```json
{
  "approvalLevels": [
    {
      "levelNumber": 1,
      "levelName": "Manager Approval",
      "approverId": "uuid-123",
      "approverEmail": "manager@example.com",
      "approverName": "John Doe",
      "tatHours": 24,
      "isFinalApprover": false
    }
  ]
}
```

### ✅ New Simplified Format
```json
{
  "approvalLevels": [
    {
      "email": "manager@example.com",
      "tatHours": 24
    }
  ]
}
```

**The backend handles everything else automatically!**

---

## Tips & Best Practices

1. **Use Valid Email Addresses**: Ensure all approver/spectator emails exist in your Okta/Azure AD
2. **TAT Hours**: Set realistic turn-around times based on priority:
   - STANDARD: 24-72 hours per level
   - EXPRESS: 8-24 hours per level
3. **Final Approver**: The last level is automatically marked as final approver (you can override with `isFinalApprover: true` on any level)
4. **Level Names**: Let the system auto-generate them based on designation/department, or provide custom names
5. **Spectators**: Add users who need visibility but not approval authority
6. **Documents**: Use the `/multipart` endpoint for file uploads

---

## Testing in Postman

1. **Set Environment Variables**:
   - `baseUrl`: `http://localhost:5000/api/v1`
   - `token`: Your auth token from login

2. **Login First**:
   - Call `POST /auth/login`
   - Copy the `token` from the response
   - Set it as an environment variable

3. **Create Workflow**:
   - Use the simplified format
   - Only provide email + tatHours
   - The backend handles the rest

4. **Check Response**:
   - Verify `requestNumber` is generated
   - Check that `approvalLevels` are enriched with user data
   - Confirm `participants` includes spectators

---

## Add Approver/Spectator After Request Creation

These endpoints allow adding approvers or spectators to an existing request. They follow the same simplified pattern: just provide an email, and the backend handles user lookup/creation.

### Add Approver at Specific Level

**POST** `{{baseUrl}}/workflows/:requestId/approvers/at-level`

**Headers:**
```
Authorization: Bearer <your_token>
Content-Type: application/json
```

**Body:**
```json
{
  "email": "newapprover@royalenfield.com",
  "tatHours": 24,
  "level": 2
}
```

**What Happens:**
- ✅ Finds the user by email in the DB, or syncs from Okta/AD if not found
- ✅ Auto-generates levelName based on designation/department
- ✅ Shifts existing levels if needed (see the sketch below the response)
- ✅ Updates the final approver flag
- ✅ Sends a notification to the new approver
- ✅ Logs activity

**Response:**
```json
{
  "success": true,
  "message": "Approver added successfully",
  "data": {
    "levelId": "uuid",
    "levelNumber": 2,
    "levelName": "Manager Approval",
    "approverId": "uuid",
    "approverEmail": "newapprover@royalenfield.com",
    "approverName": "John Doe",
    "tatHours": 24,
    "status": "PENDING"
  }
}
```
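
The level-shifting step can be sketched as below (illustrative only; the real endpoint also rejects inserts at levels that have already been completed, as shown under Error Handling):

```typescript
interface ApprovalLevel {
  levelNumber: number;
  email: string;
  tatHours: number;
  isFinalApprover: boolean;
}

function insertLevel(levels: ApprovalLevel[], incoming: ApprovalLevel, at: number): ApprovalLevel[] {
  // Every existing level at or below the insertion point moves down by one.
  const shifted = levels.map(l =>
    l.levelNumber >= at ? { ...l, levelNumber: l.levelNumber + 1 } : l
  );
  const all = [...shifted, { ...incoming, levelNumber: at }]
    .sort((a, b) => a.levelNumber - b.levelNumber);
  // The final-approver flag always lands on the (new) last level.
  return all.map((l, i) => ({ ...l, isFinalApprover: i === all.length - 1 }));
}
```
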
---

### Add Simple Approver (General)

**POST** `{{baseUrl}}/workflows/:requestId/participants/approver`

**Headers:**
```
Authorization: Bearer <your_token>
Content-Type: application/json
```

**Body:**
```json
{
  "email": "approver@royalenfield.com"
}
```

*Note: This adds them as a general approver participant, not at a specific level.*

---

### Add Spectator

**POST** `{{baseUrl}}/workflows/:requestId/participants/spectator`

**Headers:**
```
Authorization: Bearer <your_token>
Content-Type: application/json
```

**Body:**
```json
{
  "email": "spectator@royalenfield.com"
}
```

**What Happens:**
- ✅ Finds the user by email in the DB, or syncs from Okta/AD if not found
- ✅ Sets spectator permissions (view + comment, no download)
- ✅ Sends a notification to the new spectator
- ✅ Logs activity

**Response:**
```json
{
  "success": true,
  "data": {
    "participantId": "uuid",
    "userId": "uuid",
    "userEmail": "spectator@royalenfield.com",
    "userName": "Jane Doe",
    "participantType": "SPECTATOR",
    "canComment": true,
    "canViewDocuments": true,
    "canDownloadDocuments": false
  }
}
```

---

### Error Handling for Add Operations

**❌ User Not Found in AD:**
```json
{
  "success": false,
  "error": "Failed to add approver",
  "details": "Approver email 'invalid@example.com' not found in organization directory. Please verify the email address."
}
```

**❌ User Already a Participant:**
```json
{
  "success": false,
  "error": "Failed to add spectator",
  "details": "User is already a participant in this request"
}
```

**❌ Invalid Level:**
```json
{
  "success": false,
  "error": "Failed to add approver at level",
  "details": "Cannot add approver at level 1 - level has already been completed"
}
```

---

## Complete Flow Example

### 1. Login
```bash
BASE_URL=http://localhost:5000/api/v1

curl -X POST "$BASE_URL/auth/login" \
  -H "Content-Type: application/json" \
  -d '{ "email": "user@example.com", "password": "pass" }'
# Save data.token from the response into $TOKEN
```

### 2. Create Workflow (Simplified)
```bash
curl -X POST "$BASE_URL/workflows" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "templateType": "CUSTOM",
    "title": "Purchase Order",
    "description": "Office equipment",
    "priority": "STANDARD",
    "approvalLevels": [
      { "email": "manager@example.com", "tatHours": 24 }
    ]
  }'
```

### 3. Add Additional Approver (After Creation)
```bash
curl -X POST "$BASE_URL/workflows/:requestId/approvers/at-level" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{ "email": "director@example.com", "tatHours": 48, "level": 2 }'
```

### 4. Add Spectator
```bash
curl -X POST "$BASE_URL/workflows/:requestId/participants/spectator" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{ "email": "hr@example.com" }'
```

---

## Need Help?

If you encounter any issues:
1. Check the error message: it will tell you exactly what's wrong
2. Verify the emails exist in your organization directory
3. Ensure you're logged in with a valid token
4. Check the backend logs for detailed error information
POSTMAN_COLLECTION_UPDATES.md (new file, 266 lines)
@@ -0,0 +1,266 @@

# Postman Collection Updates - Simplified API

## ✅ Updated Endpoints

The Postman collection has been updated to use the **simplified API format**. Here's what changed:

---

### **1. Create Workflow (JSON) - Simplified** ✨

**Old Format (REMOVED):**
```json
{
  "requestTitle": "...",
  "requestDescription": "...",
  "requestingDepartment": "IT",
  "requestCategory": "PURCHASE_ORDER",
  "approvers": [
    { "email": "...", "tatHours": 24, "level": 1 }
  ]
}
```

**New Simplified Format:**
```json
{
  "templateType": "CUSTOM",
  "title": "Purchase Order Approval for Office Equipment",
  "description": "Approval needed for purchasing new office equipment...",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "manager@royalenfield.com",
      "tatHours": 24
    },
    {
      "email": "director@royalenfield.com",
      "tatHours": 48
    },
    {
      "email": "cfo@royalenfield.com",
      "tatHours": 72
    }
  ],
  "spectators": [
    {
      "email": "hr@royalenfield.com"
    },
    {
      "email": "finance@royalenfield.com"
    }
  ]
}
```

**What Backend Does Automatically:**
- ✅ Finds/creates users from Okta/AD
- ✅ Generates level names from designation/department
- ✅ Auto-detects final approver (last level)
- ✅ Sets proper permissions

---

### **2. Create Workflow (Multipart with Files) - Simplified** ✨

**Updated Form Data:**

| Key | Value |
|-----|-------|
| `payload` | `{"templateType":"CUSTOM","title":"...","description":"...","priority":"STANDARD","approvalLevels":[{"email":"manager@royalenfield.com","tatHours":24}],"spectators":[{"email":"hr@royalenfield.com"}]}` |
| `files` | Select file(s) |
| `category` | `SUPPORTING` (optional) |

**Changes:**
- ❌ Removed: `requestTitle`, `requestDescription`, `requestingDepartment`, `requestCategory`
- ❌ Removed: Complex approver format with level numbers
- ✅ Added: Single `payload` field with simplified JSON
- ✅ Simplified: Only `email` and `tatHours` per approver

---

### **3. Add Approver at Level - Simplified** 🆕

**NEW Endpoint Added!**

**Method:** `POST`
**URL:** `{{baseUrl}}/workflows/:id/approvers/at-level`

**Body:**
```json
{
  "email": "newapprover@royalenfield.com",
  "tatHours": 24,
  "level": 2
}
```

**What Backend Does:**
- ✅ Finds/creates user from Okta/AD
- ✅ Generates smart level name
- ✅ Shifts existing levels if needed
- ✅ Updates final approver flag
- ✅ Sends notifications

---

### **4. Add Spectator - Simplified** 🆕

**NEW Endpoint Added!**

**Method:** `POST`
**URL:** `{{baseUrl}}/workflows/:id/participants/spectator`

**Body:**
```json
{
  "email": "spectator@royalenfield.com"
}
```

**What Backend Does:**
- ✅ Finds/creates user from Okta/AD
- ✅ Sets spectator permissions (view + comment)
- ✅ Sends notification

---

## 📋 Complete Workflow Example

### Step 1: Login
```http
POST {{baseUrl}}/auth/login
Content-Type: application/json

{
  "email": "user@royalenfield.com",
  "password": "your-password"
}
```

**Response:** Save the `token` from the response

---

### Step 2: Create Workflow (Simplified)
```http
POST {{baseUrl}}/workflows
Authorization: Bearer <token>
Content-Type: application/json

{
  "templateType": "CUSTOM",
  "title": "Purchase Order - Office Equipment",
  "description": "Approval for office equipment purchase",
  "priority": "STANDARD",
  "approvalLevels": [
    {
      "email": "manager@royalenfield.com",
      "tatHours": 24
    }
  ]
}
```

**Response:** Save the `requestId` or `requestNumber`

---

### Step 3: Add Additional Approver
```http
POST {{baseUrl}}/workflows/REQ-2024-0001/approvers/at-level
Authorization: Bearer <token>
Content-Type: application/json

{
  "email": "director@royalenfield.com",
  "tatHours": 48,
  "level": 2
}
```

---

### Step 4: Add Spectator
```http
POST {{baseUrl}}/workflows/REQ-2024-0001/participants/spectator
Authorization: Bearer <token>
Content-Type: application/json

{
  "email": "hr@royalenfield.com"
}
```

---

## 🎯 Key Benefits

### Before (Old Format):
- ❌ Required user IDs and names to be supplied manually
- ❌ Complex payload structure
- ❌ Manual level naming
- ❌ Manual final approver detection

### After (New Simplified Format):
- ✅ Only email required
- ✅ Simple, clean JSON
- ✅ Auto-generated level names
- ✅ Auto-detected final approver
- ✅ Auto user creation from Okta/AD
- ✅ Clear error messages

---

## 🔧 Environment Variables

Make sure to set these in Postman:

| Variable | Value | Example |
|----------|-------|---------|
| `baseUrl` | Backend API URL | `http://localhost:5000/api/v1` |
| `token` | Auth token from login | `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...` |

---

## 📝 Notes

1. **Backward Compatible:** The backend still accepts the old format, but the new format is recommended
2. **Auto User Creation:** If a user exists in Okta/AD but not in the database, they will be created automatically
3. **Smart Level Names:** Level names are generated from:
   - User's designation (e.g., "Manager Approval")
   - User's department (e.g., "Finance Approval")
   - Fallback: "Level N Approval"
4. **Final Approver:** The last approval level is automatically marked as final approver
5. **Error Messages:** Clear, actionable error messages for invalid emails or users not found in AD

---

## ❓ Troubleshooting

### Error: "User not found in organization directory"
- **Cause:** Email doesn't exist in Okta/AD
- **Solution:** Verify the email address is correct and the user has an active account

### Error: "Duplicate approver email found"
- **Cause:** The same email was used for multiple approval levels
- **Solution:** Each approver must have a unique email

### Error: "Invalid initiator"
- **Cause:** Auth token is invalid or the user doesn't exist
- **Solution:** Re-login to get a fresh token

---

## 🚀 Quick Start

1. **Import Collection:** Import `Royal_Enfield_API_Collection.postman_collection.json` into Postman
2. **Set Environment:** Configure the `baseUrl` and `token` variables
3. **Login:** Call the login endpoint to get your token
4. **Create Workflow:** Use the "Create Workflow (JSON) - Simplified" endpoint
5. **Test:** Try adding approvers and spectators using the new simplified endpoints

---

**Updated:** December 2, 2025
**Version:** 2.0 - Simplified API Format
@@ -359,7 +359,7 @@
 }
 },
 {
-"name": "Create Workflow (JSON)",
+"name": "Create Workflow (JSON) - Simplified",
 "request": {
 "method": "POST",
 "header": [
@@ -370,18 +370,18 @@
 ],
 "body": {
 "mode": "raw",
-"raw": "{\n // Request title - brief description\n \"requestTitle\": \"Purchase Order Approval for Office Equipment\",\n \n // Detailed description of the request\n \"requestDescription\": \"Approval needed for purchasing new office equipment including laptops, monitors, and office furniture. Total budget: $50,000\",\n \n // Priority: STANDARD | EXPRESS\n \"priority\": \"STANDARD\",\n \n // Department requesting approval\n \"requestingDepartment\": \"IT\",\n \n // Category of request\n \"requestCategory\": \"PURCHASE_ORDER\",\n \n // Approvers list - array of approval levels\n \"approvers\": [\n {\n // Approver's email\n \"email\": \"manager@example.com\",\n \n // TAT (Turn Around Time) in hours\n \"tatHours\": 24,\n \n // Level number (sequential)\n \"level\": 1\n },\n {\n \"email\": \"director@example.com\",\n \"tatHours\": 48,\n \"level\": 2\n },\n {\n \"email\": \"cfo@example.com\",\n \"tatHours\": 72,\n \"level\": 3\n }\n ],\n \n // Spectators (optional) - users who can view but not approve\n \"spectators\": [\n {\n \"email\": \"hr@example.com\"\n },\n {\n \"email\": \"finance@example.com\"\n }\n ],\n \n // Document IDs (if documents uploaded separately)\n \"documentIds\": []\n}"
+"raw": "{\n \"templateType\": \"CUSTOM\",\n \"title\": \"Purchase Order Approval for Office Equipment\",\n \"description\": \"Approval needed for purchasing new office equipment including laptops, monitors, and office furniture. Total budget: $50,000\",\n \"priority\": \"STANDARD\",\n \"approvalLevels\": [\n {\n \"email\": \"manager@royalenfield.com\",\n \"tatHours\": 24\n },\n {\n \"email\": \"director@royalenfield.com\",\n \"tatHours\": 48\n },\n {\n \"email\": \"cfo@royalenfield.com\",\n \"tatHours\": 72\n }\n ],\n \"spectators\": [\n {\n \"email\": \"hr@royalenfield.com\"\n },\n {\n \"email\": \"finance@royalenfield.com\"\n }\n ]\n}"
 },
 "url": {
 "raw": "{{baseUrl}}/workflows",
 "host": ["{{baseUrl}}"],
 "path": ["workflows"]
 },
-"description": "Create new workflow request with JSON payload"
+"description": "Create new workflow request with JSON payload. Backend automatically:\n- Finds/creates users from Okta/AD\n- Generates level names from designation/department\n- Auto-detects final approver (last level)\n- Sets proper permissions\n\nOnly email and tatHours required per approver!"
 }
 },
 {
-"name": "Create Workflow (Multipart with Files)",
+"name": "Create Workflow (Multipart with Files) - Simplified",
 "request": {
 "method": "POST",
 "header": [],
@@ -389,52 +389,22 @@
 "mode": "formdata",
 "formdata": [
 {
-"key": "requestTitle",
+"key": "payload",
-"value": "Purchase Order Approval for Office Equipment",
+"value": "{\"templateType\":\"CUSTOM\",\"title\":\"Purchase Order Approval with Documents\",\"description\":\"Approval needed for office equipment purchase with supporting documents\",\"priority\":\"STANDARD\",\"approvalLevels\":[{\"email\":\"manager@royalenfield.com\",\"tatHours\":24},{\"email\":\"director@royalenfield.com\",\"tatHours\":48}],\"spectators\":[{\"email\":\"hr@royalenfield.com\"}]}",
 "type": "text",
-"description": "Request title"
+"description": "JSON payload with simplified format (email + tatHours only)"
-},
-{
-"key": "requestDescription",
-"value": "Approval needed for purchasing new office equipment",
-"type": "text",
-"description": "Detailed description"
-},
-{
-"key": "priority",
-"value": "STANDARD",
-"type": "text",
-"description": "STANDARD or EXPRESS"
-},
-{
-"key": "requestingDepartment",
-"value": "IT",
-"type": "text",
-"description": "Department name"
-},
-{
-"key": "requestCategory",
-"value": "PURCHASE_ORDER",
-"type": "text",
-"description": "Category of request"
-},
-{
-"key": "approvers",
-"value": "[{\"email\":\"manager@example.com\",\"tatHours\":24,\"level\":1},{\"email\":\"director@example.com\",\"tatHours\":48,\"level\":2}]",
-"type": "text",
-"description": "JSON array of approvers"
-},
-{
-"key": "spectators",
-"value": "[{\"email\":\"hr@example.com\"}]",
-"type": "text",
-"description": "JSON array of spectators (optional)"
 },
 {
 "key": "files",
 "type": "file",
 "src": [],
 "description": "Upload files (multiple files supported)"
+},
+{
+"key": "category",
+"value": "SUPPORTING",
+"type": "text",
+"description": "Document category: SUPPORTING | APPROVAL | REFERENCE | FINAL | OTHER"
 }
 ]
 },
@@ -443,7 +413,7 @@
 "host": ["{{baseUrl}}"],
 "path": ["workflows", "multipart"]
 },
-"description": "Create workflow with file uploads using multipart/form-data"
+"description": "Create workflow with file uploads. Backend automatically:\n- Finds/creates users from Okta/AD\n- Generates level names\n- Auto-detects final approver\n- Uploads and attaches documents\n\nOnly email and tatHours required per approver!"
 }
 },
 {
@@ -572,6 +542,64 @@
 "description": "Submit workflow for approval (changes status from DRAFT to OPEN)"
 }
 },
+{
+"name": "Add Approver at Level - Simplified",
+"request": {
+"method": "POST",
+"header": [
+{
+"key": "Content-Type",
+"value": "application/json"
+}
+],
+"body": {
+"mode": "raw",
+"raw": "{\n \"email\": \"newapprover@royalenfield.com\",\n \"tatHours\": 24,\n \"level\": 2\n}"
+},
+"url": {
+"raw": "{{baseUrl}}/workflows/:id/approvers/at-level",
+"host": ["{{baseUrl}}"],
+"path": ["workflows", ":id", "approvers", "at-level"],
+"variable": [
+{
+"key": "id",
+"value": "REQ-2024-0001",
+"description": "Workflow ID or Request Number"
+}
+]
+},
+"description": "Add a new approver at specific level. Backend automatically:\n- Finds/creates user from Okta/AD\n- Generates level name from designation/department\n- Shifts existing levels if needed\n- Updates final approver flag\n- Sends notifications\n\nOnly email, tatHours, and level required!"
+}
+},
+{
+"name": "Add Spectator - Simplified",
+"request": {
+"method": "POST",
+"header": [
+{
+"key": "Content-Type",
+"value": "application/json"
+}
+],
+"body": {
+"mode": "raw",
+"raw": "{\n \"email\": \"spectator@royalenfield.com\"\n}"
+},
+"url": {
+"raw": "{{baseUrl}}/workflows/:id/participants/spectator",
+"host": ["{{baseUrl}}"],
+"path": ["workflows", ":id", "participants", "spectator"],
+"variable": [
+{
+"key": "id",
+"value": "REQ-2024-0001",
+"description": "Workflow ID or Request Number"
+}
+]
+},
+"description": "Add a spectator to request. Backend automatically:\n- Finds/creates user from Okta/AD\n- Sets spectator permissions (view + comment, no download)\n- Sends notification\n\nOnly email required!"
+}
+},
 {
 "name": "Get Workflow Activity",
 "request": {
USER_NOTIFICATION_PREFERENCES.md (new file, 239 lines)
@@ -0,0 +1,239 @@

# User Notification Preferences

## Overview
Individual users can now control their notification preferences across three channels: **Email**, **Push**, and **In-App** notifications.

---

## Features Implemented

### ✅ Backend

1. **Database Schema**
   - Added three boolean fields to the `users` table:
     - `email_notifications_enabled` (default: true)
     - `push_notifications_enabled` (default: true)
     - `in_app_notifications_enabled` (default: true)
   - Migration file: `20251203-add-user-notification-preferences.ts`

2. **User Model Updates**
   - `Re_Backend/src/models/User.ts`
   - Added fields: `emailNotificationsEnabled`, `pushNotificationsEnabled`, `inAppNotificationsEnabled`

3. **API Endpoints**
   - **GET** `/api/v1/user/preferences/notifications` - Get current user's preferences
   - **PUT** `/api/v1/user/preferences/notifications` - Update preferences
   - Controller: `Re_Backend/src/controllers/userPreference.controller.ts`
   - Routes: `Re_Backend/src/routes/userPreference.routes.ts`
   - Validator: `Re_Backend/src/validators/userPreference.validator.ts`

4. **Notification Service Enhancement**
   - `Re_Backend/src/services/notification.service.ts`
   - Now checks user preferences before sending notifications
   - Respects individual channel settings (email, push, in-app)

### ✅ Frontend

1. **API Service**
   - `Re_Figma_Code/src/services/userPreferenceApi.ts`
   - Functions: `getNotificationPreferences()`, `updateNotificationPreferences()`

2. **UI Component**
   - `Re_Figma_Code/src/components/settings/NotificationPreferences.tsx`
   - Card-based UI with toggle switches
   - Real-time updates with loading states
   - Success/error feedback

3. **Settings Page Integration**
   - `Re_Figma_Code/src/pages/Settings/Settings.tsx`
   - Full-width notification preferences card
   - Separate browser push registration button
   - Available for both admin and regular users

---

## How It Works

### User Experience

1. **Navigate to Settings**
   - User clicks on Settings in the navigation

2. **View Notification Preferences**
   - Top card shows three toggle switches:
     - 📧 **Email Notifications** - Receive notifications via email
     - 🔔 **Push Notifications** - Receive browser push notifications
     - 💬 **In-App Notifications** - Show notifications within the application

3. **Toggle Preferences**
   - Click any switch to enable/disable that channel
   - Changes are saved immediately
   - A success message confirms the update

4. **Register Browser for Push** (separate card)
   - One-time setup per browser/device
   - Requests browser permission
   - Registers the browser endpoint for push notifications

### System Behavior

**When sending notifications:**

```typescript
// System checks user preferences
if (user.inAppNotificationsEnabled) {
  // Create in-app notification in database
  // Emit socket event for real-time delivery
}

if (user.pushNotificationsEnabled) {
  // Send browser push notification (if browser is registered)
}

if (user.emailNotificationsEnabled) {
  // Send email notification (when implemented)
}
```

**Benefits:**
- ✅ Users have full control over notification channels
- ✅ Reduces notification fatigue
- ✅ Improves user experience
- ✅ Respects user preferences while ensuring critical alerts are delivered

---

## API Documentation

### Get Notification Preferences

**Request:**
```http
GET /api/v1/user/preferences/notifications
Authorization: Bearer <token>
```

**Response:**
```json
{
  "success": true,
  "data": {
    "emailNotificationsEnabled": true,
    "pushNotificationsEnabled": true,
    "inAppNotificationsEnabled": true
  }
}
```

### Update Notification Preferences

**Request:**
```http
PUT /api/v1/user/preferences/notifications
Authorization: Bearer <token>
Content-Type: application/json

{
  "emailNotificationsEnabled": false,
  "pushNotificationsEnabled": true,
  "inAppNotificationsEnabled": true
}
```

**Response:**
```json
{
  "success": true,
  "message": "Notification preferences updated successfully",
  "data": {
    "emailNotificationsEnabled": false,
    "pushNotificationsEnabled": true,
    "inAppNotificationsEnabled": true
  }
}
```
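
A minimal sketch of the frontend service listed above (`userPreferenceApi.ts`), assuming the same `apiClient` pattern the other frontend services use; the actual implementation may differ:

```typescript
import apiClient from './authApi';

export interface NotificationPreferences {
  emailNotificationsEnabled: boolean;
  pushNotificationsEnabled: boolean;
  inAppNotificationsEnabled: boolean;
}

export async function getNotificationPreferences(): Promise<NotificationPreferences> {
  const response = await apiClient.get('/user/preferences/notifications');
  return response.data.data; // unwraps { success, data }
}

export async function updateNotificationPreferences(
  prefs: NotificationPreferences
): Promise<NotificationPreferences> {
  const response = await apiClient.put('/user/preferences/notifications', prefs);
  return response.data.data;
}
```
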
---

## Database Migration

To apply the migration:

```bash
cd Re_Backend
npm run migrate
```

This will add the three notification preference columns to the `users` table with default value `true` for all existing users.
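
For reference, a minimal sketch of what such a migration might look like, assuming a Sequelize-style `queryInterface` (the actual migration file may differ):

```typescript
import { QueryInterface, DataTypes } from 'sequelize';

const columns = [
  'email_notifications_enabled',
  'push_notifications_enabled',
  'in_app_notifications_enabled',
];

export async function up(queryInterface: QueryInterface): Promise<void> {
  for (const column of columns) {
    await queryInterface.addColumn('users', column, {
      type: DataTypes.BOOLEAN,
      allowNull: false,
      defaultValue: true, // existing users keep all channels enabled
    });
  }
}

export async function down(queryInterface: QueryInterface): Promise<void> {
  for (const column of columns) {
    await queryInterface.removeColumn('users', column);
  }
}
```
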
---

## Admin Configuration vs User Preferences

### Two Levels of Control:

1. **System-Wide (Admin Only)**
   - Settings → Configuration → Notification Rules
   - `ENABLE_EMAIL_NOTIFICATIONS` - Master switch for email
   - `ENABLE_PUSH_NOTIFICATIONS` - Master switch for push
   - If admin disables a channel, it's disabled for ALL users

2. **User-Level (Individual Users)**
   - Settings → User Settings → Notification Preferences
   - Users can disable channels for themselves
   - User preferences are respected only if admin has enabled the channel

### Logic:
```
Notification Sent = Admin Enabled AND User Enabled
```
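
A minimal sketch of that rule for the email channel; `isSystemConfigEnabled` is a hypothetical stand-in for reading the admin configuration:

```typescript
interface UserPrefs {
  emailNotificationsEnabled: boolean;
}

// Hypothetical helper: reads the admin-level master switch.
declare function isSystemConfigEnabled(key: string): Promise<boolean>;

async function shouldSendEmail(user: UserPrefs): Promise<boolean> {
  const adminEnabled = await isSystemConfigEnabled('ENABLE_EMAIL_NOTIFICATIONS');
  return adminEnabled && user.emailNotificationsEnabled; // Admin Enabled AND User Enabled
}
```
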
---

## Future Enhancements

- [ ] Email notification implementation (currently just a preference toggle)
- [ ] SMS notifications support
- [ ] Granular notification types (e.g., only approval requests, only TAT alerts)
- [ ] Quiet hours / Do Not Disturb schedules
- [ ] Notification digest/batching preferences

---

## Testing Checklist

- [x] User can view their notification preferences
- [x] User can toggle email notifications on/off
- [x] User can toggle push notifications on/off
- [x] User can toggle in-app notifications on/off
- [x] Notification service respects user preferences
- [x] In-app notifications are not created if disabled
- [x] Push notifications are not sent if disabled
- [x] UI shows loading states during updates
- [x] UI shows success/error messages
- [x] Migration adds columns with correct defaults
- [x] API endpoints require authentication
- [x] Changes persist after logout/login

---

## Files Modified/Created

### Backend
- ✅ `src/models/User.ts` - Added notification preference fields
- ✅ `src/migrations/20251203-add-user-notification-preferences.ts` - Migration
- ✅ `src/controllers/userPreference.controller.ts` - New controller
- ✅ `src/routes/userPreference.routes.ts` - New routes
- ✅ `src/validators/userPreference.validator.ts` - New validator
- ✅ `src/routes/index.ts` - Registered new routes
- ✅ `src/services/notification.service.ts` - Updated to respect preferences

### Frontend
- ✅ `src/services/userPreferenceApi.ts` - New API service
- ✅ `src/components/settings/NotificationPreferences.tsx` - New component
- ✅ `src/pages/Settings/Settings.tsx` - Integrated new component

---

**Implementation Complete! 🎉**
build/assets/conclusionApi-BgpqHPwu.js (renamed from conclusionApi-CLFPR2m0.js)
@@ -1,2 +1,2 @@
Minified bundle: only the hashed import changed (index-BJNKp6s1.js → index-CPLcj4mB.js), along with the sourceMappingURL comment.

build/assets/conclusionApi-BgpqHPwu.js.map
@@ -1 +1 @@
Source map regenerated under the new file name; the embedded sources are unchanged.

build/assets/index-BP7LmC3Z.css (new file, 1 line)
File diff suppressed because one or more lines are too long

build/assets/index-CPLcj4mB.js (new file, 64 lines)
File diff suppressed because one or more lines are too long

build/assets/index-CPLcj4mB.js.map (new file, 1 line)
File diff suppressed because one or more lines are too long
@@ -1,20 +1,20 @@
 <!DOCTYPE html>
 <html lang="en">
 <head>
 <meta charset="UTF-8" />
 <!-- CSP: Allows blob URLs for file previews and cross-origin API calls during development -->
 <meta http-equiv="Content-Security-Policy" content="default-src 'self' blob:; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; script-src 'self'; img-src 'self' data: https: blob:; connect-src 'self' blob: data: http://localhost:5000 http://localhost:3000 ws://localhost:5000 ws://localhost:3000 wss://localhost:5000 wss://localhost:3000; frame-src 'self' blob:; font-src 'self' https://fonts.gstatic.com data:; object-src 'none'; base-uri 'self'; form-action 'self';" />
 <link rel="icon" type="image/svg+xml" href="/royal_enfield_logo.svg" />
 <meta name="viewport" content="width=device-width, initial-scale=1.0" />
 <meta name="description" content="Royal Enfield Approval & Request Management Portal - Streamlined approval workflows for enterprise operations" />
 <meta name="theme-color" content="#2d4a3e" />
 <title>Royal Enfield | Approval Portal</title>

 <!-- Preload critical fonts and icons -->
 <link rel="preconnect" href="https://fonts.googleapis.com">
 <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>

 <!-- Ensure proper icon rendering and layout -->
 <style>
 /* Ensure Lucide icons render properly */
 svg {
@@ -51,8 +51,8 @@
 transform: scale(1.05);
 transition: transform 0.2s ease;
 }
 </style>
-<script type="module" crossorigin src="/assets/index-BJNKp6s1.js"></script>
+<script type="module" crossorigin src="/assets/index-CPLcj4mB.js"></script>
 <link rel="modulepreload" crossorigin href="/assets/charts-vendor-Cji9-Yri.js">
 <link rel="modulepreload" crossorigin href="/assets/radix-vendor-C2EbRL2a.js">
 <link rel="modulepreload" crossorigin href="/assets/utils-vendor-DHm03ykU.js">
@@ -60,10 +60,10 @@
 <link rel="modulepreload" crossorigin href="/assets/socket-vendor-TjCxX7sJ.js">
 <link rel="modulepreload" crossorigin href="/assets/redux-vendor-tbZCm13o.js">
 <link rel="modulepreload" crossorigin href="/assets/router-vendor-1fSSvDCY.js">
-<link rel="stylesheet" crossorigin href="/assets/index-DD2tGQ-m.css">
+<link rel="stylesheet" crossorigin href="/assets/index-BP7LmC3Z.css">
 </head>
 <body>
 <div id="root"></div>
 </body>
 </html>
|||||||
228
docker-compose.full.yml
Normal file
228
docker-compose.full.yml
Normal file
@ -0,0 +1,228 @@

# =============================================================================
# RE Workflow - Full Stack Docker Compose
# Includes: Application + Database + Monitoring Stack
# =============================================================================
# Usage:
#   docker-compose -f docker-compose.full.yml up -d
# =============================================================================

version: '3.8'

services:
  # ===========================================================================
  # APPLICATION SERVICES
  # ===========================================================================

  postgres:
    image: postgres:16-alpine
    container_name: re_workflow_db
    environment:
      POSTGRES_USER: ${DB_USER:-laxman}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-Admin@123}
      POSTGRES_DB: ${DB_NAME:-re_workflow_db}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./database/schema:/docker-entrypoint-initdb.d
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-laxman}"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    container_name: re_workflow_redis
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  backend:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: re_workflow_backend
    environment:
      NODE_ENV: development
      DB_HOST: postgres
      DB_PORT: 5432
      DB_USER: ${DB_USER:-laxman}
      DB_PASSWORD: ${DB_PASSWORD:-Admin@123}
      DB_NAME: ${DB_NAME:-re_workflow_db}
      REDIS_URL: redis://redis:6379
      PORT: 5000
      # Loki for logging
      LOKI_HOST: http://loki:3100
    ports:
      - "5000:5000"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    volumes:
      - ./logs:/app/logs
      - ./uploads:/app/uploads
    networks:
      - re_workflow_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:5000/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})\""]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ===========================================================================
  # MONITORING SERVICES
  # ===========================================================================

  prometheus:
    image: prom/prometheus:v2.47.2
    container_name: re_prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./monitoring/prometheus/alert.rules.yml:/etc/prometheus/alert.rules.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
|
||||||
|
- '--storage.tsdb.retention.time=15d'
|
||||||
|
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
|
||||||
|
- '--web.console.templates=/usr/share/prometheus/consoles'
|
||||||
|
- '--web.enable-lifecycle'
|
||||||
|
networks:
|
||||||
|
- re_workflow_network
|
||||||
|
restart: unless-stopped
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
|
||||||
|
loki:
|
||||||
|
image: grafana/loki:2.9.2
|
||||||
|
container_name: re_loki
|
||||||
|
ports:
|
||||||
|
- "3100:3100"
|
||||||
|
volumes:
|
||||||
|
- ./monitoring/loki/loki-config.yml:/etc/loki/local-config.yaml:ro
|
||||||
|
- loki_data:/loki
|
||||||
|
command: -config.file=/etc/loki/local-config.yaml
|
||||||
|
networks:
|
||||||
|
- re_workflow_network
|
||||||
|
restart: unless-stopped
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 5
|
||||||
|
|
||||||
|
promtail:
|
||||||
|
image: grafana/promtail:2.9.2
|
||||||
|
container_name: re_promtail
|
||||||
|
volumes:
|
||||||
|
- ./monitoring/promtail/promtail-config.yml:/etc/promtail/config.yml:ro
|
||||||
|
- ./logs:/var/log/app:ro
|
||||||
|
- promtail_data:/tmp/promtail
|
||||||
|
command: -config.file=/etc/promtail/config.yml
|
||||||
|
depends_on:
|
||||||
|
- loki
|
||||||
|
networks:
|
||||||
|
- re_workflow_network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
grafana:
|
||||||
|
image: grafana/grafana:10.2.2
|
||||||
|
container_name: re_grafana
|
||||||
|
ports:
|
||||||
|
- "3001:3000"
|
||||||
|
environment:
|
||||||
|
- GF_SECURITY_ADMIN_USER=admin
|
||||||
|
- GF_SECURITY_ADMIN_PASSWORD=REWorkflow@2024
|
||||||
|
- GF_USERS_ALLOW_SIGN_UP=false
|
||||||
|
- GF_FEATURE_TOGGLES_ENABLE=publicDashboards
|
||||||
|
- GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,grafana-piechart-panel
|
||||||
|
volumes:
|
||||||
|
- grafana_data:/var/lib/grafana
|
||||||
|
- ./monitoring/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
|
||||||
|
- ./monitoring/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
|
||||||
|
- ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
|
||||||
|
depends_on:
|
||||||
|
- prometheus
|
||||||
|
- loki
|
||||||
|
networks:
|
||||||
|
- re_workflow_network
|
||||||
|
restart: unless-stopped
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
|
||||||
|
node-exporter:
|
||||||
|
image: prom/node-exporter:v1.6.1
|
||||||
|
container_name: re_node_exporter
|
||||||
|
ports:
|
||||||
|
- "9100:9100"
|
||||||
|
networks:
|
||||||
|
- re_workflow_network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
alertmanager:
|
||||||
|
image: prom/alertmanager:v0.26.0
|
||||||
|
container_name: re_alertmanager
|
||||||
|
ports:
|
||||||
|
- "9093:9093"
|
||||||
|
volumes:
|
||||||
|
- ./monitoring/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
|
||||||
|
- alertmanager_data:/alertmanager
|
||||||
|
command:
|
||||||
|
- '--config.file=/etc/alertmanager/alertmanager.yml'
|
||||||
|
- '--storage.path=/alertmanager'
|
||||||
|
networks:
|
||||||
|
- re_workflow_network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# NETWORKS
|
||||||
|
# ===========================================================================
|
||||||
|
networks:
|
||||||
|
re_workflow_network:
|
||||||
|
driver: bridge
|
||||||
|
name: re_workflow_network
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# VOLUMES
|
||||||
|
# ===========================================================================
|
||||||
|
volumes:
|
||||||
|
postgres_data:
|
||||||
|
name: re_postgres_data
|
||||||
|
redis_data:
|
||||||
|
name: re_redis_data
|
||||||
|
prometheus_data:
|
||||||
|
name: re_prometheus_data
|
||||||
|
loki_data:
|
||||||
|
name: re_loki_data
|
||||||
|
promtail_data:
|
||||||
|
name: re_promtail_data
|
||||||
|
grafana_data:
|
||||||
|
name: re_grafana_data
|
||||||
|
alertmanager_data:
|
||||||
|
name: re_alertmanager_data
|
||||||
|
|
||||||
326
docs/GCP_STORAGE_SETUP.md
Normal file
@ -0,0 +1,326 @@
# GCP Cloud Storage Setup Guide for RE Workflow

## Project Information

| Item | Value |
|------|-------|
| **Application** | RE Workflow System |
| **Environment** | UAT |
| **Domain** | https://reflow-uat.royalenfield.com |
| **Purpose** | Store workflow documents and attachments |

---

## 1. Requirements Overview

The RE Workflow application needs Google Cloud Storage to store:
- Request documents (uploaded during workflow creation)
- Attachments (added during the approval process)
- Supporting documents

### Folder Structure in Bucket

```
reflow-documents-uat/
├── requests/
│   ├── REQ-2025-12-0001/
│   │   ├── documents/
│   │   │   ├── proposal.pdf
│   │   │   └── specification.docx
│   │   └── attachments/
│   │       ├── approval_note.pdf
│   │       └── signature.png
│   │
│   ├── REQ-2025-12-0002/
│   │   ├── documents/
│   │   │   └── budget_report.xlsx
│   │   └── attachments/
│   │       └── manager_approval.pdf
│   │
│   └── REQ-2025-12-0003/
│       ├── documents/
│       └── attachments/
│
└── temp/
    └── (temporary uploads before processing)
```

---

## 2. GCP Bucket Configuration

### 2.1 Create Bucket

| Setting | Value |
|---------|-------|
| **Bucket Name** | `reflow-documents-uat` (UAT) / `reflow-documents-prod` (Production) |
| **Location Type** | Region |
| **Region** | `asia-south1` (Mumbai) |
| **Storage Class** | Standard |
| **Access Control** | Uniform |
| **Public Access Prevention** | Enforced (Block all public access) |

### 2.2 Console Commands (gcloud CLI)

```bash
# Create bucket
gcloud storage buckets create gs://reflow-documents-uat \
  --project=YOUR_PROJECT_ID \
  --location=asia-south1 \
  --uniform-bucket-level-access

# Block public access
gcloud storage buckets update gs://reflow-documents-uat \
  --public-access-prevention
```

---

## 3. Service Account Setup

### 3.1 Create Service Account

| Setting | Value |
|---------|-------|
| **Name** | `reflow-storage-sa` |
| **Description** | Service account for RE Workflow file storage |

```bash
# Create service account
gcloud iam service-accounts create reflow-storage-sa \
  --display-name="RE Workflow Storage Service Account" \
  --project=YOUR_PROJECT_ID
```

### 3.2 Assign Permissions

The service account needs these roles:

| Role | Purpose |
|------|---------|
| `roles/storage.objectCreator` | Upload files |
| `roles/storage.objectViewer` | Download/preview files |
| `roles/storage.objectAdmin` | Delete files |

```bash
# Grant permissions
gcloud projects add-iam-policy-binding YOUR_PROJECT_ID \
  --member="serviceAccount:reflow-storage-sa@YOUR_PROJECT_ID.iam.gserviceaccount.com" \
  --role="roles/storage.objectAdmin"
```
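
Note that `roles/storage.objectAdmin` already includes the create and view capabilities in the table above, so this single binding covers all three rows. If a tighter scope is preferred, the same role can be bound on the bucket itself instead of the whole project; a minimal sketch, with bucket and account names as above:

```bash
# Sketch: scope the binding to the bucket rather than the project.
gcloud storage buckets add-iam-policy-binding gs://reflow-documents-uat \
  --member="serviceAccount:reflow-storage-sa@YOUR_PROJECT_ID.iam.gserviceaccount.com" \
  --role="roles/storage.objectAdmin"
```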

### 3.3 Generate JSON Key

```bash
# Generate key file
gcloud iam service-accounts keys create gcp-key.json \
  --iam-account=reflow-storage-sa@YOUR_PROJECT_ID.iam.gserviceaccount.com
```

⚠️ **Security:** Share this key file securely (not via email). Use a secure file transfer method.
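
Before handing the key over, it is worth confirming it actually authenticates. A quick check with standard gcloud commands (bucket name from section 2):

```bash
# Activate the key, then list the bucket with it.
gcloud auth activate-service-account --key-file=gcp-key.json
gcloud storage ls gs://reflow-documents-uat/

# Switch back to your regular account afterwards, e.g.:
# gcloud config set account YOUR_USER@royalenfield.com
```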

---

## 4. CORS Configuration

Apply this CORS policy to allow browser uploads:

### 4.1 Create `cors-config.json`

```json
[
  {
    "origin": [
      "https://reflow-uat.royalenfield.com",
      "https://reflow.royalenfield.com"
    ],
    "method": ["GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS"],
    "responseHeader": [
      "Content-Type",
      "Content-Disposition",
      "Content-Length",
      "Cache-Control",
      "x-goog-meta-*"
    ],
    "maxAgeSeconds": 3600
  }
]
```

### 4.2 Apply CORS Policy

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --cors-file=cors-config.json
```
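
To confirm the policy took effect, inspect the bucket configuration, and optionally simulate a browser preflight request (the object path here is illustrative):

```bash
# Show the bucket configuration, including the applied CORS rules
gcloud storage buckets describe gs://reflow-documents-uat

# Optional: simulate a browser preflight against an object URL
curl -i -X OPTIONS \
  -H "Origin: https://reflow-uat.royalenfield.com" \
  -H "Access-Control-Request-Method: PUT" \
  "https://storage.googleapis.com/reflow-documents-uat/temp/test.txt"
```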

---

## 5. Lifecycle Rules (Optional but Recommended)

### 5.1 Auto-delete Temporary Files

Delete files in the `temp/` folder after 24 hours. Save this rule as `lifecycle-config.json` (the `age` condition is measured in days, so `1` means 24 hours):

```json
{
  "lifecycle": {
    "rule": [
      {
        "action": { "type": "Delete" },
        "condition": {
          "age": 1,
          "matchesPrefix": ["temp/"]
        }
      }
    ]
  }
}
```

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --lifecycle-file=lifecycle-config.json
```

---

## 6. Bucket Versioning (Recommended)

Enable versioning for accidental-delete recovery:

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --versioning
```

---

## 7. Deliverables to Application Team

Please provide the following to the development team:

### 7.1 Environment Variables

| Variable | Value |
|----------|-------|
| `GCP_PROJECT_ID` | `your-gcp-project-id` |
| `GCP_BUCKET_NAME` | `reflow-documents-uat` |
| `GCP_KEY_FILE` | `./config/gcp-key.json` |

### 7.2 Files to Share

| File | Description | How to Share |
|------|-------------|--------------|
| `gcp-key.json` | Service account key | Secure transfer (not email) |

---

## 8. Verification Steps

After setup, verify with:

```bash
# List bucket contents
gcloud storage ls gs://reflow-documents-uat/

# Test upload
echo "test" > test.txt
gcloud storage cp test.txt gs://reflow-documents-uat/temp/

# Test download
gcloud storage cp gs://reflow-documents-uat/temp/test.txt ./downloaded.txt

# Test delete
gcloud storage rm gs://reflow-documents-uat/temp/test.txt
```

---

## 9. Environment-Specific Buckets

| Environment | Bucket Name | Region |
|-------------|-------------|--------|
| Development | `reflow-documents-dev` | asia-south1 |
| UAT | `reflow-documents-uat` | asia-south1 |
| Production | `reflow-documents-prod` | asia-south1 |

---

## 10. Monitoring & Alerts (Optional)

### 10.1 Enable Logging

```bash
gcloud storage buckets update gs://reflow-documents-uat \
  --log-bucket=gs://your-logging-bucket \
  --log-object-prefix=reflow-storage-logs/
```

### 10.2 Storage Alerts

Set up alerts for:
- Storage exceeding 80% of quota
- Unusual download patterns
- Failed access attempts

---

## 11. Cost Estimation

| Item | Estimate (Monthly) |
|------|-------------------|
| Storage (100GB) | ~$2.00 |
| Operations (10K) | ~$0.05 |
| Network Egress | Varies by usage |

---

## 12. Security Checklist

- [ ] Public access prevention enabled
- [ ] Service account has minimal required permissions
- [ ] JSON key stored securely (not in Git)
- [ ] CORS configured for specific domains only
- [ ] Bucket versioning enabled
- [ ] Lifecycle rules for temp files
- [ ] Access logging enabled

---

## 13. Contact

| Role | Contact |
|------|---------|
| Application Team | [Your Email] |
| DevOps Team | [DevOps Email] |

---

## Appendix: Quick Reference

### GCP Console URLs

- **Buckets:** https://console.cloud.google.com/storage/browser
- **Service Accounts:** https://console.cloud.google.com/iam-admin/serviceaccounts
- **IAM:** https://console.cloud.google.com/iam-admin/iam

### gcloud Commands Summary

```bash
# Create bucket
gcloud storage buckets create gs://BUCKET_NAME --location=asia-south1

# Create service account
gcloud iam service-accounts create SA_NAME

# Generate key
gcloud iam service-accounts keys create key.json --iam-account=SA@PROJECT.iam.gserviceaccount.com

# Set CORS
gcloud storage buckets update gs://BUCKET_NAME --cors-file=cors.json

# Enable versioning
gcloud storage buckets update gs://BUCKET_NAME --versioning
```
726
docs/LOKI_DEPLOYMENT_GUIDE.md
Normal file
@ -0,0 +1,726 @@
# Loki + Grafana Deployment Guide for RE Workflow

## Overview

This guide covers deploying **Loki with Grafana** for log aggregation in the RE Workflow application.

```
┌─────────────────────────┐            ┌─────────────────────────┐
│  RE Workflow Backend    │──────────▶ │          Loki           │
│  (Node.js + Winston)    │    HTTP    │      (Log Storage)      │
└─────────────────────────┘    :3100   └───────────┬─────────────┘
                                                   │
                                       ┌───────────▼─────────────┐
                                       │         Grafana         │
                                       │  monitoring.cloudtopiaa │
                                       │     (Your existing!)    │
                                       └─────────────────────────┘
```

**Why Loki + Grafana?**
- ✅ Lightweight - designed for logs (unlike ELK)
- ✅ Uses your existing Grafana instance
- ✅ PromQL-style query language (LogQL)
- ✅ Cost-effective - indexes labels, not content

---

# Part 1: Windows Development Setup

## Prerequisites (Windows)

- Docker Desktop for Windows installed
- WSL2 enabled (recommended)
- 4GB+ RAM available for Docker

---

## Step 1: Install Docker Desktop

1. Download from: https://www.docker.com/products/docker-desktop/
2. Run the installer
3. Enable WSL2 integration when prompted
4. Restart the computer

---

## Step 2: Create Project Directory

Open PowerShell as Administrator:

```powershell
# Create directory
mkdir C:\loki
cd C:\loki
```

---

## Step 3: Create Loki Configuration (Windows)

Create the file `C:\loki\loki-config.yaml`:

```powershell
# Using PowerShell
notepad C:\loki\loki-config.yaml
```

**Paste this configuration:**

```yaml
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

limits_config:
  retention_period: 7d
  ingestion_rate_mb: 10
  ingestion_burst_size_mb: 20
```

---

## Step 4: Create Docker Compose (Windows)

Create the file `C:\loki\docker-compose.yml`:

```powershell
notepad C:\loki\docker-compose.yml
```

**Paste this configuration:**

```yaml
version: '3.8'

services:
  loki:
    image: grafana/loki:2.9.2
    container_name: loki
    ports:
      - "3100:3100"
    volumes:
      - ./loki-config.yaml:/etc/loki/local-config.yaml
      - loki-data:/loki
    command: -config.file=/etc/loki/local-config.yaml
    restart: unless-stopped

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    ports:
      - "3001:3000"  # Using 3001 since 3000 is used by the React frontend
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin123
    volumes:
      - grafana-data:/var/lib/grafana
    depends_on:
      - loki
    restart: unless-stopped

volumes:
  loki-data:
  grafana-data:
```

---

## Step 5: Start Services (Windows)

```powershell
cd C:\loki
docker-compose up -d
```

**Wait 30 seconds for the services to initialize.**

---

## Step 6: Verify Services (Windows)

```powershell
# Check containers are running
docker ps

# Test Loki health
Invoke-WebRequest -Uri http://localhost:3100/ready

# Or using curl (if installed)
curl http://localhost:3100/ready
```

---

## Step 7: Configure Grafana (Windows Dev)

1. Open a browser: `http://localhost:3001` *(port 3001 to avoid conflict with React on 3000)*
2. Login: `admin` / `admin123`
3. Go to: **Connections → Data Sources → Add data source**
4. Select: **Loki**
5. Configure:
   - URL: `http://loki:3100`
6. Click: **Save & Test**

---

## Step 8: Configure Backend .env (Windows Dev)

```env
# Development - Local Loki
LOKI_HOST=http://localhost:3100
```
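
Before wiring up Winston, you can confirm end-to-end ingestion by pushing a test line straight to Loki's HTTP push API. A minimal sketch from a bash shell (Git Bash or WSL on Windows); the `app="re-workflow"` label is an assumption matching the queries later in this guide, and the timestamp must be nanoseconds since epoch as a string:

```bash
# Push one test log line to Loki
curl -X POST http://localhost:3100/loki/api/v1/push \
  -H "Content-Type: application/json" \
  -d "{\"streams\":[{\"stream\":{\"app\":\"re-workflow\"},\"values\":[[\"$(date +%s%N)\",\"hello from curl\"]]}]}"

# The app label should now show up
curl http://localhost:3100/loki/api/v1/labels
```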

---

## Windows Commands Reference

| Command | Purpose |
|---------|---------|
| `docker-compose up -d` | Start Loki + Grafana |
| `docker-compose down` | Stop services |
| `docker-compose logs -f loki` | View Loki logs |
| `docker-compose restart` | Restart services |
| `docker ps` | Check running containers |

---

# Part 2: Linux Production Setup (DevOps)

## Prerequisites (Linux)

- Ubuntu 20.04+ / CentOS 7+ / RHEL 8+
- Docker & Docker Compose installed
- 2GB+ RAM (4GB recommended)
- 10GB+ disk space
- Grafana running at `http://monitoring.cloudtopiaa.com/`

---

## Step 1: Install Docker (if not installed)

**Ubuntu/Debian:**
```bash
# Update packages
sudo apt update

# Install Docker
sudo apt install -y docker.io docker-compose

# Start Docker
sudo systemctl start docker
sudo systemctl enable docker

# Add user to docker group
sudo usermod -aG docker $USER
```

**CentOS/RHEL:**
```bash
# Install Docker
sudo yum install -y docker docker-compose

# Start Docker
sudo systemctl start docker
sudo systemctl enable docker
```

---

## Step 2: Create Loki Directory

```bash
sudo mkdir -p /opt/loki
cd /opt/loki
```

---

## Step 3: Create Loki Configuration (Linux)

```bash
sudo nano /opt/loki/loki-config.yaml
```

**Paste this configuration:**

```yaml
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  instance_addr: 127.0.0.1
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://localhost:9093

limits_config:
  retention_period: 30d
  ingestion_rate_mb: 10
  ingestion_burst_size_mb: 20

# Storage retention
compactor:
  working_directory: /tmp/loki/compactor
  retention_enabled: true
  retention_delete_delay: 2h
  delete_request_store: filesystem
```

---

## Step 4: Create Docker Compose (Linux Production)

```bash
sudo nano /opt/loki/docker-compose.yml
```

**Paste this configuration (Loki only - uses the existing Grafana):**

```yaml
version: '3.8'

services:
  loki:
    image: grafana/loki:2.9.2
    container_name: loki
    ports:
      - "3100:3100"
    volumes:
      - ./loki-config.yaml:/etc/loki/local-config.yaml
      - loki-data:/tmp/loki
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - monitoring
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

networks:
  monitoring:
    driver: bridge

volumes:
  loki-data:
    driver: local
```

---

## Step 5: Start Loki (Linux)

```bash
cd /opt/loki
sudo docker-compose up -d
```

**Wait 30 seconds for Loki to initialize.**

---

## Step 6: Verify Loki (Linux)

```bash
# Check container
sudo docker ps | grep loki

# Test Loki health
curl http://localhost:3100/ready

# Test Loki is accepting logs
curl http://localhost:3100/loki/api/v1/labels
```

**Expected response:**
```json
{"status":"success","data":[]}
```

---

## Step 7: Open Firewall Port (Linux)

**Ubuntu/Debian:**
```bash
sudo ufw allow 3100/tcp
sudo ufw reload
```

**CentOS/RHEL:**
```bash
sudo firewall-cmd --permanent --add-port=3100/tcp
sudo firewall-cmd --reload
```

---

## Step 8: Add Loki to Existing Grafana

1. **Open Grafana:** `http://monitoring.cloudtopiaa.com/`
2. **Login** with admin credentials
3. **Go to:** Connections → Data Sources → Add data source
4. **Select:** Loki
5. **Configure:**

| Field | Value |
|-------|-------|
| Name | `RE-Workflow-Logs` |
| URL | `http://<loki-server-ip>:3100` |
| Timeout | `60` |

6. **Click:** Save & Test
7. **You should see:** ✅ "Data source successfully connected"

---

## Step 9: Configure Backend .env (Production)

```env
# Production - Remote Loki
LOKI_HOST=http://<loki-server-ip>:3100
# LOKI_USER=      # Optional: if basic auth enabled
# LOKI_PASSWORD=  # Optional: if basic auth enabled
```

---

## Linux Commands Reference

| Command | Purpose |
|---------|---------|
| `sudo docker-compose up -d` | Start Loki |
| `sudo docker-compose down` | Stop Loki |
| `sudo docker-compose logs -f` | View logs |
| `sudo docker-compose restart` | Restart |
| `sudo docker ps` | Check containers |

---

## Step 10: Enable Basic Auth (Optional - Production)

For added security, enable basic auth:

```bash
# Install apache2-utils for htpasswd
sudo apt install apache2-utils

# Create password file
sudo htpasswd -c /opt/loki/.htpasswd lokiuser

# Update docker-compose.yml to put an nginx reverse proxy with auth in front of Loki
```
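
Once the reverse proxy is in place (its configuration is not shown here), a quick sanity check is that unauthenticated requests are rejected while the credentials created above are accepted. This sketch assumes the proxy fronts Loki on port 3100:

```bash
# Without credentials: expect HTTP 401 from the proxy
curl -i http://localhost:3100/ready

# With the credentials created above: expect "ready"
curl -i -u lokiuser:YOUR_PASSWORD http://localhost:3100/ready
```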

---

# Part 3: Grafana Dashboard Setup

## Create Dashboard

1. Go to: `http://monitoring.cloudtopiaa.com/dashboards` (or `http://localhost:3001` for dev)
2. Click: **New → New Dashboard**
3. Add panels as described below

---

### Panel 1: Error Count (Stat)

**Query (LogQL):**
```
count_over_time({app="re-workflow"} |= "error" [24h])
```
- Visualization: **Stat**
- Title: "Errors (24h)"

---

### Panel 2: Error Timeline (Time Series)

**Query (LogQL):**
```
sum by (level) (count_over_time({app="re-workflow"} | json | level=~"error|warn" [5m]))
```
- Visualization: **Time Series**
- Title: "Errors Over Time"

---

### Panel 3: Recent Errors (Logs)

**Query (LogQL):**
```
{app="re-workflow"} | json | level="error"
```
- Visualization: **Logs**
- Title: "Recent Errors"

---

### Panel 4: TAT Breaches (Stat)

**Query (LogQL):**
```
count_over_time({app="re-workflow"} | json | tatEvent="breached" [24h])
```
- Visualization: **Stat**
- Title: "TAT Breaches"
- Color: Red

---

### Panel 5: Workflow Events (Pie)

**Query (LogQL):**
```
sum by (workflowEvent) (count_over_time({app="re-workflow"} | json | workflowEvent!="" [24h]))
```
- Visualization: **Pie Chart**
- Title: "Workflow Events"

---

### Panel 6: Auth Failures (Table)

**Query (LogQL):**
```
{app="re-workflow"} | json | authEvent="auth_failure"
```
- Visualization: **Table**
- Title: "Authentication Failures"

---

## Useful LogQL Queries

| Purpose | Query |
|---------|-------|
| All errors | `{app="re-workflow"} \| json \| level="error"` |
| Specific request | `{app="re-workflow"} \| json \| requestId="REQ-2024-001"` |
| User activity | `{app="re-workflow"} \| json \| userId="user-123"` |
| TAT breaches | `{app="re-workflow"} \| json \| tatEvent="breached"` |
| Auth failures | `{app="re-workflow"} \| json \| authEvent="auth_failure"` |
| Workflow created | `{app="re-workflow"} \| json \| workflowEvent="created"` |
| API errors (5xx) | `{app="re-workflow"} \| json \| statusCode>=500` |
| Slow requests | `{app="re-workflow"} \| json \| duration>3000` |
| Error rate | `sum(rate({app="re-workflow"} \| json \| level="error"[5m]))` |
| By department | `{app="re-workflow"} \| json \| department="Engineering"` |
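
The same queries can also be run outside Grafana against Loki's HTTP API, which is handy for scripting. A sketch using the `query_range` endpoint (GNU `date` assumed for the epoch-nanosecond bounds):

```bash
# Last 10 error lines from the past hour, via Loki's HTTP API
curl -G http://localhost:3100/loki/api/v1/query_range \
  --data-urlencode 'query={app="re-workflow"} | json | level="error"' \
  --data-urlencode 'limit=10' \
  --data-urlencode "start=$(date -d '1 hour ago' +%s)000000000" \
  --data-urlencode "end=$(date +%s)000000000"
```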

---

# Part 4: Alerting Setup

## Alert 1: High Error Rate

1. Go to: **Alerting → Alert Rules → New Alert Rule**
2. Configure:
   - Name: `RE Workflow - High Error Rate`
   - Data source: `RE-Workflow-Logs`
   - Query: `count_over_time({app="re-workflow"} | json | level="error" [5m])`
   - Condition: IS ABOVE 10
3. Add a notification channel (Slack, Email)

## Alert 2: TAT Breach

1. Create a new alert rule
2. Configure:
   - Name: `RE Workflow - TAT Breach`
   - Query: `count_over_time({app="re-workflow"} | json | tatEvent="breached" [15m])`
   - Condition: IS ABOVE 0
3. Add a notification channel

## Alert 3: Auth Attack Detection

1. Create a new alert rule
2. Configure:
   - Name: `RE Workflow - Auth Attack`
   - Query: `count_over_time({app="re-workflow"} | json | authEvent="auth_failure" [5m])`
   - Condition: IS ABOVE 20
3. Add a notification channel for the Security team
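
Before relying on the notifications, each alert expression can be evaluated by hand against Loki's instant-query endpoint to confirm it returns the counts you expect; for example, for Alert 3:

```bash
# Manually evaluate the auth-attack expression; a non-empty result
# vector means the alert condition has data to fire on.
curl -G http://localhost:3100/loki/api/v1/query \
  --data-urlencode 'query=count_over_time({app="re-workflow"} | json | authEvent="auth_failure" [5m])'
```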

---

# Part 5: Troubleshooting

## Windows Issues

### Docker Desktop not starting
```powershell
# Restart the Docker Desktop service
Restart-Service docker

# Or restart Docker Desktop from the system tray
```

### Port 3100 already in use
```powershell
# Find the process using the port
netstat -ano | findstr :3100

# Kill the process
taskkill /PID <pid> /F
```

### WSL2 issues
```powershell
# Update WSL
wsl --update

# Restart WSL
wsl --shutdown
```

---

## Linux Issues

### Loki won't start

```bash
# Check logs
sudo docker logs loki

# Common fix - permissions
sudo chown -R 10001:10001 /opt/loki
```

### Grafana can't connect to Loki

```bash
# Verify Loki is healthy
curl http://localhost:3100/ready

# Check network reachability from the Grafana server
curl http://loki-server:3100/ready

# Restart Loki
sudo docker-compose restart
```

### Logs not appearing in Grafana

1. Check that the application env has the correct `LOKI_HOST`
2. Verify network connectivity: `curl http://loki:3100/ready`
3. Check labels: `curl http://localhost:3100/loki/api/v1/labels`
4. Wait for the application to send its first logs

### High memory usage

Reduce the retention period in `loki-config.yaml`:

```yaml
limits_config:
  retention_period: 7d  # Reduce from 30d
```

---

# Quick Reference

## Environment Comparison

| Setting | Windows Dev | Linux Production |
|---------|-------------|------------------|
| LOKI_HOST | `http://localhost:3100` | `http://<server-ip>:3100` |
| Grafana URL | `http://localhost:3001` | `http://monitoring.cloudtopiaa.com` |
| Config Path | `C:\loki\` | `/opt/loki/` |
| Retention | 7 days | 30 days |

## Port Reference

| Service | Port | URL |
|---------|------|-----|
| Loki | 3100 | `http://server:3100` |
| Grafana (Dev) | 3001 | `http://localhost:3001` |
| Grafana (Prod) | 80/443 | `http://monitoring.cloudtopiaa.com/` |
| React Frontend | 3000 | `http://localhost:3000` |

---

# Verification Checklist

## Windows Development
- [ ] Docker Desktop running
- [ ] `docker ps` shows loki and grafana containers
- [ ] `http://localhost:3100/ready` returns "ready"
- [ ] `http://localhost:3001` shows the Grafana login
- [ ] Loki data source connected in Grafana
- [ ] Backend `.env` has `LOKI_HOST=http://localhost:3100`

## Linux Production
- [ ] Loki container running (`docker ps`)
- [ ] `curl localhost:3100/ready` returns "ready"
- [ ] Firewall port 3100 open
- [ ] Grafana connected to Loki
- [ ] Backend `.env` has the correct `LOKI_HOST`
- [ ] Logs appearing in Grafana Explore
- [ ] Dashboard created
- [ ] Alerts configured

---

# Contact

For issues with this setup:
- Backend logs: Check the Grafana dashboard
- Infrastructure: Contact the DevOps team
@ -52,6 +52,12 @@ GEMINI_MODEL=gemini-2.0-flash-lite
 # Logging
 LOG_LEVEL=info
 LOG_FILE_PATH=./logs
+APP_VERSION=1.2.0
+
+# ============ Loki Configuration (Grafana Log Aggregation) ============
+LOKI_HOST=       # e.g., http://loki:3100 or http://monitoring.cloudtopiaa.com:3100
+LOKI_USER=       # Optional: Basic auth username
+LOKI_PASSWORD=   # Optional: Basic auth password
 
 # CORS
 CORS_ORIGIN="*"
248
monitoring/README.md
Normal file
@ -0,0 +1,248 @@
# RE Workflow Monitoring Stack

Complete monitoring solution with **Grafana**, **Prometheus**, **Loki**, and **Promtail** for the RE Workflow Management System.

## 🏗️ Architecture

```
┌────────────────────────────────────────────────────────────────────────┐
│                           RE Workflow System                           │
├────────────────────────────────────────────────────────────────────────┤
│                                                                        │
│  ┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐    │
│  │   Node.js API   │────│   PostgreSQL    │────│      Redis      │    │
│  │   (Port 5000)   │    │   (Port 5432)   │    │   (Port 6379)   │    │
│  └────────┬────────┘    └─────────────────┘    └─────────────────┘    │
│           │                                                            │
│           │ /metrics endpoint                                          │
│           │ Log files (./logs/)                                        │
│           ▼                                                            │
│  ┌─────────────────────────────────────────────────────────────────┐  │
│  │                        Monitoring Stack                         │  │
│  │  ┌─────────────┐  ┌─────────────┐  ┌─────────────────────────┐  │  │
│  │  │ Prometheus  │──│    Loki     │──│        Promtail         │  │  │
│  │  │ (Port 9090) │  │ (Port 3100) │  │  (Collects log files)   │  │  │
│  │  └──────┬──────┘  └──────┬──────┘  └─────────────────────────┘  │  │
│  │         │                │                                      │  │
│  │         └────────┬───────┘                                      │  │
│  │                  ▼                                              │  │
│  │         ┌─────────────────┐                                     │  │
│  │         │     Grafana     │                                     │  │
│  │         │   (Port 3001)   │◄── Pre-configured Dashboards        │  │
│  │         └─────────────────┘                                     │  │
│  └─────────────────────────────────────────────────────────────────┘  │
└────────────────────────────────────────────────────────────────────────┘
```

## 🚀 Quick Start

### Prerequisites

- **Docker Desktop** installed and running
- **WSL2** enabled (recommended for Windows)
- Backend API running on port 5000

### Step 1: Start Monitoring Stack

```powershell
# Navigate to the monitoring folder
cd C:\Laxman\Royal_Enfield\Re_Backend\monitoring

# Start all monitoring services
docker-compose -f docker-compose.monitoring.yml up -d

# Check status
docker ps
```

### Step 2: Configure Backend Environment

Add these to your backend `.env` file:

```env
# Loki configuration (for direct log shipping from Winston)
LOKI_HOST=http://localhost:3100

# Optional: Basic auth if enabled
# LOKI_USER=your_username
# LOKI_PASSWORD=your_password
```

### Step 3: Access Dashboards

| Service | URL | Credentials |
|---------|-----|-------------|
| **Grafana** | http://localhost:3001 | admin / REWorkflow@2024 |
| **Prometheus** | http://localhost:9090 | - |
| **Loki** | http://localhost:3100 | - |
| **Alertmanager** | http://localhost:9093 | - |

## 📊 Available Dashboards

### 1. RE Workflow Overview

A pre-configured dashboard with:
- **API Metrics**: Request rate, error rate, latency percentiles
- **Logs Overview**: Error count, warnings, TAT breaches
- **Node.js Runtime**: Memory usage, event loop lag, CPU

### 2. Custom LogQL Queries

| Purpose | Query |
|---------|-------|
| All errors | `{app="re-workflow"} \| json \| level="error"` |
| TAT breaches | `{app="re-workflow"} \| json \| tatEvent="breached"` |
| Auth failures | `{app="re-workflow"} \| json \| authEvent="auth_failure"` |
| Slow requests (>3s) | `{app="re-workflow"} \| json \| duration>3000` |
| By user | `{app="re-workflow"} \| json \| userId="USER-ID"` |
| By request | `{app="re-workflow"} \| json \| requestId="REQ-XXX"` |

### 3. PromQL Queries (Prometheus)

| Purpose | Query |
|---------|-------|
| Request rate | `rate(http_requests_total{job="re-workflow-backend"}[5m])` |
| Error rate | `rate(http_request_errors_total[5m]) / rate(http_requests_total[5m])` |
| P95 latency | `histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))` |
| Memory usage | `process_resident_memory_bytes{job="re-workflow-backend"}` |
| Event loop lag | `nodejs_eventloop_lag_seconds{job="re-workflow-backend"}` |
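
These PromQL queries can also be run against Prometheus's HTTP API directly, which is useful for scripted smoke tests:

```bash
# Instant query: current request rate per route/status
curl -G http://localhost:9090/api/v1/query \
  --data-urlencode 'query=rate(http_requests_total{job="re-workflow-backend"}[5m])'
```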

## 📁 File Structure

```
monitoring/
├── docker-compose.monitoring.yml   # Main compose file
├── prometheus/
│   ├── prometheus.yml              # Prometheus configuration
│   └── alert.rules.yml             # Alert rules
├── loki/
│   └── loki-config.yml             # Loki configuration
├── promtail/
│   └── promtail-config.yml         # Promtail log shipper config
├── alertmanager/
│   └── alertmanager.yml            # Alert notification config
└── grafana/
    ├── provisioning/
    │   ├── datasources/
    │   │   └── datasources.yml     # Auto-configure data sources
    │   └── dashboards/
    │       └── dashboards.yml      # Dashboard provisioning
    └── dashboards/
        └── re-workflow-overview.json  # Pre-built dashboard
```

## 🔧 Configuration

### Prometheus Scrape Targets

Edit `prometheus/prometheus.yml` to add or modify scrape targets:

```yaml
scrape_configs:
  - job_name: 're-workflow-backend'
    static_configs:
      # For local development (backend outside Docker)
      - targets: ['host.docker.internal:5000']
      # For Docker deployment (backend in Docker)
      # - targets: ['re_workflow_backend:5000']
```
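
Because the stack starts Prometheus with `--web.enable-lifecycle`, configuration edits like the one above can be applied without restarting the container:

```bash
# Reload the Prometheus configuration in place
curl -X POST http://localhost:9090/-/reload
```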

### Log Retention

Edit `loki/loki-config.yml`:

```yaml
limits_config:
  retention_period: 15d  # Adjust retention period
```

### Alert Notifications

Edit `alertmanager/alertmanager.yml` to configure:
- **Email** notifications
- **Slack** webhooks
- **Custom** webhook endpoints

## 🛠️ Common Commands

```powershell
# Start services
docker-compose -f docker-compose.monitoring.yml up -d

# Stop services
docker-compose -f docker-compose.monitoring.yml down

# View logs
docker-compose -f docker-compose.monitoring.yml logs -f

# View specific service logs
docker-compose -f docker-compose.monitoring.yml logs -f grafana

# Restart a service
docker-compose -f docker-compose.monitoring.yml restart prometheus

# Check service health
docker ps

# Remove all data (fresh start)
docker-compose -f docker-compose.monitoring.yml down -v
```

## ⚡ Metrics Exposed by Backend

The backend exposes these metrics at `/metrics`:

### HTTP Metrics
- `http_requests_total` - Total HTTP requests (by method, route, status)
- `http_request_duration_seconds` - Request latency histogram
- `http_request_errors_total` - Error count (4xx, 5xx)
- `http_active_connections` - Current active connections

### Business Metrics
- `tat_breaches_total` - TAT breach events
- `pending_workflows_count` - Pending workflow gauge
- `workflow_operations_total` - Workflow operation count
- `auth_events_total` - Authentication events

### Node.js Runtime
- `nodejs_heap_size_*` - Heap memory metrics
- `nodejs_eventloop_lag_*` - Event loop lag
- `process_cpu_*` - CPU usage
- `process_resident_memory_bytes` - RSS memory
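
A quick way to confirm the backend is actually exporting these before pointing Prometheus at it:

```bash
# Spot-check the exporter: should print counter samples with labels
curl -s http://localhost:5000/metrics | grep -E '^(http_requests_total|tat_breaches_total)'
```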

## 🔒 Security Notes

1. **Change default passwords** in production
2. **Enable TLS** for external access
3. **Configure the firewall** to restrict access to monitoring ports
4. **Use a reverse proxy** (nginx) for HTTPS

## 🐛 Troubleshooting

### Prometheus can't scrape backend
1. Ensure the backend is running on port 5000
2. Check the `/metrics` endpoint: `curl http://localhost:5000/metrics`
3. For Docker: use `host.docker.internal:5000`

### Logs not appearing in Loki
1. Check Promtail logs: `docker logs re_promtail`
2. Verify the log file path in `promtail-config.yml`
3. Ensure the backend has `LOKI_HOST` configured

### Grafana dashboards empty
1. Wait 30-60 seconds for data collection
2. Check the data source configuration in Grafana
3. Verify the time range selection

### Docker memory issues
```powershell
# Increase the Docker Desktop memory allocation:
# Settings → Resources → Memory → 4GB+
```

## 📞 Support

For issues with the monitoring stack:
1. Check container logs: `docker logs <container_name>`
2. Verify the syntax of the configuration files
3. Ensure Docker Desktop is running with sufficient resources
88
monitoring/alertmanager/alertmanager.yml
Normal file
@ -0,0 +1,88 @@
# =============================================================================
# Alertmanager Configuration for RE Workflow
# =============================================================================

global:
  # Global configuration options
  resolve_timeout: 5m

# Route configuration
route:
  # Default receiver
  receiver: 'default-receiver'

  # Group alerts by these labels
  group_by: ['alertname', 'service', 'severity']

  # Wait before sending grouped notifications
  group_wait: 30s

  # Interval for sending updates for a group
  group_interval: 5m

  # Interval for resending notifications
  repeat_interval: 4h

  # Child routes for specific routing
  routes:
    # Critical alerts - immediate notification
    - match:
        severity: critical
      receiver: 'critical-receiver'
      group_wait: 10s
      repeat_interval: 1h

    # Warning alerts
    - match:
        severity: warning
      receiver: 'warning-receiver'
      group_wait: 1m
      repeat_interval: 4h

# Receivers configuration
receivers:
  # Default receiver (logs to console)
  - name: 'default-receiver'
    # Webhook receiver for testing
    webhook_configs:
      - url: 'http://localhost:5000/api/webhooks/alerts'
        send_resolved: true

  # Critical alerts receiver
  - name: 'critical-receiver'
    # Configure email notifications
    # email_configs:
    #   - to: 'devops@royalenfield.com'
    #     from: 'alerts@royalenfield.com'
    #     smarthost: 'smtp.gmail.com:587'
    #     auth_username: 'alerts@royalenfield.com'
    #     auth_password: 'your-app-password'
    #     send_resolved: true

    # Slack notifications (uncomment and configure)
    # slack_configs:
    #   - api_url: 'https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK'
    #     channel: '#alerts-critical'
    #     send_resolved: true
    #     title: '{{ .Status | toUpper }}: {{ .CommonAnnotations.summary }}'
    #     text: '{{ .CommonAnnotations.description }}'

    webhook_configs:
      - url: 'http://host.docker.internal:5000/api/webhooks/alerts'
        send_resolved: true

  # Warning alerts receiver
  - name: 'warning-receiver'
    webhook_configs:
      - url: 'http://host.docker.internal:5000/api/webhooks/alerts'
        send_resolved: true

# Inhibition rules - prevent duplicate alerts
inhibit_rules:
  # If a critical alert fires, inhibit warning alerts for the same alertname
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'service']
170
monitoring/docker-compose.monitoring.yml
Normal file
170
monitoring/docker-compose.monitoring.yml
Normal file
@ -0,0 +1,170 @@
# =============================================================================
# RE Workflow - Complete Monitoring Stack
# Docker Compose for Grafana, Prometheus, Loki, and Promtail
# =============================================================================
# Usage:
#   cd monitoring
#   docker-compose -f docker-compose.monitoring.yml up -d
# =============================================================================

version: '3.8'

services:
  # ===========================================================================
  # PROMETHEUS - Metrics Collection
  # ===========================================================================
  prometheus:
    image: prom/prometheus:v2.47.2
    container_name: re_prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./prometheus/alert.rules.yml:/etc/prometheus/alert.rules.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--storage.tsdb.retention.time=15d'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      - '--web.enable-lifecycle'
    networks:
      - monitoring_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ===========================================================================
  # LOKI - Log Aggregation
  # ===========================================================================
  loki:
    image: grafana/loki:2.9.2
    container_name: re_loki
    ports:
      - "3100:3100"
    volumes:
      - ./loki/loki-config.yml:/etc/loki/local-config.yaml:ro
      - loki_data:/loki
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - monitoring_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

  # ===========================================================================
  # PROMTAIL - Log Shipping Agent
  # ===========================================================================
  promtail:
    image: grafana/promtail:2.9.2
    container_name: re_promtail
    volumes:
      - ./promtail/promtail-config.yml:/etc/promtail/config.yml:ro
      - ../logs:/var/log/app:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - promtail_data:/tmp/promtail
    command: -config.file=/etc/promtail/config.yml
    depends_on:
      - loki
    networks:
      - monitoring_network
    restart: unless-stopped

  # ===========================================================================
  # GRAFANA - Visualization & Dashboards
  # ===========================================================================
  grafana:
    image: grafana/grafana:10.2.2
    container_name: re_grafana
    ports:
      - "3001:3000"  # Using 3001 to avoid conflict with React frontend (3000)
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=REWorkflow@2024
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_FEATURE_TOGGLES_ENABLE=publicDashboards
      - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,grafana-piechart-panel
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
      - ./grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
    depends_on:
      - prometheus
      - loki
    networks:
      - monitoring_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ===========================================================================
  # NODE EXPORTER - Host Metrics (Optional but recommended)
  # ===========================================================================
  node-exporter:
    image: prom/node-exporter:v1.6.1
    container_name: re_node_exporter
    ports:
      - "9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    networks:
      - monitoring_network
    restart: unless-stopped

  # ===========================================================================
  # ALERTMANAGER - Alert Notifications (Optional)
  # ===========================================================================
  alertmanager:
    image: prom/alertmanager:v0.26.0
    container_name: re_alertmanager
    ports:
      - "9093:9093"
    volumes:
      - ./alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
      - alertmanager_data:/alertmanager
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
    networks:
      - monitoring_network
    restart: unless-stopped

# ===========================================================================
# NETWORKS
# ===========================================================================
networks:
  monitoring_network:
    driver: bridge
    name: re_monitoring_network

# ===========================================================================
# VOLUMES
# ===========================================================================
volumes:
  prometheus_data:
    name: re_prometheus_data
  loki_data:
    name: re_loki_data
  promtail_data:
    name: re_promtail_data
  grafana_data:
    name: re_grafana_data
  alertmanager_data:
    name: re_alertmanager_data
651
monitoring/grafana/dashboards/re-workflow-overview.json
Normal file
@@ -0,0 +1,651 @@
{
  "annotations": {
    "list": []
  },
  "editable": true,
  "fiscalYearStartMonth": 0,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
      "id": 100,
      "panels": [],
      "title": "📊 API Overview",
      "type": "row"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 100 },
              { "color": "red", "value": 500 }
            ]
          },
          "unit": "reqps"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 1 },
      "id": 1,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "expr": "sum(rate(http_requests_total{job=\"re-workflow-backend\"}[5m]))",
          "refId": "A"
        }
      ],
      "title": "Request Rate",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 0.01 },
              { "color": "red", "value": 0.05 }
            ]
          },
          "unit": "percentunit"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 6, "y": 1 },
      "id": 2,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        {
          "expr": "sum(rate(http_request_errors_total{job=\"re-workflow-backend\"}[5m])) / sum(rate(http_requests_total{job=\"re-workflow-backend\"}[5m]))",
          "refId": "A"
        }
      ],
      "title": "Error Rate",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 0.5 },
              { "color": "red", "value": 1 }
            ]
          },
          "unit": "s"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 12, "y": 1 },
      "id": 3,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        {
          "expr": "histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))",
          "refId": "A"
        }
      ],
      "title": "P95 Latency",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "red", "value": null },
              { "color": "green", "value": 1 }
            ]
          },
          "unit": "short"
        }
      },
      "gridPos": { "h": 4, "w": 6, "x": 18, "y": 1 },
      "id": 4,
      "options": {
        "colorMode": "value",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        {
          "expr": "up{job=\"re-workflow-backend\"}",
          "refId": "A"
        }
      ],
      "title": "API Status",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 20,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 2,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "reqps"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 5 },
      "id": 5,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        {
          "expr": "sum(rate(http_requests_total{job=\"re-workflow-backend\"}[5m])) by (method)",
          "legendFormat": "{{method}}",
          "refId": "A"
        }
      ],
      "title": "Request Rate by Method",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 20,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 2,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "s"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 5 },
      "id": 6,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        {
          "expr": "histogram_quantile(0.50, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))",
          "legendFormat": "P50",
          "refId": "A"
        },
        {
          "expr": "histogram_quantile(0.90, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))",
          "legendFormat": "P90",
          "refId": "B"
        },
        {
          "expr": "histogram_quantile(0.99, sum(rate(http_request_duration_seconds_bucket{job=\"re-workflow-backend\"}[5m])) by (le))",
          "legendFormat": "P99",
          "refId": "C"
        }
      ],
      "title": "Response Time Percentiles",
      "type": "timeseries"
    },
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 13 },
      "id": 101,
      "panels": [],
      "title": "📝 Logs",
      "type": "row"
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 14 },
      "id": 7,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "expr": "count_over_time({app=\"re-workflow\"} | json | level=\"error\" [$__range])",
          "refId": "A"
        }
      ],
      "title": "Errors (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 10 },
              { "color": "red", "value": 50 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 6, "y": 14 },
      "id": 8,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "expr": "count_over_time({app=\"re-workflow\"} | json | level=\"warn\" [$__range])",
          "refId": "A"
        }
      ],
      "title": "Warnings (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 50 },
              { "color": "orange", "value": 200 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 12, "y": 14 },
      "id": 9,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "expr": "count_over_time({app=\"re-workflow\"} | json | tatEvent=\"breached\" [$__range])",
          "refId": "A"
        }
      ],
      "title": "TAT Breaches (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 5 },
              { "color": "red", "value": 20 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 4, "w": 6, "x": 18, "y": 14 },
      "id": 10,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "expr": "count_over_time({app=\"re-workflow\"} | json | authEvent=\"auth_failure\" [$__range])",
          "refId": "A"
        }
      ],
      "title": "Auth Failures (Time Range)",
      "type": "stat",
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 10 },
              { "color": "red", "value": 50 }
            ]
          }
        }
      }
    },
    {
      "datasource": { "type": "loki", "uid": "loki" },
      "gridPos": { "h": 10, "w": 24, "x": 0, "y": 18 },
      "id": 11,
      "options": {
        "dedupStrategy": "none",
        "enableLogDetails": true,
        "prettifyLogMessage": false,
        "showCommonLabels": false,
        "showLabels": false,
        "showTime": true,
        "sortOrder": "Descending",
        "wrapLogMessage": false
      },
      "targets": [
        {
          "expr": "{app=\"re-workflow\"} | json | level=~\"error|warn\"",
          "refId": "A"
        }
      ],
      "title": "Recent Errors & Warnings",
      "type": "logs"
    },
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 28 },
      "id": 102,
      "panels": [],
      "title": "💻 Node.js Runtime",
      "type": "row"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 20,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 2,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "bytes"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 29 },
      "id": 12,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        {
          "expr": "process_resident_memory_bytes{job=\"re-workflow-backend\"}",
          "legendFormat": "RSS Memory",
          "refId": "A"
        },
        {
          "expr": "nodejs_heap_size_used_bytes{job=\"re-workflow-backend\"}",
          "legendFormat": "Heap Used",
          "refId": "B"
        },
        {
          "expr": "nodejs_heap_size_total_bytes{job=\"re-workflow-backend\"}",
          "legendFormat": "Heap Total",
          "refId": "C"
        }
      ],
      "title": "Memory Usage",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 20,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 2,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "s"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 29 },
      "id": 13,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        {
          "expr": "nodejs_eventloop_lag_seconds{job=\"re-workflow-backend\"}",
          "legendFormat": "Event Loop Lag",
          "refId": "A"
        }
      ],
      "title": "Event Loop Lag",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 20,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 2,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "short"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 37 },
      "id": 14,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        {
          "expr": "nodejs_active_handles_total{job=\"re-workflow-backend\"}",
          "legendFormat": "Active Handles",
          "refId": "A"
        },
        {
          "expr": "nodejs_active_requests_total{job=\"re-workflow-backend\"}",
          "legendFormat": "Active Requests",
          "refId": "B"
        }
      ],
      "title": "Active Handles & Requests",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "prometheus" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 20,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 2,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] },
          "unit": "percentunit"
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 37 },
      "id": 15,
      "options": {
        "legend": { "calcs": ["mean", "max"], "displayMode": "table", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "multi", "sort": "desc" }
      },
      "targets": [
        {
          "expr": "rate(process_cpu_seconds_total{job=\"re-workflow-backend\"}[5m])",
          "legendFormat": "CPU Usage",
          "refId": "A"
        }
      ],
      "title": "CPU Usage",
      "type": "timeseries"
    }
  ],
  "refresh": "30s",
  "schemaVersion": 38,
  "style": "dark",
  "tags": ["re-workflow", "backend", "monitoring"],
  "templating": { "list": [] },
  "time": { "from": "now-1h", "to": "now" },
  "timepicker": {},
  "timezone": "browser",
  "title": "RE Workflow - Overview",
  "uid": "re-workflow-overview",
  "version": 1
}
19
monitoring/grafana/provisioning/dashboards/dashboards.yml
Normal file
@@ -0,0 +1,19 @@
# =============================================================================
# Grafana Dashboards Provisioning
# Auto-loads dashboards from JSON files
# =============================================================================

apiVersion: 1

providers:
  - name: 'RE Workflow Dashboards'
    orgId: 1
    folder: 'RE Workflow'
    folderUid: 're-workflow'
    type: file
    disableDeletion: false
    updateIntervalSeconds: 30
    allowUiUpdates: true
    options:
      path: /var/lib/grafana/dashboards
43
monitoring/grafana/provisioning/datasources/datasources.yml
Normal file
@@ -0,0 +1,43 @@
# =============================================================================
# Grafana Datasources Provisioning
# Auto-configures Prometheus and Loki as data sources
# =============================================================================

apiVersion: 1

datasources:
  # Prometheus - Metrics
  - name: Prometheus
    uid: prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    editable: false
    jsonData:
      httpMethod: POST
      manageAlerts: true
      prometheusType: Prometheus
      prometheusVersion: 2.47.2

  # Loki - Logs
  - name: Loki
    uid: loki
    type: loki
    access: proxy
    url: http://loki:3100
    editable: false
    jsonData:
      maxLines: 1000
      timeout: 60

  # Alertmanager
  - name: Alertmanager
    uid: alertmanager
    type: alertmanager
    access: proxy
    url: http://alertmanager:9093
    editable: false
    jsonData:
      implementation: prometheus
79
monitoring/loki/loki-config.yml
Normal file
@@ -0,0 +1,79 @@
# =============================================================================
# Loki Configuration for RE Workflow
# =============================================================================

auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  log_level: info

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

# Query range settings
query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

# Schema configuration
schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

# Ingestion limits
limits_config:
  retention_period: 15d               # Keep logs for 15 days
  ingestion_rate_mb: 10               # 10MB/s ingestion rate
  ingestion_burst_size_mb: 20         # 20MB burst
  max_streams_per_user: 10000         # Max number of streams
  max_line_size: 256kb                # Max log line size
  max_entries_limit_per_query: 5000   # Max entries per query
  max_query_length: 721h              # Max query time range (30 days)

# Compactor for retention
compactor:
  working_directory: /loki/compactor
  retention_enabled: true
  retention_delete_delay: 2h
  delete_request_store: filesystem
  compaction_interval: 10m

# Ruler configuration (for alerting)
ruler:
  alertmanager_url: http://alertmanager:9093
  storage:
    type: local
    local:
      directory: /loki/rules
  rule_path: /loki/rules-temp
  enable_api: true

# Table manager (for index retention)
table_manager:
  retention_deletes_enabled: true
  retention_period: 360h  # 15 days

# Analytics (optional - disable for privacy)
analytics:
  reporting_enabled: false
150
monitoring/prometheus/alert.rules.yml
Normal file
@@ -0,0 +1,150 @@
# =============================================================================
# Prometheus Alert Rules for RE Workflow
# =============================================================================

groups:
  # ===========================================================================
  # Backend API Alerts
  # ===========================================================================
  - name: re-workflow-backend
    interval: 30s
    rules:
      # High Error Rate
      - alert: HighErrorRate
        expr: rate(http_request_errors_total{job="re-workflow-backend"}[5m]) > 0.1
        for: 5m
        labels:
          severity: critical
          service: backend
        annotations:
          summary: "High error rate detected in RE Workflow Backend"
          description: "Error rate is {{ $value | printf \"%.2f\" }} errors/sec for the last 5 minutes."

      # High Request Latency
      - alert: HighRequestLatency
        expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket{job="re-workflow-backend"}[5m])) > 2
        for: 5m
        labels:
          severity: warning
          service: backend
        annotations:
          summary: "High API latency detected"
          description: "95th percentile latency is {{ $value | printf \"%.2f\" }}s"

      # API Down
      - alert: BackendDown
        expr: up{job="re-workflow-backend"} == 0
        for: 1m
        labels:
          severity: critical
          service: backend
        annotations:
          summary: "RE Workflow Backend is DOWN"
          description: "Backend API has been unreachable for more than 1 minute."

      # High Memory Usage
      - alert: HighMemoryUsage
        expr: process_resident_memory_bytes{job="re-workflow-backend"} / 1024 / 1024 > 500
        for: 10m
        labels:
          severity: warning
          service: backend
        annotations:
          summary: "High memory usage in Backend"
          description: "Memory usage is {{ $value | printf \"%.0f\" }}MB"

      # Event Loop Lag
      - alert: HighEventLoopLag
        expr: nodejs_eventloop_lag_seconds{job="re-workflow-backend"} > 0.5
        for: 5m
        labels:
          severity: warning
          service: backend
        annotations:
          summary: "High Node.js event loop lag"
          description: "Event loop lag is {{ $value | printf \"%.3f\" }}s"

  # ===========================================================================
  # TAT/Workflow Alerts
  # ===========================================================================
  - name: re-workflow-tat
    interval: 1m
    rules:
      # TAT Breach Rate
      - alert: HighTATBreachRate
        expr: increase(tat_breaches_total[1h]) > 10
        for: 5m
        labels:
          severity: warning
          service: workflow
        annotations:
          summary: "High TAT breach rate detected"
          description: "{{ $value | printf \"%.0f\" }} TAT breaches in the last hour"

      # Pending Workflows Queue
      - alert: LargePendingQueue
        expr: pending_workflows_count > 100
        for: 30m
        labels:
          severity: warning
          service: workflow
        annotations:
          summary: "Large number of pending workflows"
          description: "{{ $value | printf \"%.0f\" }} workflows pending approval"

  # ===========================================================================
  # Infrastructure Alerts
  # ===========================================================================
  - name: infrastructure
    interval: 30s
    rules:
      # High CPU Usage (Node Exporter)
      - alert: HighCPUUsage
        expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
        for: 10m
        labels:
          severity: warning
          service: infrastructure
        annotations:
          summary: "High CPU usage on {{ $labels.instance }}"
          description: "CPU usage is {{ $value | printf \"%.1f\" }}%"

      # High Disk Usage
      - alert: HighDiskUsage
        expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes * 100 > 85
        for: 10m
        labels:
          severity: warning
          service: infrastructure
        annotations:
          summary: "High disk usage on {{ $labels.instance }}"
          description: "Disk usage is {{ $value | printf \"%.1f\" }}%"

      # Low Memory
      - alert: LowMemory
        expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 < 15
        for: 5m
        labels:
          severity: critical
          service: infrastructure
        annotations:
          summary: "Low memory on {{ $labels.instance }}"
          description: "Available memory is {{ $value | printf \"%.1f\" }}%"

  # ===========================================================================
  # Loki/Logging Alerts
  # ===========================================================================
  - name: logging
    interval: 1m
    rules:
      # Loki Down
      - alert: LokiDown
        expr: up{job="loki"} == 0
        for: 2m
        labels:
          severity: critical
          service: loki
        annotations:
          summary: "Loki is DOWN"
          description: "Loki has been unreachable for more than 2 minutes."
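Note that the `re-workflow-tat` group queries two application-level series, `tat_breaches_total` and `pending_workflows_count`, that no exporter provides out of the box; the backend has to register them itself (prom-client is added as a dependency in this commit). A minimal sketch, with hypothetical call sites:

```typescript
// Sketch of the custom metrics the TAT alert rules assume (metric names match
// the expressions above; the call sites and countPending() are hypothetical).
import { Counter, Gauge } from 'prom-client';

export const tatBreaches = new Counter({
  name: 'tat_breaches_total',
  help: 'Total number of TAT breaches',
});

export const pendingWorkflows = new Gauge({
  name: 'pending_workflows_count',
  help: 'Workflows currently awaiting approval',
});

// Example usage (hypothetical):
// tatBreaches.inc();                          // when an approval level misses its TAT
// pendingWorkflows.set(await countPending()); // refreshed by a periodic job
```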
61
monitoring/prometheus/prometheus-docker.yml
Normal file
@@ -0,0 +1,61 @@
# =============================================================================
# Prometheus Configuration for RE Workflow (Full Docker Stack)
# Use this when running docker-compose.full.yml
# =============================================================================

global:
  scrape_interval: 15s
  evaluation_interval: 15s
  external_labels:
    monitor: 're-workflow-monitor'
    environment: 'docker'

alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

rule_files:
  - /etc/prometheus/alert.rules.yml

scrape_configs:
  # Prometheus Self-Monitoring
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
        labels:
          service: 'prometheus'

  # RE Workflow Backend (running in Docker)
  - job_name: 're-workflow-backend'
    static_configs:
      - targets: ['re_workflow_backend:5000']
        labels:
          service: 'backend'
          environment: 'docker'
    metrics_path: /metrics
    scrape_interval: 10s
    scrape_timeout: 5s

  # Node Exporter
  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']
        labels:
          service: 'node-exporter'

  # Loki
  - job_name: 'loki'
    static_configs:
      - targets: ['loki:3100']
        labels:
          service: 'loki'

  # Grafana
  - job_name: 'grafana'
    static_configs:
      - targets: ['grafana:3000']
        labels:
          service: 'grafana'
100
monitoring/prometheus/prometheus.yml
Normal file
@@ -0,0 +1,100 @@
# =============================================================================
# Prometheus Configuration for RE Workflow
# =============================================================================

global:
  scrape_interval: 15s      # How frequently to scrape targets
  evaluation_interval: 15s  # How frequently to evaluate rules
  external_labels:
    monitor: 're-workflow-monitor'
    environment: 'development'

# Alerting configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Rule files
rule_files:
  - /etc/prometheus/alert.rules.yml

# Scrape configurations
scrape_configs:
  # ============================================
  # Prometheus Self-Monitoring
  # ============================================
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
        labels:
          service: 'prometheus'

  # ============================================
  # RE Workflow Backend API Metrics
  # ============================================
  - job_name: 're-workflow-backend'
    static_configs:
      # Option 1: Backend running locally (outside Docker monitoring stack)
      - targets: ['host.docker.internal:5000']
        labels:
          service: 'backend'
          environment: 'development'
          deployment: 'local'
      # Option 2: Backend running in Docker (docker-compose.full.yml)
      # Uncomment below and comment above when using full stack
      # - targets: ['re_workflow_backend:5000']
      #   labels:
      #     service: 'backend'
      #     environment: 'development'
      #     deployment: 'docker'
    metrics_path: /metrics
    scrape_interval: 10s
    scrape_timeout: 5s

  # ============================================
  # Node Exporter - Host Metrics
  # ============================================
  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']
        labels:
          service: 'node-exporter'

  # ============================================
  # PostgreSQL Metrics (if using pg_exporter)
  # ============================================
  # - job_name: 'postgres'
  #   static_configs:
  #     - targets: ['postgres-exporter:9187']
  #       labels:
  #         service: 'postgresql'

  # ============================================
  # Redis Metrics (if using redis_exporter)
  # ============================================
  # - job_name: 'redis'
  #   static_configs:
  #     - targets: ['redis-exporter:9121']
  #       labels:
  #         service: 'redis'

  # ============================================
  # Loki Metrics
  # ============================================
  - job_name: 'loki'
    static_configs:
      - targets: ['loki:3100']
        labels:
          service: 'loki'

  # ============================================
  # Grafana Metrics
  # ============================================
  - job_name: 'grafana'
    static_configs:
      - targets: ['grafana:3000']
        labels:
          service: 'grafana'
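Both Prometheus configurations scrape the backend's `/metrics` endpoint every 10 seconds. With prom-client (added to the backend's dependencies in this commit), exposing that endpoint takes only a few lines; a sketch assuming the backend's Express app on port 5000 (not the repository's actual wiring):

```typescript
// Sketch of the /metrics endpoint the scrape configs target (hypothetical wiring).
// collectDefaultMetrics() supplies the nodejs_* and process_* series used by the
// dashboard and alert rules; the http_* series require custom instrumentation.
import express from 'express';
import { collectDefaultMetrics, register } from 'prom-client';

collectDefaultMetrics(); // nodejs_eventloop_lag_seconds, process_resident_memory_bytes, ...

const app = express();

app.get('/metrics', async (_req, res) => {
  res.set('Content-Type', register.contentType);
  res.end(await register.metrics());
});

app.listen(5000); // matches the host.docker.internal:5000 / re_workflow_backend:5000 targets
```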
129
monitoring/promtail/promtail-config.yml
Normal file
@@ -0,0 +1,129 @@
# =============================================================================
# Promtail Configuration for RE Workflow
# Ships logs from application log files to Loki
# =============================================================================

server:
  http_listen_port: 9080
  grpc_listen_port: 0

# Positions file (tracks what's been read)
positions:
  filename: /tmp/promtail/positions.yaml

# Loki client configuration
clients:
  - url: http://loki:3100/loki/api/v1/push
    batchwait: 1s
    batchsize: 1048576  # 1MB
    timeout: 10s

# Scrape configurations
scrape_configs:
  # ============================================
  # RE Workflow Backend Application Logs
  # ============================================
  - job_name: re-workflow-app
    static_configs:
      - targets:
          - localhost
        labels:
          job: re-workflow
          app: re-workflow
          service: backend
          __path__: /var/log/app/*.log

    pipeline_stages:
      # Parse JSON logs
      - json:
          expressions:
            level: level
            message: message
            timestamp: timestamp
            requestId: requestId
            userId: userId
            method: method
            url: url
            statusCode: statusCode
            duration: duration
            workflowEvent: workflowEvent
            tatEvent: tatEvent
            authEvent: authEvent
            error: error

      # Set log level as label
      - labels:
          level:
          requestId:
          workflowEvent:
          tatEvent:
          authEvent:

      # Timestamp parsing
      - timestamp:
          source: timestamp
          format: "2006-01-02 15:04:05"
          fallback_formats:
            - RFC3339

      # Output stage
      - output:
          source: message

  # ============================================
  # Docker Container Logs (if running in Docker)
  # ============================================
  - job_name: docker-containers
    static_configs:
      - targets:
          - localhost
        labels:
          job: docker
          __path__: /var/lib/docker/containers/*/*-json.log

    pipeline_stages:
      # Parse Docker JSON format
      - json:
          expressions:
            output: log
            stream: stream
            time: time

      # Extract container info from path
      - regex:
          source: filename
          expression: '/var/lib/docker/containers/(?P<container_id>[a-f0-9]+)/.*'

      # Add labels
      - labels:
          stream:
          container_id:

      # Parse application JSON from log field
      - json:
          source: output
          expressions:
            level: level
            message: message
            service: service

      # Add level as label if present
      - labels:
          level:
          service:

      # Output the log message
      - output:
          source: output

  # ============================================
  # System Logs (optional - for infrastructure monitoring)
  # ============================================
  # - job_name: system
  #   static_configs:
  #     - targets:
  #         - localhost
  #       labels:
  #         job: system
  #         __path__: /var/log/syslog
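The first job above expects one JSON object per line in `/var/log/app/*.log`, with `level`, `requestId`, `tatEvent`, and the other listed fields at the top level. A winston logger along these lines would produce compatible output (a sketch; the file name is an assumption, written under the `../logs` directory the compose file mounts as `/var/log/app`):

```typescript
// Sketch of a winston logger emitting the JSON shape the pipeline parses.
// Keys in the metadata object become top-level JSON fields that promtail
// extracts and promotes to labels.
import winston from 'winston';

export const logger = winston.createLogger({
  level: 'info',
  format: winston.format.combine(
    winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), // matches "2006-01-02 15:04:05"
    winston.format.json()
  ),
  transports: [
    new winston.transports.File({ filename: 'logs/combined.log' }), // hypothetical file name
  ],
});

// Example: produces a line the tatEvent expression and label stage pick up.
logger.warn('TAT threshold crossed', { tatEvent: 'breached', requestId: 'req-123' });
```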
68
monitoring/start-monitoring.bat
Normal file
@@ -0,0 +1,68 @@
@echo off
echo ============================================================
echo  RE Workflow Monitoring Stack - Startup Script
echo ============================================================
echo.

:: Check if Docker is running
docker info >nul 2>&1
if errorlevel 1 (
    echo [ERROR] Docker is not running. Please start Docker Desktop first.
    pause
    exit /b 1
)

echo [INFO] Docker is running.
echo.

:: Navigate to monitoring directory
cd /d "%~dp0"
echo [INFO] Working directory: %cd%
echo.

:: Start monitoring stack
echo [INFO] Starting monitoring stack...
echo.
docker-compose -f docker-compose.monitoring.yml up -d

if errorlevel 1 (
    echo.
    echo [ERROR] Failed to start monitoring stack.
    pause
    exit /b 1
)

echo.
echo ============================================================
echo  Monitoring Stack Started Successfully!
echo ============================================================
echo.
echo Services:
echo ---------------------------------------------------------
echo  Grafana:       http://localhost:3001
echo    Username:    admin
echo    Password:    REWorkflow@2024
echo.
echo  Prometheus:    http://localhost:9090
echo.
echo  Loki:          http://localhost:3100
echo.
echo  Alertmanager:  http://localhost:9093
echo ---------------------------------------------------------
echo.
echo Next Steps:
echo  1. Add LOKI_HOST=http://localhost:3100 to your .env file
echo  2. Restart your backend application
echo  3. Open Grafana at http://localhost:3001
echo  4. Navigate to Dashboards ^> RE Workflow
echo.
echo ============================================================

:: Show container status
echo.
echo [INFO] Container Status:
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
echo.

pause
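Step 1 in the script pairs with the `winston-loki` dependency added in package-lock.json below: besides the log files promtail tails, the backend can push logs to Loki directly. A sketch using the `LOKI_HOST` variable the script asks you to set (transport options are winston-loki's documented ones; the actual wiring in this codebase may differ):

```typescript
// Sketch: shipping logs straight to Loki with winston-loki (hypothetical wiring).
import winston from 'winston';
import LokiTransport from 'winston-loki';

const logger = winston.createLogger({
  transports: [
    new LokiTransport({
      host: process.env.LOKI_HOST ?? 'http://localhost:3100',
      labels: { app: 're-workflow' }, // matches the dashboard's {app="re-workflow"} selector
      json: true,
    }),
  ],
});

logger.info('Backend connected to Loki');
```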
36
monitoring/stop-monitoring.bat
Normal file
@@ -0,0 +1,36 @@
@echo off
echo ============================================================
echo  RE Workflow Monitoring Stack - Shutdown Script
echo ============================================================
echo.

:: Navigate to monitoring directory
cd /d "%~dp0"

echo [INFO] Stopping monitoring stack...
echo.

docker-compose -f docker-compose.monitoring.yml down

if errorlevel 1 (
    echo.
    echo [ERROR] Failed to stop monitoring stack.
    pause
    exit /b 1
)

echo.
echo ============================================================
echo  Monitoring Stack Stopped Successfully!
echo ============================================================
echo.
echo Note: Data volumes are preserved. Use the following
echo command to remove all data:
echo.
echo   docker-compose -f docker-compose.monitoring.yml down -v
echo.
echo ============================================================
echo.

pause
551
package-lock.json
generated
@@ -32,11 +32,13 @@
         "passport-jwt": "^4.0.1",
         "pg": "^8.13.1",
         "pg-hstore": "^2.3.4",
+        "prom-client": "^15.1.3",
         "sequelize": "^6.37.5",
         "socket.io": "^4.8.1",
         "uuid": "^8.3.2",
         "web-push": "^3.6.7",
         "winston": "^3.17.0",
+        "winston-loki": "^6.1.3",
         "zod": "^3.24.1"
       },
       "devDependencies": {
@@ -663,6 +665,37 @@
         "kuler": "^2.0.0"
       }
     },
+    "node_modules/@emnapi/core": {
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.7.1.tgz",
+      "integrity": "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==",
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "@emnapi/wasi-threads": "1.1.0",
+        "tslib": "^2.4.0"
+      }
+    },
+    "node_modules/@emnapi/runtime": {
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.7.1.tgz",
+      "integrity": "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==",
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "tslib": "^2.4.0"
+      }
+    },
+    "node_modules/@emnapi/wasi-threads": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.1.0.tgz",
+      "integrity": "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==",
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "tslib": "^2.4.0"
+      }
+    },
     "node_modules/@eslint-community/eslint-utils": {
       "version": "4.9.0",
       "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
@@ -1616,6 +1649,306 @@
         "win32"
       ]
     },
+    "node_modules/@napi-rs/snappy-android-arm-eabi": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-android-arm-eabi/-/snappy-android-arm-eabi-7.3.3.tgz",
+      "integrity": "sha512-d4vUFFzNBvazGfB/KU8MnEax6itTIgRWXodPdZDnWKHy9HwVBndpCiedQDcSNHcZNYV36rx034rpn7SAuTL2NA==",
+      "cpu": [
+        "arm"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-android-arm64": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-android-arm64/-/snappy-android-arm64-7.3.3.tgz",
+      "integrity": "sha512-Uh+w18dhzjVl85MGhRnojb7OLlX2ErvMsYIunO/7l3Frvc2zQvfqsWsFJanu2dwqlE2YDooeNP84S+ywgN9sxg==",
+      "cpu": [
+        "arm64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-darwin-arm64": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-darwin-arm64/-/snappy-darwin-arm64-7.3.3.tgz",
+      "integrity": "sha512-AmJn+6yOu/0V0YNHLKmRUNYkn93iv/1wtPayC7O1OHtfY6YqHQ31/MVeeRBiEYtQW9TwVZxXrDirxSB1PxRdtw==",
+      "cpu": [
+        "arm64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-darwin-x64": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-darwin-x64/-/snappy-darwin-x64-7.3.3.tgz",
+      "integrity": "sha512-biLTXBmPjPmO7HIpv+5BaV9Gy/4+QJSUNJW8Pjx1UlWAVnocPy7um+zbvAWStZssTI5sfn/jOClrAegD4w09UA==",
+      "cpu": [
+        "x64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-freebsd-x64": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-freebsd-x64/-/snappy-freebsd-x64-7.3.3.tgz",
+      "integrity": "sha512-E3R3ewm8Mrjm0yL2TC3VgnphDsQaCPixNJqBbGiz3NTshVDhlPlOgPKF0NGYqKiKaDGdD9PKtUgOR4vagUtn7g==",
+      "cpu": [
+        "x64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "freebsd"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-linux-arm-gnueabihf": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-arm-gnueabihf/-/snappy-linux-arm-gnueabihf-7.3.3.tgz",
+      "integrity": "sha512-ZuNgtmk9j0KyT7TfLyEnvZJxOhbkyNR761nk04F0Q4NTHMICP28wQj0xgEsnCHUsEeA9OXrRL4R7waiLn+rOQA==",
+      "cpu": [
+        "arm"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-linux-arm64-gnu": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-arm64-gnu/-/snappy-linux-arm64-gnu-7.3.3.tgz",
+      "integrity": "sha512-KIzwtq0dAzshzpqZWjg0Q9lUx93iZN7wCCUzCdLYIQ+mvJZKM10VCdn0RcuQze1R3UJTPwpPLXQIVskNMBYyPA==",
+      "cpu": [
+        "arm64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-linux-arm64-musl": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-arm64-musl/-/snappy-linux-arm64-musl-7.3.3.tgz",
+      "integrity": "sha512-AAED4cQS74xPvktsyVmz5sy8vSxG/+3d7Rq2FDBZzj3Fv6v5vux6uZnECPCAqpALCdTtJ61unqpOyqO7hZCt1Q==",
+      "cpu": [
+        "arm64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-linux-ppc64-gnu": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-ppc64-gnu/-/snappy-linux-ppc64-gnu-7.3.3.tgz",
+      "integrity": "sha512-pofO5eSLg8ZTBwVae4WHHwJxJGZI8NEb4r5Mppvq12J/1/Hq1HecClXmfY3A7bdT2fsS2Td+Q7CI9VdBOj2sbA==",
+      "cpu": [
+        "ppc64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">= 10"
+      }
+    },
+    "node_modules/@napi-rs/snappy-linux-riscv64-gnu": {
+      "version": "7.3.3",
+      "resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-riscv64-gnu/-/snappy-linux-riscv64-gnu-7.3.3.tgz",
+      "integrity": "sha512-OiHYdeuwj0TVBXADUmmQDQ4lL1TB+8EwmXnFgOutoDVXHaUl0CJFyXLa6tYUXe+gRY8hs1v7eb0vyE97LKY06Q==",
+      "cpu": [
+        "riscv64"
+      ],
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-linux-s390x-gnu": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-s390x-gnu/-/snappy-linux-s390x-gnu-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-66QdmuV9CTq/S/xifZXlMy3PsZTviAgkqqpZ+7vPCmLtuP+nqhaeupShOFf/sIDsS0gZePazPosPTeTBbhkLHg==",
|
||||||
|
"cpu": [
|
||||||
|
"s390x"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-linux-x64-gnu": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-x64-gnu/-/snappy-linux-x64-gnu-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-g6KURjOxrgb8yXDEZMuIcHkUr/7TKlDwSiydEQtMtP3n4iI4sNjkcE/WNKlR3+t9bZh1pFGAq7NFRBtouQGHpQ==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-linux-x64-musl": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-linux-x64-musl/-/snappy-linux-x64-musl-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-6UvOyczHknpaKjrlKKSlX3rwpOrfJwiMG6qA0NRKJFgbcCAEUxmN9A8JvW4inP46DKdQ0bekdOxwRtAhFiTDfg==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-openharmony-arm64": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-openharmony-arm64/-/snappy-openharmony-arm64-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-I5mak/5rTprobf7wMCk0vFhClmWOL/QiIJM4XontysnadmP/R9hAcmuFmoMV2GaxC9MblqLA7Z++gy8ou5hJVw==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"openharmony"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-wasm32-wasi": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-wasm32-wasi/-/snappy-wasm32-wasi-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-+EroeygVYo9RksOchjF206frhMkfD2PaIun3yH4Zp5j/Y0oIEgs/+VhAYx/f+zHRylQYUIdLzDRclcoepvlR8Q==",
|
||||||
|
"cpu": [
|
||||||
|
"wasm32"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"@napi-rs/wasm-runtime": "^1.0.3"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-win32-arm64-msvc": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-win32-arm64-msvc/-/snappy-win32-arm64-msvc-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-rxqfntBsCfzgOha/OlG8ld2hs6YSMGhpMUbFjeQLyVDbooY041fRXv3S7yk52DfO6H4QQhLT5+p7cW0mYdhyiQ==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-win32-ia32-msvc": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-win32-ia32-msvc/-/snappy-win32-ia32-msvc-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-joRV16DsRtqjGt0CdSpxGCkO0UlHGeTZ/GqvdscoALpRKbikR2Top4C61dxEchmOd3lSYsXutuwWWGg3Nr++WA==",
|
||||||
|
"cpu": [
|
||||||
|
"ia32"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/snappy-win32-x64-msvc": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/snappy-win32-x64-msvc/-/snappy-win32-x64-msvc-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-cEnQwcsdJyOU7HSZODWsHpKuQoSYM4jaqw/hn9pOXYbRN1+02WxYppD3fdMuKN6TOA6YG5KA5PHRNeVilNX86Q==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@napi-rs/wasm-runtime": {
|
||||||
|
"version": "1.0.7",
|
||||||
|
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.0.7.tgz",
|
||||||
|
"integrity": "sha512-SeDnOO0Tk7Okiq6DbXmmBODgOAb9dp9gjlphokTUxmt8U3liIP1ZsozBahH69j/RJv+Rfs6IwUKHTgQYJ/HBAw==",
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"@emnapi/core": "^1.5.0",
|
||||||
|
"@emnapi/runtime": "^1.5.0",
|
||||||
|
"@tybys/wasm-util": "^0.10.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@noble/hashes": {
|
"node_modules/@noble/hashes": {
|
||||||
"version": "1.8.0",
|
"version": "1.8.0",
|
||||||
"resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz",
|
"resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz",
|
||||||
@ -1674,6 +2007,15 @@
|
|||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
|
"node_modules/@opentelemetry/api": {
|
||||||
|
"version": "1.9.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
|
||||||
|
"integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=8.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@paralleldrive/cuid2": {
|
"node_modules/@paralleldrive/cuid2": {
|
||||||
"version": "2.3.1",
|
"version": "2.3.1",
|
||||||
"resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.3.1.tgz",
|
"resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.3.1.tgz",
|
||||||
@ -1695,6 +2037,70 @@
|
|||||||
"node": ">=14"
|
"node": ">=14"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@protobufjs/aspromise": {
|
||||||
|
"version": "1.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
|
||||||
|
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/base64": {
|
||||||
|
"version": "1.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
|
||||||
|
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/codegen": {
|
||||||
|
"version": "2.0.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
|
||||||
|
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/eventemitter": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/fetch": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
|
||||||
|
"license": "BSD-3-Clause",
|
||||||
|
"dependencies": {
|
||||||
|
"@protobufjs/aspromise": "^1.1.1",
|
||||||
|
"@protobufjs/inquire": "^1.1.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/float": {
|
||||||
|
"version": "1.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
|
||||||
|
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/inquire": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/path": {
|
||||||
|
"version": "1.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
|
||||||
|
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/pool": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/utf8": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
|
||||||
|
"license": "BSD-3-Clause"
|
||||||
|
},
|
||||||
"node_modules/@sinclair/typebox": {
|
"node_modules/@sinclair/typebox": {
|
||||||
"version": "0.27.8",
|
"version": "0.27.8",
|
||||||
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
|
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
|
||||||
@ -1775,6 +2181,16 @@
|
|||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
|
"node_modules/@tybys/wasm-util": {
|
||||||
|
"version": "0.10.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz",
|
||||||
|
"integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==",
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"tslib": "^2.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@types/babel__core": {
|
"node_modules/@types/babel__core": {
|
||||||
"version": "7.20.5",
|
"version": "7.20.5",
|
||||||
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
|
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
|
||||||
@ -2717,6 +3133,15 @@
|
|||||||
"integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==",
|
"integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
|
"node_modules/async-exit-hook": {
|
||||||
|
"version": "2.0.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/async-exit-hook/-/async-exit-hook-2.0.1.tgz",
|
||||||
|
"integrity": "sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==",
|
||||||
|
"license": "MIT",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=0.12.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/async-retry": {
|
"node_modules/async-retry": {
|
||||||
"version": "1.3.3",
|
"version": "1.3.3",
|
||||||
"resolved": "https://registry.npmjs.org/async-retry/-/async-retry-1.3.3.tgz",
|
"resolved": "https://registry.npmjs.org/async-retry/-/async-retry-1.3.3.tgz",
|
||||||
@ -2971,6 +3396,12 @@
|
|||||||
"url": "https://github.com/sponsors/sindresorhus"
|
"url": "https://github.com/sponsors/sindresorhus"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/bintrees": {
|
||||||
|
"version": "1.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/bintrees/-/bintrees-1.0.2.tgz",
|
||||||
|
"integrity": "sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
"node_modules/bluebird": {
|
"node_modules/bluebird": {
|
||||||
"version": "3.7.2",
|
"version": "3.7.2",
|
||||||
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
|
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
|
||||||
@ -3103,6 +3534,18 @@
|
|||||||
"node-int64": "^0.4.0"
|
"node-int64": "^0.4.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/btoa": {
|
||||||
|
"version": "1.2.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/btoa/-/btoa-1.2.1.tgz",
|
||||||
|
"integrity": "sha512-SB4/MIGlsiVkMcHmT+pSmIPoNDoHg+7cMzmt3Uxt628MTz2487DKSqK/fuhFBrkuqrYv5UCEnACpF4dTFNKc/g==",
|
||||||
|
"license": "(MIT OR Apache-2.0)",
|
||||||
|
"bin": {
|
||||||
|
"btoa": "bin/btoa.js"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 0.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/buffer-equal-constant-time": {
|
"node_modules/buffer-equal-constant-time": {
|
||||||
"version": "1.0.1",
|
"version": "1.0.1",
|
||||||
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
|
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
|
||||||
@ -6688,6 +7131,12 @@
|
|||||||
"node": ">= 12.0.0"
|
"node": ">= 12.0.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/long": {
|
||||||
|
"version": "5.3.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz",
|
||||||
|
"integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==",
|
||||||
|
"license": "Apache-2.0"
|
||||||
|
},
|
||||||
"node_modules/lru-cache": {
|
"node_modules/lru-cache": {
|
||||||
"version": "5.1.1",
|
"version": "5.1.1",
|
||||||
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
|
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
|
||||||
@ -7876,6 +8325,19 @@
|
|||||||
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
|
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
|
"node_modules/prom-client": {
|
||||||
|
"version": "15.1.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/prom-client/-/prom-client-15.1.3.tgz",
|
||||||
|
"integrity": "sha512-6ZiOBfCywsD4k1BN9IX0uZhF+tJkV8q8llP64G5Hajs4JOeVLPCwpPVcpXy3BwYiUGgyJzsJJQeOIv7+hDSq8g==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@opentelemetry/api": "^1.4.0",
|
||||||
|
"tdigest": "^0.1.1"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": "^16 || ^18 || >=20"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/prompts": {
|
"node_modules/prompts": {
|
||||||
"version": "2.4.2",
|
"version": "2.4.2",
|
||||||
"resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
|
"resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
|
||||||
@ -7897,6 +8359,30 @@
|
|||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "ISC"
|
"license": "ISC"
|
||||||
},
|
},
|
||||||
|
"node_modules/protobufjs": {
|
||||||
|
"version": "7.5.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz",
|
||||||
|
"integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==",
|
||||||
|
"hasInstallScript": true,
|
||||||
|
"license": "BSD-3-Clause",
|
||||||
|
"dependencies": {
|
||||||
|
"@protobufjs/aspromise": "^1.1.2",
|
||||||
|
"@protobufjs/base64": "^1.1.2",
|
||||||
|
"@protobufjs/codegen": "^2.0.4",
|
||||||
|
"@protobufjs/eventemitter": "^1.1.0",
|
||||||
|
"@protobufjs/fetch": "^1.1.0",
|
||||||
|
"@protobufjs/float": "^1.0.2",
|
||||||
|
"@protobufjs/inquire": "^1.1.0",
|
||||||
|
"@protobufjs/path": "^1.1.2",
|
||||||
|
"@protobufjs/pool": "^1.1.0",
|
||||||
|
"@protobufjs/utf8": "^1.1.0",
|
||||||
|
"@types/node": ">=13.7.0",
|
||||||
|
"long": "^5.0.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=12.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/proxy-addr": {
|
"node_modules/proxy-addr": {
|
||||||
"version": "2.0.7",
|
"version": "2.0.7",
|
||||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
||||||
@ -8632,6 +9118,40 @@
|
|||||||
"node": ">=8"
|
"node": ">=8"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/snappy": {
|
||||||
|
"version": "7.3.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/snappy/-/snappy-7.3.3.tgz",
|
||||||
|
"integrity": "sha512-UDJVCunvgblRpfTOjo/uT7pQzfrTsSICJ4yVS4aq7SsGBaUSpJwaVP15nF//jqinSLpN7boe/BqbUmtWMTQ5MQ==",
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 10"
|
||||||
|
},
|
||||||
|
"funding": {
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/Brooooooklyn"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"@napi-rs/snappy-android-arm-eabi": "7.3.3",
|
||||||
|
"@napi-rs/snappy-android-arm64": "7.3.3",
|
||||||
|
"@napi-rs/snappy-darwin-arm64": "7.3.3",
|
||||||
|
"@napi-rs/snappy-darwin-x64": "7.3.3",
|
||||||
|
"@napi-rs/snappy-freebsd-x64": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-arm-gnueabihf": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-arm64-gnu": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-arm64-musl": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-ppc64-gnu": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-riscv64-gnu": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-s390x-gnu": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-x64-gnu": "7.3.3",
|
||||||
|
"@napi-rs/snappy-linux-x64-musl": "7.3.3",
|
||||||
|
"@napi-rs/snappy-openharmony-arm64": "7.3.3",
|
||||||
|
"@napi-rs/snappy-wasm32-wasi": "7.3.3",
|
||||||
|
"@napi-rs/snappy-win32-arm64-msvc": "7.3.3",
|
||||||
|
"@napi-rs/snappy-win32-ia32-msvc": "7.3.3",
|
||||||
|
"@napi-rs/snappy-win32-x64-msvc": "7.3.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/socket.io": {
|
"node_modules/socket.io": {
|
||||||
"version": "4.8.1",
|
"version": "4.8.1",
|
||||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.1.tgz",
|
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.1.tgz",
|
||||||
@ -9058,6 +9578,15 @@
|
|||||||
"url": "https://github.com/sponsors/ljharb"
|
"url": "https://github.com/sponsors/ljharb"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/tdigest": {
|
||||||
|
"version": "0.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/tdigest/-/tdigest-0.1.2.tgz",
|
||||||
|
"integrity": "sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"bintrees": "1.0.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/teeny-request": {
|
"node_modules/teeny-request": {
|
||||||
"version": "9.0.0",
|
"version": "9.0.0",
|
||||||
"resolved": "https://registry.npmjs.org/teeny-request/-/teeny-request-9.0.0.tgz",
|
"resolved": "https://registry.npmjs.org/teeny-request/-/teeny-request-9.0.0.tgz",
|
||||||
@ -9675,6 +10204,12 @@
|
|||||||
"punycode": "^2.1.0"
|
"punycode": "^2.1.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/url-polyfill": {
|
||||||
|
"version": "1.1.14",
|
||||||
|
"resolved": "https://registry.npmjs.org/url-polyfill/-/url-polyfill-1.1.14.tgz",
|
||||||
|
"integrity": "sha512-p4f3TTAG6ADVF3mwbXw7hGw+QJyw5CnNGvYh5fCuQQZIiuKUswqcznyV3pGDP9j0TSmC4UvRKm8kl1QsX1diiQ==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
"node_modules/util-deprecate": {
|
"node_modules/util-deprecate": {
|
||||||
"version": "1.0.2",
|
"version": "1.0.2",
|
||||||
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||||
@ -9822,6 +10357,22 @@
|
|||||||
"node": ">= 12.0.0"
|
"node": ">= 12.0.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/winston-loki": {
|
||||||
|
"version": "6.1.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/winston-loki/-/winston-loki-6.1.3.tgz",
|
||||||
|
"integrity": "sha512-DjWtJ230xHyYQWr9mZJa93yhwHttn3JEtSYWP8vXZWJOahiQheUhf+88dSIidbGXB3u0oLweV6G1vkL/ouT62Q==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"async-exit-hook": "2.0.1",
|
||||||
|
"btoa": "^1.2.1",
|
||||||
|
"protobufjs": "^7.2.4",
|
||||||
|
"url-polyfill": "^1.1.12",
|
||||||
|
"winston-transport": "^4.3.0"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"snappy": "^7.2.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/winston-transport": {
|
"node_modules/winston-transport": {
|
||||||
"version": "4.9.0",
|
"version": "4.9.0",
|
||||||
"resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.9.0.tgz",
|
"resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.9.0.tgz",
|
||||||
|
|||||||
**package.json**: two runtime dependencies added, for Prometheus metrics and Loki log shipping:

```diff
@@ -44,11 +44,13 @@
     "passport-jwt": "^4.0.1",
     "pg": "^8.13.1",
     "pg-hstore": "^2.3.4",
+    "prom-client": "^15.1.3",
     "sequelize": "^6.37.5",
     "socket.io": "^4.8.1",
     "uuid": "^8.3.2",
     "web-push": "^3.6.7",
     "winston": "^3.17.0",
+    "winston-loki": "^6.1.3",
     "zod": "^3.24.1"
   },
   "devDependencies": {
```
**src/app.ts** (path inferred from the relative imports): mounts the Prometheus middleware and exposes the scrape endpoint:

```diff
@@ -7,6 +7,7 @@ import { UserService } from './services/user.service';
 import { SSOUserData } from './types/auth.types';
 import { sequelize } from './config/database';
 import { corsMiddleware } from './middlewares/cors.middleware';
+import { metricsMiddleware, createMetricsRouter } from './middlewares/metrics.middleware';
 import routes from './routes/index';
 import { ensureUploadDir, UPLOAD_DIR } from './config/storage';
 import path from 'path';
@@ -101,6 +102,12 @@ app.use(express.urlencoded({ extended: true, limit: '10mb' }));
 // Logging middleware
 app.use(morgan('combined'));
 
+// Prometheus metrics middleware - collect request metrics
+app.use(metricsMiddleware);
+
+// Prometheus metrics endpoint - expose metrics for scraping
+app.use(createMetricsRouter());
+
 // Health check endpoint (before API routes)
 app.get('/health', (_req: express.Request, res: express.Response) => {
   res.status(200).json({
```
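To smoke-test the instrumentation from Postman, a plain GET against the scrape endpoint should return prom-client's text exposition format. Treat the path as an assumption: `/metrics` is the usual prom-client convention, but the actual route lives in `metrics.middleware.ts`, which is not part of this diff. Note the router is mounted on the app root rather than under the API prefix, so `{{host}}` below means the server root, not `{{baseUrl}}`:

```http
GET {{host}}/metrics
```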
**ConclusionController**: the summary created automatically on finalize now carries the caller's role, and the failure log records just the message:

```diff
@@ -348,16 +348,18 @@ export class ConclusionController {
       logger.info(`[Conclusion] ✅ Request ${requestId} finalized and closed`);
 
       // Automatically create summary when request is closed (idempotent - returns existing if already exists)
+      // Since the initiator is finalizing, this should always succeed
       let summaryId = null;
       try {
         const { summaryService } = await import('@services/summary.service');
-        const summary = await summaryService.createSummary(requestId, userId);
+        const userRole = (req as any).user?.role || (req as any).auth?.role;
+        const summary = await summaryService.createSummary(requestId, userId, { userRole });
         summaryId = (summary as any).summaryId;
         logger.info(`[Conclusion] ✅ Summary ${summaryId} created automatically for closed request ${requestId}`);
       } catch (summaryError: any) {
         // Log error but don't fail the closure if summary creation fails
         // Frontend can retry summary creation if needed
-        logger.error(`[Conclusion] Failed to create summary for request ${requestId}:`, summaryError);
+        logger.error(`[Conclusion] Failed to create summary for request ${requestId}:`, summaryError.message);
       }
 
       // Log activity
```
**DashboardController**: approver performance gains `priority` and `slaCompliance` filters, and a dedicated single-approver stats handler is added:

```diff
@@ -219,8 +219,10 @@ export class DashboardController {
       const endDate = req.query.endDate as string | undefined;
       const page = Number(req.query.page || 1);
       const limit = Number(req.query.limit || 10);
+      const priority = req.query.priority as string | undefined;
+      const slaCompliance = req.query.slaCompliance as string | undefined;
 
-      const result = await this.dashboardService.getApproverPerformance(userId, dateRange, page, limit, startDate, endDate);
+      const result = await this.dashboardService.getApproverPerformance(userId, dateRange, page, limit, startDate, endDate, priority, slaCompliance);
 
       res.json({
         success: true,
@@ -540,6 +542,50 @@
     }
   }
 
+  /**
+   * Get single approver stats only (separate API for performance)
+   */
+  async getSingleApproverStats(req: Request, res: Response): Promise<void> {
+    try {
+      const userId = (req as any).user?.userId;
+      const approverId = req.query.approverId as string;
+      const dateRange = req.query.dateRange as string | undefined;
+      const startDate = req.query.startDate as string | undefined;
+      const endDate = req.query.endDate as string | undefined;
+      const priority = req.query.priority as string | undefined;
+      const slaCompliance = req.query.slaCompliance as string | undefined;
+
+      if (!approverId) {
+        res.status(400).json({
+          success: false,
+          error: 'Approver ID is required'
+        });
+        return;
+      }
+
+      const stats = await this.dashboardService.getSingleApproverStats(
+        userId,
+        approverId,
+        dateRange,
+        startDate,
+        endDate,
+        priority,
+        slaCompliance
+      );
+
+      res.json({
+        success: true,
+        data: stats
+      });
+    } catch (error) {
+      logger.error('[Dashboard] Error fetching single approver stats:', error);
+      res.status(500).json({
+        success: false,
+        error: 'Failed to fetch approver stats'
+      });
+    }
+  }
+
   /**
    * Get requests filtered by approver ID for detailed performance analysis
    */
```
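A sketch of how the new filters might be exercised, assuming the handler is wired into the dashboard routes; the path and the filter values below are illustrative, since neither the route file nor the accepted `slaCompliance` values appear in this diff:

```http
GET {{baseUrl}}/dashboard/approver-stats?approverId=<approver-uuid>&dateRange=30d&priority=EXPRESS&slaCompliance=compliant
Authorization: Bearer <your_token>
```

Omitting `approverId` returns `400` with `"Approver ID is required"`, since the handler checks it before calling the service.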
**DocumentController**: structured logging for uploads, on both the success and failure paths:

```diff
@@ -8,6 +8,7 @@ import { activityService } from '@services/activity.service';
 import type { AuthenticatedRequest } from '../types/express';
 import { getRequestMetadata } from '@utils/requestUtils';
 import { getConfigNumber, getConfigValue } from '@services/configReader.service';
+import { logDocumentEvent, logWithContext } from '@utils/logger';
 
 export class DocumentController {
   async upload(req: AuthenticatedRequest, res: Response): Promise<void> {
@@ -82,6 +83,16 @@
         downloadCount: 0,
       } as any);
 
+      // Log document upload event
+      logDocumentEvent('uploaded', doc.documentId, {
+        requestId,
+        userId,
+        fileName: file.originalname,
+        fileType: extension,
+        fileSize: file.size,
+        category,
+      });
+
       // Get user details for activity logging
       const user = await User.findByPk(userId);
       const uploaderName = (user as any)?.displayName || (user as any)?.email || 'User';
@@ -108,6 +119,11 @@
       ResponseHandler.success(res, doc, 'File uploaded', 201);
     } catch (error) {
       const message = error instanceof Error ? error.message : 'Unknown error';
+      logWithContext('error', 'Document upload failed', {
+        userId: req.user?.userId,
+        requestId: req.body?.requestId,
+        error,
+      });
       ResponseHandler.error(res, 'Upload failed', 500, message);
     }
   }
```
**NotificationController**: a new endpoint to list the caller's push subscriptions:

```diff
@@ -2,6 +2,7 @@ import { Request, Response } from 'express';
 import { Notification } from '@models/Notification';
 import { Op } from 'sequelize';
 import logger from '@utils/logger';
+import { notificationService } from '@services/notification.service';
 
 export class NotificationController {
   /**
@@ -172,5 +173,32 @@
       res.status(500).json({ success: false, message: error.message });
     }
   }
+
+  /**
+   * Get user's push notification subscriptions
+   */
+  async getUserSubscriptions(req: Request, res: Response): Promise<void> {
+    try {
+      const userId = (req as any).user?.userId;
+
+      if (!userId) {
+        res.status(401).json({ success: false, message: 'Unauthorized' });
+        return;
+      }
+
+      const subscriptions = await notificationService.getUserSubscriptions(userId);
+
+      res.json({
+        success: true,
+        data: {
+          subscriptions,
+          count: subscriptions.length
+        }
+      });
+    } catch (error: any) {
+      logger.error('[Notification Controller] Error fetching subscriptions:', error);
+      res.status(500).json({ success: false, message: error.message });
+    }
+  }
 }
```
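The handler can be smoke-tested like the other notification endpoints. The path is illustrative (the notification routes file is not part of this diff); the response shape follows directly from the handler:

```http
GET {{baseUrl}}/notifications/subscriptions
Authorization: Bearer <your_token>
```

```json
{
  "success": true,
  "data": {
    "subscriptions": [ ... ],
    "count": 1
  }
}
```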
**PauseController**: resume now accepts optional notes and validates the body with Zod:

```diff
@@ -12,7 +12,7 @@ const pauseWorkflowSchema = z.object({
 });
 
 const resumeWorkflowSchema = z.object({
-  // No body required for resume
+  notes: z.string().max(1000, 'Notes must be less than 1000 characters').optional()
 });
 
 export class PauseController {
@@ -72,13 +72,20 @@
         return;
       }
 
-      const result = await pauseService.resumeWorkflow(id, userId);
+      // Validate request body (notes is optional)
+      const validated = resumeWorkflowSchema.parse(req.body || {});
+
+      const result = await pauseService.resumeWorkflow(id, userId, validated.notes);
 
       ResponseHandler.success(res, {
         workflow: result.workflow,
         level: result.level
       }, 'Workflow resumed successfully', 200);
     } catch (error: any) {
+      if (error instanceof z.ZodError) {
+        ResponseHandler.error(res, 'Validation failed', 400, error.errors.map(e => `${e.path.join('.')}: ${e.message}`).join('; '));
+        return;
+      }
       const errorMessage = error instanceof Error ? error.message : 'Unknown error';
       ResponseHandler.error(res, 'Failed to resume workflow', 400, errorMessage);
     }
```
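With the schema change, a resume request may carry an optional `notes` field (max 1000 characters). Assuming the pause routes follow a `/workflows/:id/...` convention (the route file is not shown here, so the path is a placeholder):

```http
POST {{baseUrl}}/workflows/<workflow-id>/resume
Content-Type: application/json
Authorization: Bearer <your_token>

{
  "notes": "Budget freeze lifted; resuming approvals"
}
```

An over-long `notes` value now returns `400 Validation failed` instead of falling through to the generic resume error.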
**SummaryController**: summaries can now be regenerated, the caller's role is passed through for access checks, and lookup and error handling are tightened:

```diff
@@ -2,30 +2,69 @@ import { Request, Response } from 'express';
 import { summaryService } from '@services/summary.service';
 import { ResponseHandler } from '@utils/responseHandler';
 import type { AuthenticatedRequest } from '../types/express';
+import logger from '@utils/logger';
 
 export class SummaryController {
   /**
    * Create a summary for a closed request
    * POST /api/v1/summaries
+   *
+   * Access: Initiator or Admin/Management users
+   * Body: { requestId: string, regenerate?: boolean }
    */
   async createSummary(req: AuthenticatedRequest, res: Response): Promise<void> {
     try {
       const userId = (req as any).user?.userId || (req as any).user?.id || (req as any).auth?.userId;
-      const { requestId } = req.body;
+      const userRole = (req as any).user?.role || (req as any).auth?.role;
+      const { requestId, regenerate } = req.body;
 
       if (!requestId) {
         ResponseHandler.error(res, 'requestId is required', 400);
         return;
       }
 
-      const summary = await summaryService.createSummary(requestId, userId);
-      ResponseHandler.success(res, summary, 'Summary created successfully', 201);
+      const summary = await summaryService.createSummary(requestId, userId, {
+        userRole,
+        regenerate: regenerate === true
+      });
+
+      const message = regenerate ? 'Summary regenerated successfully' : 'Summary created successfully';
+      ResponseHandler.success(res, summary, message, 201);
     } catch (error) {
       const errorMessage = error instanceof Error ? error.message : 'Unknown error';
       ResponseHandler.error(res, 'Failed to create summary', 400, errorMessage);
     }
   }
 
+  /**
+   * Regenerate summary for a closed request (deletes existing and creates new)
+   * POST /api/v1/summaries/:requestId/regenerate
+   *
+   * Access: Initiator or Admin/Management users
+   */
+  async regenerateSummary(req: AuthenticatedRequest, res: Response): Promise<void> {
+    try {
+      const userId = (req as any).user?.userId || (req as any).user?.id || (req as any).auth?.userId;
+      const userRole = (req as any).user?.role || (req as any).auth?.role;
+      const { requestId } = req.params;
+
+      if (!requestId) {
+        ResponseHandler.error(res, 'requestId is required', 400);
+        return;
+      }
+
+      const summary = await summaryService.createSummary(requestId, userId, {
+        userRole,
+        regenerate: true
+      });
+
+      ResponseHandler.success(res, summary, 'Summary regenerated successfully', 201);
+    } catch (error) {
+      const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+      ResponseHandler.error(res, 'Failed to regenerate summary', 400, errorMessage);
+    }
+  }
+
   /**
    * Get summary details
    * GET /api/v1/summaries/:summaryId
@@ -35,21 +74,27 @@
       const userId = (req as any).user?.userId || (req as any).user?.id || (req as any).auth?.userId;
       const { summaryId } = req.params;
 
-      // Check if this is a sharedSummaryId (UUID format) - if it starts with a shared summary pattern, try that first
-      // For now, we'll check if it's a shared summary by trying to get it
-      // If it fails, fall back to regular summary lookup
+      // The ID can be either a sharedSummaryId or a summaryId
+      // Try shared summary first (for SharedSummaryDetail component)
+      // If not found, try regular summary (for SummaryTab component)
       try {
         const summary = await summaryService.getSummaryDetailsBySharedId(summaryId, userId);
         ResponseHandler.success(res, summary, 'Summary retrieved successfully');
         return;
-      } catch (sharedError) {
-        // If it's not a shared summary, try regular summary lookup
+      } catch (sharedError: any) {
+        // Only log error if it's not "not found" (other errors are real issues)
+        if (!sharedError.message?.includes('not found')) {
+          logger.error('[Summary] Error getting summary by shared ID:', sharedError);
+        }
+
+        // Try as regular summaryId (the service checks for both initiator and shared access)
         const summary = await summaryService.getSummaryDetails(summaryId, userId);
         ResponseHandler.success(res, summary, 'Summary retrieved successfully');
       }
     } catch (error) {
       const errorMessage = error instanceof Error ? error.message : 'Unknown error';
-      const statusCode = errorMessage.includes('not found') || errorMessage.includes('Access denied') ? 404 : 500;
+      const statusCode = errorMessage.includes('not found') ? 404 :
+                         errorMessage.includes('Access denied') ? 403 : 500;
       ResponseHandler.error(res, 'Failed to get summary details', statusCode, errorMessage);
     }
   }
```
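Per the route comments in the controller, regeneration is reachable two ways, and both end in the same `createSummary` service call:

```http
POST {{baseUrl}}/summaries/<request-id>/regenerate
Authorization: Bearer <your_token>
```

or, equivalently, the create endpoint with the `regenerate` flag set:

```http
POST {{baseUrl}}/summaries
Content-Type: application/json
Authorization: Bearer <your_token>

{
  "requestId": "<request-id>",
  "regenerate": true
}
```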
**src/controllers/userPreference.controller.ts** (new file, 113 lines), reproduced with the split-view duplicates removed:

```typescript
import { Request, Response } from 'express';
import { User } from '@models/User';
import { updateNotificationPreferencesSchema } from '@validators/userPreference.validator';
import logger from '@utils/logger';

/**
 * Get current user's notification preferences
 */
export const getNotificationPreferences = async (req: Request, res: Response): Promise<void> => {
  try {
    const userId = req.user!.userId;

    const user = await User.findByPk(userId, {
      attributes: [
        'userId',
        'emailNotificationsEnabled',
        'pushNotificationsEnabled',
        'inAppNotificationsEnabled'
      ]
    });

    if (!user) {
      res.status(404).json({
        success: false,
        message: 'User not found'
      });
      return;
    }

    logger.info(`[UserPreference] Retrieved notification preferences for user ${userId}`);

    res.json({
      success: true,
      data: {
        emailNotificationsEnabled: user.emailNotificationsEnabled,
        pushNotificationsEnabled: user.pushNotificationsEnabled,
        inAppNotificationsEnabled: user.inAppNotificationsEnabled
      }
    });
  } catch (error: any) {
    logger.error('[UserPreference] Failed to get notification preferences:', error);
    res.status(500).json({
      success: false,
      message: 'Failed to retrieve notification preferences',
      error: error.message
    });
  }
};

/**
 * Update current user's notification preferences
 */
export const updateNotificationPreferences = async (req: Request, res: Response): Promise<void> => {
  try {
    const userId = req.user!.userId;

    // Validate request body
    const validated = updateNotificationPreferencesSchema.parse(req.body);

    const user = await User.findByPk(userId);

    if (!user) {
      res.status(404).json({
        success: false,
        message: 'User not found'
      });
      return;
    }

    // Update only provided fields
    const updateData: any = {};
    if (validated.emailNotificationsEnabled !== undefined) {
      updateData.emailNotificationsEnabled = validated.emailNotificationsEnabled;
    }
    if (validated.pushNotificationsEnabled !== undefined) {
      updateData.pushNotificationsEnabled = validated.pushNotificationsEnabled;
    }
    if (validated.inAppNotificationsEnabled !== undefined) {
      updateData.inAppNotificationsEnabled = validated.inAppNotificationsEnabled;
    }

    await user.update(updateData);

    logger.info(`[UserPreference] Updated notification preferences for user ${userId}:`, updateData);

    res.json({
      success: true,
      message: 'Notification preferences updated successfully',
      data: {
        emailNotificationsEnabled: user.emailNotificationsEnabled,
        pushNotificationsEnabled: user.pushNotificationsEnabled,
        inAppNotificationsEnabled: user.inAppNotificationsEnabled
      }
    });
  } catch (error: any) {
    if (error.name === 'ZodError') {
      res.status(400).json({
        success: false,
        message: 'Validation failed',
        error: error.errors.map((e: any) => e.message).join(', ')
      });
      return;
    }

    logger.error('[UserPreference] Failed to update notification preferences:', error);
    res.status(500).json({
      success: false,
      message: 'Failed to update notification preferences',
      error: error.message
    });
  }
};
```
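This controller is not wired to a route within this diff, so the paths below are placeholders; the payload fields, though, come straight from the handler (all three flags are optional, and only the ones you send are updated):

```http
GET {{baseUrl}}/users/me/notification-preferences
Authorization: Bearer <your_token>
```

```http
PUT {{baseUrl}}/users/me/notification-preferences
Content-Type: application/json
Authorization: Bearer <your_token>

{
  "emailNotificationsEnabled": true,
  "pushNotificationsEnabled": false
}
```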
**WorkflowController**: both create paths (JSON and multipart) now validate the initiator, accept the legacy `approvers` shape, enrich approvers/spectators from the directory, and auto-build the participants array. The diff is cut off mid-hunk at the end of the compare view:

```diff
@@ -11,6 +11,8 @@ import fs from 'fs';
 import path from 'path';
 import crypto from 'crypto';
 import { getRequestMetadata } from '@utils/requestUtils';
+import { enrichApprovalLevels, enrichSpectators, validateInitiator } from '@services/userEnrichment.service';
+import logger from '@utils/logger';
 
 const workflowService = new WorkflowService();
 
@@ -18,11 +20,88 @@
   async createWorkflow(req: AuthenticatedRequest, res: Response): Promise<void> {
     try {
       const validatedData = validateCreateWorkflow(req.body);
+
+      // Validate initiator exists
+      await validateInitiator(req.user.userId);
+
+      // Handle frontend format: map 'approvers' -> 'approvalLevels' for backward compatibility
+      let approvalLevels = validatedData.approvalLevels || [];
+      if (!approvalLevels.length && (req.body as any).approvers) {
+        const approvers = (req.body as any).approvers || [];
+        approvalLevels = approvers.map((a: any, index: number) => ({
+          levelNumber: index + 1,
+          email: a.email || a.approverEmail,
+          tatHours: a.tatType === 'days' ? (a.tat || 0) * 24 : (a.tat || a.tatHours || 24),
+          isFinalApprover: index === approvers.length - 1,
+        }));
+      }
+
+      // Normalize approval levels: map approverEmail -> email for backward compatibility
+      const normalizedApprovalLevels = approvalLevels.map((level: any) => ({
+        ...level,
+        email: level.email || level.approverEmail, // Support both formats
+      }));
+
+      // Enrich approval levels with user data (auto-lookup from AD if not in DB)
+      logger.info(`[WorkflowController] Enriching ${normalizedApprovalLevels.length} approval levels`);
+      const enrichedApprovalLevels = await enrichApprovalLevels(normalizedApprovalLevels as any);
+
+      // Enrich spectators if provided
+      // Normalize spectators: map userEmail -> email for backward compatibility
+      // Filter participants to only include SPECTATOR type (exclude INITIATOR and APPROVER)
+      const allParticipants = validatedData.spectators || validatedData.participants || [];
+      const spectators = allParticipants.filter((p: any) =>
+        !p.participantType || p.participantType === 'SPECTATOR'
+      );
+      const normalizedSpectators = spectators.map((spec: any) => ({
+        ...spec,
+        email: spec.email || spec.userEmail, // Support both formats
+      })).filter((spec: any) => spec.email); // Only include entries with email
+      const enrichedSpectators = normalizedSpectators.length > 0
+        ? await enrichSpectators(normalizedSpectators as any)
+        : [];
+
+      // Build complete participants array automatically
+      // This includes: INITIATOR + all APPROVERs + all SPECTATORs
+      const initiator = await User.findByPk(req.user.userId);
+      const initiatorEmail = (initiator as any).email;
+      const initiatorName = (initiator as any).displayName || (initiator as any).email;
+
+      const autoGeneratedParticipants = [
+        // Add initiator
+        {
+          userId: req.user.userId,
+          userEmail: initiatorEmail,
+          userName: initiatorName,
+          participantType: 'INITIATOR' as const,
+          canComment: true,
+          canViewDocuments: true,
+          canDownloadDocuments: true,
+          notificationEnabled: true,
+        },
+        // Add all approvers from approval levels
+        ...enrichedApprovalLevels.map((level: any) => ({
+          userId: level.approverId,
+          userEmail: level.approverEmail,
+          userName: level.approverName,
+          participantType: 'APPROVER' as const,
+          canComment: true,
+          canViewDocuments: true,
+          canDownloadDocuments: true,
+          notificationEnabled: true,
+        })),
+        // Add all spectators
+        ...enrichedSpectators,
+      ];
+
       // Convert string literal priority to enum
       const workflowData = {
         ...validatedData,
-        priority: validatedData.priority as Priority
+        priority: validatedData.priority as Priority,
+        approvalLevels: enrichedApprovalLevels,
+        participants: autoGeneratedParticipants,
       };
 
       const requestMeta = getRequestMetadata(req);
       const workflow = await workflowService.createWorkflow(req.user.userId, workflowData, {
         ipAddress: requestMeta.ipAddress,
@@ -32,6 +111,7 @@
       ResponseHandler.success(res, workflow, 'Workflow created successfully', 201);
     } catch (error) {
       const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+      logger.error('[WorkflowController] Failed to create workflow:', error);
       ResponseHandler.error(res, 'Failed to create workflow', 400, errorMessage);
     }
   }
@@ -59,6 +139,18 @@
         return;
       }
+
+      // Transform frontend format to backend format BEFORE validation
+      // Map 'approvers' -> 'approvalLevels' for backward compatibility
+      if (!parsed.approvalLevels && parsed.approvers) {
+        const approvers = parsed.approvers || [];
+        parsed.approvalLevels = approvers.map((a: any, index: number) => ({
+          levelNumber: index + 1,
+          email: a.email || a.approverEmail,
+          tatHours: a.tatType === 'days' ? (a.tat || 0) * 24 : (a.tat || a.tatHours || 24),
+          isFinalApprover: index === approvers.length - 1,
+        }));
+      }
 
       let validated;
       try {
         validated = validateCreateWorkflow(parsed);
@@ -67,11 +159,81 @@
         const errorMessage = validationError?.errors
           ? validationError.errors.map((e: any) => `${e.path.join('.')}: ${e.message}`).join('; ')
           : (validationError instanceof Error ? validationError.message : 'Validation failed');
+        logger.error(`[WorkflowController] Validation failed:`, errorMessage);
         ResponseHandler.error(res, 'Validation failed', 400, errorMessage);
         return;
       }
 
-      const workflowData = { ...validated, priority: validated.priority as Priority } as any;
+      // Validate initiator exists
+      await validateInitiator(userId);
+
+      // Use the approval levels from validation (already transformed above)
+      let approvalLevels = validated.approvalLevels || [];
+
+      // Normalize approval levels: map approverEmail -> email for backward compatibility
+      const normalizedApprovalLevels = approvalLevels.map((level: any) => ({
+        ...level,
+        email: level.email || level.approverEmail, // Support both formats
+      }));
+
+      // Enrich approval levels with user data (auto-lookup from AD if not in DB)
+      logger.info(`[WorkflowController] Enriching ${normalizedApprovalLevels.length} approval levels`);
+      const enrichedApprovalLevels = await enrichApprovalLevels(normalizedApprovalLevels as any);
+
+      // Enrich spectators if provided
+      // Normalize spectators: map userEmail -> email for backward compatibility
+      // Filter participants to only include SPECTATOR type (exclude INITIATOR and APPROVER)
+      const allParticipants = validated.spectators || validated.participants || [];
+      const spectators = allParticipants.filter((p: any) =>
+        !p.participantType || p.participantType === 'SPECTATOR'
+      );
+      const normalizedSpectators = spectators.map((spec: any) => ({
+        ...spec,
+        email: spec.email || spec.userEmail, // Support both formats
+      })).filter((spec: any) => spec.email); // Only include entries with email
+      const enrichedSpectators = normalizedSpectators.length > 0
+        ? await enrichSpectators(normalizedSpectators as any)
+        : [];
+
+      // Build complete participants array automatically
+      // This includes: INITIATOR + all APPROVERs + all SPECTATORs
+      const initiator = await User.findByPk(userId);
+      const initiatorEmail = (initiator as any).email;
+      const initiatorName = (initiator as any).displayName || (initiator as any).email;
+
+      const autoGeneratedParticipants = [
+        // Add initiator
+        {
+          userId: userId,
+          userEmail: initiatorEmail,
+          userName: initiatorName,
+          participantType: 'INITIATOR' as const,
+          canComment: true,
+          canViewDocuments: true,
+          canDownloadDocuments: true,
+          notificationEnabled: true,
+        },
+        // Add all approvers from approval levels
+        ...enrichedApprovalLevels.map((level: any) => ({
+          userId: level.approverId,
+          userEmail: level.approverEmail,
+          userName: level.approverName,
+          participantType: 'APPROVER' as const,
+          canComment: true,
+          canViewDocuments: true,
+          canDownloadDocuments: true,
+          notificationEnabled: true,
+        })),
+        // Add all spectators
+        ...enrichedSpectators,
+      ];
+
+      const workflowData = {
+        ...validated,
+        priority: validated.priority as Priority,
+        approvalLevels: enrichedApprovalLevels,
+        participants: autoGeneratedParticipants,
+      } as any;
 
       const requestMeta = getRequestMetadata(req);
       const workflow = await workflowService.createWorkflow(userId, workflowData, {
```
|
const workflow = await workflowService.createWorkflow(userId, workflowData, {
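For reference, a minimal sketch of what the `approvers` → `approvalLevels` transform above produces for a typical legacy-format payload (the sample emails and TAT values are illustrative, not taken from the codebase):

```ts
// Hypothetical input in the legacy frontend shape
const parsed: any = {
  title: 'Sample request',
  approvers: [
    { email: 'manager@example.com', tat: 1, tatType: 'days' }, // days -> hours
    { approverEmail: 'cfo@example.com', tatHours: 48 },        // already in hours
  ],
};

// Same mapping the controller applies before validation
const approvers = parsed.approvers || [];
parsed.approvalLevels = approvers.map((a: any, index: number) => ({
  levelNumber: index + 1,
  email: a.email || a.approverEmail,
  tatHours: a.tatType === 'days' ? (a.tat || 0) * 24 : (a.tat || a.tatHours || 24),
  isFinalApprover: index === approvers.length - 1,
}));

// parsed.approvalLevels now holds:
// [
//   { levelNumber: 1, email: 'manager@example.com', tatHours: 24, isFinalApprover: false },
//   { levelNumber: 2, email: 'cfo@example.com', tatHours: 48, isFinalApprover: true },
// ]
```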
291 src/middlewares/metrics.middleware.ts Normal file
@@ -0,0 +1,291 @@
/**
 * Prometheus Metrics Middleware
 * Exposes application metrics for monitoring with Prometheus/Grafana
 *
 * Metrics exposed:
 * - http_requests_total: Total number of HTTP requests
 * - http_request_duration_seconds: HTTP request latency histogram
 * - http_request_errors_total: Total number of HTTP errors
 * - nodejs_*: Node.js runtime metrics (memory, event loop, etc.)
 * - Custom business metrics (TAT breaches, workflow counts, etc.)
 */

import { Request, Response, NextFunction, Router } from 'express';
import client from 'prom-client';

// ============================================================================
// REGISTRY SETUP
// ============================================================================

// Create a custom registry to avoid conflicts with default metrics
const register = new client.Registry();

// Add default Node.js metrics (memory, CPU, event loop, GC, etc.)
client.collectDefaultMetrics({
  register,
  prefix: 'nodejs_',
  labels: { app: 're-workflow', service: 'backend' },
});

// ============================================================================
// HTTP METRICS
// ============================================================================

// Total HTTP requests counter
const httpRequestsTotal = new client.Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status_code'],
  registers: [register],
});

// HTTP request duration histogram
const httpRequestDuration = new client.Histogram({
  name: 'http_request_duration_seconds',
  help: 'HTTP request latency in seconds',
  labelNames: ['method', 'route', 'status_code'],
  buckets: [0.01, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10],
  registers: [register],
});

// HTTP errors counter
const httpRequestErrors = new client.Counter({
  name: 'http_request_errors_total',
  help: 'Total number of HTTP errors (4xx and 5xx)',
  labelNames: ['method', 'route', 'status_code', 'error_type'],
  registers: [register],
});

// Active HTTP connections gauge
const activeConnections = new client.Gauge({
  name: 'http_active_connections',
  help: 'Number of active HTTP connections',
  registers: [register],
});

// ============================================================================
// BUSINESS METRICS
// ============================================================================

// TAT breaches counter
export const tatBreachesTotal = new client.Counter({
  name: 'tat_breaches_total',
  help: 'Total number of TAT breaches',
  labelNames: ['department', 'workflow_type', 'breach_level'],
  registers: [register],
});

// Pending workflows gauge
export const pendingWorkflowsCount = new client.Gauge({
  name: 'pending_workflows_count',
  help: 'Current number of pending workflows',
  labelNames: ['department', 'status'],
  registers: [register],
});

// Workflow operations counter
export const workflowOperationsTotal = new client.Counter({
  name: 'workflow_operations_total',
  help: 'Total number of workflow operations',
  labelNames: ['operation', 'status'],
  registers: [register],
});

// User authentication events counter
export const authEventsTotal = new client.Counter({
  name: 'auth_events_total',
  help: 'Total number of authentication events',
  labelNames: ['event_type', 'success'],
  registers: [register],
});

// Database query duration histogram
export const dbQueryDuration = new client.Histogram({
  name: 'db_query_duration_seconds',
  help: 'Database query latency in seconds',
  labelNames: ['operation', 'table'],
  buckets: [0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5],
  registers: [register],
});

// Redis operations counter
export const redisOperationsTotal = new client.Counter({
  name: 'redis_operations_total',
  help: 'Total number of Redis operations',
  labelNames: ['operation', 'status'],
  registers: [register],
});

// AI service calls
export const aiServiceCalls = new client.Counter({
  name: 'ai_service_calls_total',
  help: 'Total number of AI service calls',
  labelNames: ['provider', 'operation', 'status'],
  registers: [register],
});

export const aiServiceDuration = new client.Histogram({
  name: 'ai_service_duration_seconds',
  help: 'AI service call latency in seconds',
  labelNames: ['provider', 'operation'],
  buckets: [0.5, 1, 2, 5, 10, 30, 60],
  registers: [register],
});

// ============================================================================
// MIDDLEWARE
// ============================================================================

/**
 * Normalize route path for metrics labels
 * Replaces dynamic segments like UUIDs and IDs with placeholders
 */
function normalizeRoutePath(path: string): string {
  return path
    // Replace UUIDs
    .replace(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/gi, ':id')
    // Replace numeric IDs
    .replace(/\/\d+/g, '/:id')
    // Replace request IDs (REQ-XXXX-XXX format)
    .replace(/REQ-\d+-\d+/gi, ':requestId')
    // Clean up multiple slashes
    .replace(/\/+/g, '/');
}

/**
 * HTTP metrics middleware
 * Tracks request counts, durations, and errors
 */
export function metricsMiddleware(req: Request, res: Response, next: NextFunction): void {
  // Skip metrics endpoint itself
  if (req.path === '/metrics') {
    return next();
  }

  const startTime = Date.now();
  activeConnections.inc();

  // Capture response on finish
  res.on('finish', () => {
    const duration = (Date.now() - startTime) / 1000; // Convert to seconds
    const route = normalizeRoutePath(req.route?.path || req.path);
    const statusCode = res.statusCode.toString();
    const method = req.method;

    // Record request count
    httpRequestsTotal.inc({ method, route, status_code: statusCode });

    // Record request duration
    httpRequestDuration.observe(
      { method, route, status_code: statusCode },
      duration
    );

    // Record errors (4xx and 5xx)
    if (res.statusCode >= 400) {
      const errorType = res.statusCode >= 500 ? 'server_error' : 'client_error';
      httpRequestErrors.inc({
        method,
        route,
        status_code: statusCode,
        error_type: errorType,
      });
    }

    activeConnections.dec();
  });

  // Handle connection errors
  res.on('error', () => {
    activeConnections.dec();
  });

  next();
}

/**
 * Metrics endpoint handler
 * Returns Prometheus-formatted metrics
 */
export async function metricsHandler(_req: Request, res: Response): Promise<void> {
  try {
    res.set('Content-Type', register.contentType);
    const metrics = await register.metrics();
    res.end(metrics);
  } catch (error) {
    res.status(500).end('Error collecting metrics');
  }
}

/**
 * Create metrics router
 * Sets up the /metrics endpoint
 */
export function createMetricsRouter(): Router {
  const router = Router();

  // Metrics endpoint (GET /metrics)
  router.get('/metrics', metricsHandler);

  return router;
}

// ============================================================================
// HELPER FUNCTIONS FOR RECORDING METRICS
// ============================================================================

/**
 * Record a TAT breach event
 */
export function recordTATBreach(department: string, workflowType: string, breachLevel: string = 'warning'): void {
  tatBreachesTotal.inc({ department, workflow_type: workflowType, breach_level: breachLevel });
}

/**
 * Update pending workflows count
 */
export function updatePendingWorkflows(department: string, status: string, count: number): void {
  pendingWorkflowsCount.set({ department, status }, count);
}

/**
 * Record a workflow operation
 */
export function recordWorkflowOperation(operation: string, success: boolean): void {
  workflowOperationsTotal.inc({ operation, status: success ? 'success' : 'failure' });
}

/**
 * Record an authentication event
 */
export function recordAuthEvent(eventType: string, success: boolean): void {
  authEventsTotal.inc({ event_type: eventType, success: success ? 'true' : 'false' });
}

/**
 * Record a database query duration
 */
export function recordDBQuery(operation: string, table: string, durationMs: number): void {
  dbQueryDuration.observe({ operation, table }, durationMs / 1000);
}

/**
 * Record a Redis operation
 */
export function recordRedisOperation(operation: string, success: boolean): void {
  redisOperationsTotal.inc({ operation, status: success ? 'success' : 'failure' });
}

/**
 * Record an AI service call
 */
export function recordAIServiceCall(provider: string, operation: string, success: boolean, durationMs?: number): void {
  aiServiceCalls.inc({ provider, operation, status: success ? 'success' : 'failure' });
  if (durationMs !== undefined) {
    aiServiceDuration.observe({ provider, operation }, durationMs / 1000);
  }
}

// Export the registry for advanced use cases
export { register };
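A minimal sketch of how this middleware and router might be mounted in the Express bootstrap (the `app.ts` location, port, and import path are assumptions, not confirmed by this diff):

```ts
import express from 'express';
import {
  metricsMiddleware,
  createMetricsRouter,
  recordWorkflowOperation,
} from './middlewares/metrics.middleware'; // assumed relative path

const app = express();

// Track every request before the route handlers run
app.use(metricsMiddleware);

// Expose GET /metrics for the Prometheus scraper
app.use(createMetricsRouter());

// Business code can record custom metrics anywhere, e.g. after a create succeeds:
recordWorkflowOperation('create', true);

app.listen(3000);
```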
53 src/migrations/20251203-add-user-notification-preferences.ts Normal file
@@ -0,0 +1,53 @@
import { QueryInterface, DataTypes } from 'sequelize';

module.exports = {
  async up(queryInterface: QueryInterface): Promise<void> {
    // Add notification preference columns to users table
    await queryInterface.addColumn('users', 'email_notifications_enabled', {
      type: DataTypes.BOOLEAN,
      allowNull: false,
      defaultValue: true,
      comment: 'User preference for receiving email notifications'
    });

    await queryInterface.addColumn('users', 'push_notifications_enabled', {
      type: DataTypes.BOOLEAN,
      allowNull: false,
      defaultValue: true,
      comment: 'User preference for receiving push notifications'
    });

    await queryInterface.addColumn('users', 'in_app_notifications_enabled', {
      type: DataTypes.BOOLEAN,
      allowNull: false,
      defaultValue: true,
      comment: 'User preference for receiving in-app notifications'
    });

    // Add indexes for faster queries
    await queryInterface.addIndex('users', ['email_notifications_enabled'], {
      name: 'idx_users_email_notifications_enabled'
    });

    await queryInterface.addIndex('users', ['push_notifications_enabled'], {
      name: 'idx_users_push_notifications_enabled'
    });

    await queryInterface.addIndex('users', ['in_app_notifications_enabled'], {
      name: 'idx_users_in_app_notifications_enabled'
    });
  },

  async down(queryInterface: QueryInterface): Promise<void> {
    // Remove indexes first
    await queryInterface.removeIndex('users', 'idx_users_in_app_notifications_enabled');
    await queryInterface.removeIndex('users', 'idx_users_push_notifications_enabled');
    await queryInterface.removeIndex('users', 'idx_users_email_notifications_enabled');

    // Remove columns
    await queryInterface.removeColumn('users', 'in_app_notifications_enabled');
    await queryInterface.removeColumn('users', 'push_notifications_enabled');
    await queryInterface.removeColumn('users', 'email_notifications_enabled');
  }
};
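The migration can also be exercised directly against a Sequelize instance in a one-off script (the connection setup below is an assumption; in this repo it is registered in the `runMigrations` list, as a later hunk shows):

```ts
import { Sequelize } from 'sequelize';

const sequelize = new Sequelize(process.env.DATABASE_URL as string); // assumed env var
const migration = require('./migrations/20251203-add-user-notification-preferences');

async function main() {
  // Apply the three preference columns and their indexes
  await migration.up(sequelize.getQueryInterface());
  // ...or roll them back:
  // await migration.down(sequelize.getQueryInterface());
}

main().catch(console.error);
```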
@@ -39,6 +39,12 @@ interface UserAttributes {
     office?: string;
     timezone?: string;
   };
+
+  // Notification Preferences
+  emailNotificationsEnabled: boolean;
+  pushNotificationsEnabled: boolean;
+  inAppNotificationsEnabled: boolean;
+
   isActive: boolean;
   role: UserRole; // RBAC: USER, MANAGEMENT, ADMIN
   lastLogin?: Date;
@@ -46,7 +52,7 @@ interface UserAttributes {
   updatedAt: Date;
 }

-interface UserCreationAttributes extends Optional<UserAttributes, 'userId' | 'employeeId' | 'department' | 'designation' | 'phone' | 'manager' | 'secondEmail' | 'jobTitle' | 'employeeNumber' | 'postalAddress' | 'mobilePhone' | 'adGroups' | 'role' | 'lastLogin' | 'createdAt' | 'updatedAt'> {}
+interface UserCreationAttributes extends Optional<UserAttributes, 'userId' | 'employeeId' | 'department' | 'designation' | 'phone' | 'manager' | 'secondEmail' | 'jobTitle' | 'employeeNumber' | 'postalAddress' | 'mobilePhone' | 'adGroups' | 'emailNotificationsEnabled' | 'pushNotificationsEnabled' | 'inAppNotificationsEnabled' | 'role' | 'lastLogin' | 'createdAt' | 'updatedAt'> {}

 class User extends Model<UserAttributes, UserCreationAttributes> implements UserAttributes {
   public userId!: string;
@@ -77,6 +83,12 @@ class User extends Model<UserAttributes, UserCreationAttributes> implements UserAttributes {
     office?: string;
     timezone?: string;
   };
+
+  // Notification Preferences
+  public emailNotificationsEnabled!: boolean;
+  public pushNotificationsEnabled!: boolean;
+  public inAppNotificationsEnabled!: boolean;
+
   public isActive!: boolean;
   public role!: UserRole; // RBAC: USER, MANAGEMENT, ADMIN
   public lastLogin?: Date;
@@ -222,6 +234,30 @@ User.init(
       allowNull: true,
       comment: 'JSON object containing location details (city, state, country, office, timezone)'
     },
+
+    // Notification Preferences
+    emailNotificationsEnabled: {
+      type: DataTypes.BOOLEAN,
+      allowNull: false,
+      defaultValue: true,
+      field: 'email_notifications_enabled',
+      comment: 'User preference for receiving email notifications'
+    },
+    pushNotificationsEnabled: {
+      type: DataTypes.BOOLEAN,
+      allowNull: false,
+      defaultValue: true,
+      field: 'push_notifications_enabled',
+      comment: 'User preference for receiving push notifications'
+    },
+    inAppNotificationsEnabled: {
+      type: DataTypes.BOOLEAN,
+      allowNull: false,
+      defaultValue: true,
+      field: 'in_app_notifications_enabled',
+      comment: 'User preference for receiving in-app notifications'
+    },
+
     isActive: {
       type: DataTypes.BOOLEAN,
       defaultValue: true,
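A minimal sketch of flipping one of these flags through the model (the import path/alias is an assumption):

```ts
import User from '@models/user.model'; // assumed path alias

async function disableEmailNotifications(userId: string): Promise<void> {
  // The `field` mappings above route this to the snake_case column
  // email_notifications_enabled added by the migration
  await User.update(
    { emailNotificationsEnabled: false },
    { where: { userId } }
  );
}
```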
@@ -114,6 +114,12 @@ router.get('/metadata/departments',
   asyncHandler(dashboardController.getDepartments.bind(dashboardController))
 );

+// Get single approver stats only (for performance - separate from requests)
+router.get('/stats/single-approver',
+  authenticateToken,
+  asyncHandler(dashboardController.getSingleApproverStats.bind(dashboardController))
+);
+
 // Get requests filtered by approver ID (for detailed performance analysis)
 router.get('/requests/by-approver',
   authenticateToken,
@@ -3,6 +3,7 @@ import authRoutes from './auth.routes';
 import workflowRoutes from './workflow.routes';
 import summaryRoutes from './summary.routes';
 import userRoutes from './user.routes';
+import userPreferenceRoutes from './userPreference.routes';
 import documentRoutes from './document.routes';
 import tatRoutes from './tat.routes';
 import adminRoutes from './admin.routes';
@@ -29,6 +30,7 @@ router.use('/auth', authRoutes);
 router.use('/config', configRoutes); // System configuration (public)
 router.use('/workflows', workflowRoutes);
 router.use('/users', userRoutes);
+router.use('/user/preferences', userPreferenceRoutes); // User preferences (authenticated)
 router.use('/documents', documentRoutes);
 router.use('/tat', tatRoutes);
 router.use('/admin', adminRoutes);
@@ -42,5 +42,11 @@ router.delete('/:notificationId',
   asyncHandler(notificationController.deleteNotification.bind(notificationController))
 );

+// Get user's push subscriptions
+router.get('/subscriptions',
+  authenticateToken,
+  asyncHandler(notificationController.getUserSubscriptions.bind(notificationController))
+);
+
 export default router;
@@ -38,6 +38,13 @@ router.get(
   asyncHandler(summaryController.getSummaryByRequestId.bind(summaryController))
 );

+// Regenerate summary for a request (MUST come before /:summaryId)
+// Access: Initiator or Admin/Management users
+router.post(
+  '/request/:requestId/regenerate',
+  asyncHandler(summaryController.regenerateSummary.bind(summaryController))
+);
+
 // Share summary with users (MUST come before /:summaryId)
 router.post(
   '/:summaryId/share',
28 src/routes/userPreference.routes.ts Normal file
@@ -0,0 +1,28 @@
import { Router } from 'express';
import { authenticateToken } from '@middlewares/auth.middleware';
import {
  getNotificationPreferences,
  updateNotificationPreferences
} from '@controllers/userPreference.controller';

const router = Router();

// All routes require authentication
router.use(authenticateToken);

/**
 * @route   GET /api/v1/user/preferences/notifications
 * @desc    Get current user's notification preferences
 * @access  Private (Authenticated users)
 */
router.get('/notifications', getNotificationPreferences);

/**
 * @route   PUT /api/v1/user/preferences/notifications
 * @desc    Update current user's notification preferences
 * @access  Private (Authenticated users)
 */
router.put('/notifications', updateNotificationPreferences);

export default router;
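A minimal client-side sketch of calling the update endpoint (the body field names mirror the model attributes above, and the response envelope is assumed to match the `success`/`data` shape used elsewhere in this API):

```ts
// Update the current user's notification preferences
const res = await fetch('/api/v1/user/preferences/notifications', {
  method: 'PUT',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${token}`, // token from login
  },
  body: JSON.stringify({
    emailNotificationsEnabled: true,
    pushNotificationsEnabled: false, // assumed body field names
    inAppNotificationsEnabled: true,
  }),
});
const { success, data } = await res.json();
```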
@@ -460,18 +460,18 @@ router.post('/:id/approvers/at-level',
 );

 // Pause workflow routes
-// POST /workflows/:id/pause - Pause a workflow (approver only)
+// POST /workflows/:id/pause - Pause a workflow (approver or initiator)
 router.post('/:id/pause',
   authenticateToken,
-  requireParticipantTypes(['APPROVER']), // Only approvers can pause
+  requireParticipantTypes(['APPROVER', 'INITIATOR']), // Both approvers and initiators can pause
   validateParams(workflowParamsSchema),
   asyncHandler(pauseController.pauseWorkflow.bind(pauseController))
 );

-// POST /workflows/:id/resume - Resume a paused workflow (approver who paused or initiator)
+// POST /workflows/:id/resume - Resume a paused workflow (approver or initiator)
 router.post('/:id/resume',
   authenticateToken,
-  requireParticipantTypes(['APPROVER', 'INITIATOR']),
+  requireParticipantTypes(['APPROVER', 'INITIATOR']), // Both approvers and initiators can resume
   validateParams(workflowParamsSchema),
   asyncHandler(pauseController.resumeWorkflow.bind(pauseController))
 );
@@ -118,6 +118,7 @@ async function runMigrations(): Promise<void> {
   const m25 = require('../migrations/20250126-add-pause-fields-to-workflow-requests');
   const m26 = require('../migrations/20250126-add-pause-fields-to-approval-levels');
   const m27 = require('../migrations/20250127-migrate-in-progress-to-pending');
+  const m28 = require('../migrations/20251203-add-user-notification-preferences');

   const migrations = [
     { name: '2025103000-create-users', module: m0 },
@@ -148,6 +149,7 @@ async function runMigrations(): Promise<void> {
     { name: '20250126-add-pause-fields-to-workflow-requests', module: m25 },
     { name: '20250126-add-pause-fields-to-approval-levels', module: m26 },
     { name: '20250127-migrate-in-progress-to-pending', module: m27 },
+    { name: '20251203-add-user-notification-preferences', module: m28 },
   ];

   const queryInterface = sequelize.getQueryInterface();
@@ -5,7 +5,7 @@ export const SYSTEM_EVENT_REQUEST_ID = '00000000-0000-0000-0000-000000000001';

 export type ActivityEntry = {
   requestId: string;
-  type: 'created' | 'submitted' | 'assignment' | 'approval' | 'rejection' | 'status_change' | 'comment' | 'reminder' | 'document_added' | 'sla_warning' | 'ai_conclusion_generated' | 'closed' | 'login' | 'paused' | 'resumed' | 'pause_retriggered';
+  type: 'created' | 'submitted' | 'assignment' | 'approval' | 'rejection' | 'status_change' | 'comment' | 'reminder' | 'document_added' | 'sla_warning' | 'ai_conclusion_generated' | 'summary_generated' | 'closed' | 'login' | 'paused' | 'resumed' | 'pause_retriggered';
   user?: { userId: string; name?: string; email?: string };
   timestamp: string;
   action: string;
@@ -1,4 +1,4 @@
-import logger from '@utils/logger';
+import logger, { logAIEvent } from '@utils/logger';
 import { getAIProviderConfig } from './configReader.service';

 // Provider-specific interfaces
@@ -45,7 +45,7 @@ class ClaudeProvider implements AIProvider {
   async generateText(prompt: string): Promise<string> {
     if (!this.client) throw new Error('Claude client not initialized');

-    logger.info(`[AI Service] Generating with Claude model: ${this.model}`);
+    logAIEvent('request', { provider: 'claude', model: this.model });

     const response = await this.client.messages.create({
       model: this.model,
@@ -103,7 +103,7 @@ class OpenAIProvider implements AIProvider {
   async generateText(prompt: string): Promise<string> {
     if (!this.client) throw new Error('OpenAI client not initialized');

-    logger.info(`[AI Service] Generating with OpenAI model: ${this.model}`);
+    logAIEvent('request', { provider: 'openai', model: this.model });

     const response = await this.client.chat.completions.create({
       model: this.model,
@@ -160,7 +160,7 @@ class GeminiProvider implements AIProvider {
   async generateText(prompt: string): Promise<string> {
     if (!this.client) throw new Error('Gemini client not initialized');

-    logger.info(`[AI Service] Generating with Gemini model: ${this.model}`);
+    logAIEvent('request', { provider: 'gemini', model: this.model });

     const model = this.client.getGenerativeModel({ model: this.model });
     const result = await model.generateContent(prompt);
@@ -379,35 +379,14 @@ class AIService {
     // Use provider's generateText method
     let remarkText = await this.provider.generateText(prompt);

-    // Get max length from config for validation
+    // Get max length from config for logging
     const { getConfigValue } = require('./configReader.service');
     const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
     const maxLength = parseInt(maxLengthStr || '2000', 10);

-    // Validate length - AI should already be within limit, but trim as safety net
+    // Log length (no trimming - preserve complete AI-generated content)
     if (remarkText.length > maxLength) {
-      logger.warn(`[AI Service] ⚠️ AI exceeded character limit (${remarkText.length} > ${maxLength}). This should be rare - AI was instructed to prioritize and condense. Applying safety trim...`);
-
-      // Try to find a natural break point (sentence end) near the limit
-      const safeLimit = maxLength - 3;
-      let trimPoint = safeLimit;
-
-      // Look for last sentence end (. ! ?) within the safe limit
-      const lastPeriod = remarkText.lastIndexOf('.', safeLimit);
-      const lastExclaim = remarkText.lastIndexOf('!', safeLimit);
-      const lastQuestion = remarkText.lastIndexOf('?', safeLimit);
-      const bestBreak = Math.max(lastPeriod, lastExclaim, lastQuestion);
-
-      // Use sentence break if it's reasonably close to the limit (within 80% of max)
-      if (bestBreak > maxLength * 0.8) {
-        trimPoint = bestBreak + 1; // Include the punctuation
-        remarkText = remarkText.substring(0, trimPoint).trim();
-      } else {
-        // Fall back to hard trim with ellipsis
-        remarkText = remarkText.substring(0, safeLimit).trim() + '...';
-      }
-
-      logger.info(`[AI Service] Trimmed to ${remarkText.length} characters`);
+      logger.warn(`[AI Service] ⚠️ AI exceeded suggested limit (${remarkText.length} > ${maxLength}). Content preserved to avoid incomplete information.`);
     }

     // Extract key points (look for bullet points or numbered items)
@@ -548,13 +527,33 @@ ${isRejected
 - Be concise and direct - every word must add value
 - No time-specific words like "today", "now", "currently", "recently"
 - No corporate jargon or buzzwords
-- No emojis or excessive formatting
+- No emojis
 - Write like a professional documenting a completed process
 - Focus on facts: what was requested, who ${isRejected ? 'rejected' : 'approved'}, what was decided
 - Use past tense for completed actions
 - Use short sentences and avoid filler words

-Write the conclusion now. STRICT LIMIT: ${maxLength} characters maximum. Prioritize and condense if needed:`;
+**FORMAT REQUIREMENT - HTML Rich Text:**
+- Generate content in HTML format for rich text editor display
+- Use proper HTML tags for structure and formatting:
+  * <p>...</p> for paragraphs
+  * <strong>...</strong> for important text/headings
+  * <ul><li>...</li></ul> for bullet points
+  * <ol><li>...</li></ol> for numbered lists
+  * <br> for line breaks only when necessary
+- Use semantic HTML to make the content readable and well-structured
+- Example format:
+  <p><strong>Request Summary:</strong> [Brief description]</p>
+  <p><strong>Approval Decision:</strong> [Decision details]</p>
+  <ul>
+  <li>Key point 1</li>
+  <li>Key point 2</li>
+  </ul>
+  <p><strong>Outcome:</strong> [Final outcome]</p>
+- Keep HTML clean and minimal - no inline styles, no divs, no classes
+- The HTML should render nicely in a rich text editor
+
+Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters maximum (including HTML tags). Prioritize and condense if needed:`;

     return prompt;
   }
@@ -6,11 +6,12 @@ import { ApprovalAction } from '../types/approval.types';
 import { ApprovalStatus, WorkflowStatus } from '../types/common.types';
 import { calculateTATPercentage } from '@utils/helpers';
 import { calculateElapsedWorkingHours } from '@utils/tatTimeUtils';
-import logger from '@utils/logger';
+import logger, { logWorkflowEvent, logAIEvent } from '@utils/logger';
 import { Op } from 'sequelize';
 import { notificationService } from './notification.service';
 import { activityService } from './activity.service';
 import { tatSchedulerService } from './tatScheduler.service';
+import { emitToRequestRoom } from '../realtime/socket';

 export class ApprovalService {
   async approveLevel(levelId: string, action: ApprovalAction, _userId: string, requestMetadata?: { ipAddress?: string | null; userAgent?: string | null }): Promise<ApprovalLevel | null> {
@@ -121,7 +122,11 @@ export class ApprovalService {
       },
       { where: { requestId: level.requestId } }
     );
-    logger.info(`Final approver approved. Workflow ${level.requestId} closed as APPROVED`);
+    logWorkflowEvent('approved', level.requestId, {
+      level: level.levelNumber,
+      isFinalApproval: true,
+      status: 'APPROVED',
+    });

     // Log final approval activity first (so it's included in AI context)
     activityService.log({
@@ -152,7 +157,10 @@ export class ApprovalService {
     const remarkGenerationEnabled = (await getConfigValue('AI_REMARK_GENERATION_ENABLED', 'true'))?.toLowerCase() === 'true';

     if (aiEnabled && remarkGenerationEnabled && aiService.isAvailable()) {
-      logger.info(`[Approval] 🔄 Starting background AI conclusion generation for ${level.requestId}...`);
+      logAIEvent('request', {
+        requestId: level.requestId,
+        action: 'conclusion_generation_started',
+      });

       // Gather context for AI generation
       const approvalLevels = await ApprovalLevel.findAll({
@@ -243,7 +251,10 @@ export class ApprovalService {
         finalizedAt: null
       } as any);

-      logAIEvent: see below
-      logger.info(`[Approval] ✅ Background AI conclusion completed for ${level.requestId}`);
+      logAIEvent('response', {
+        requestId: level.requestId,
+        action: 'conclusion_generation_completed',
+      });

       // Log activity
       activityService.log({
@@ -266,9 +277,50 @@ export class ApprovalService {
       logger.warn(`[Approval] AI service unavailable for ${level.requestId}, skipping conclusion generation`);
     }
   }
+
+  // Auto-generate RequestSummary after final approval (system-level generation)
+  // This makes the summary immediately available when user views the approved request
+  try {
+    const { summaryService } = await import('./summary.service');
+    const summary = await summaryService.createSummary(level.requestId, 'system', {
+      isSystemGeneration: true
+    });
+    logger.info(`[Approval] ✅ Auto-generated summary ${(summary as any).summaryId} for approved request ${level.requestId}`);
+
+    // Log summary generation activity
+    activityService.log({
+      requestId: level.requestId,
+      type: 'summary_generated',
+      user: { userId: 'system', name: 'System' },
+      timestamp: new Date().toISOString(),
+      action: 'Summary Auto-Generated',
+      details: 'Request summary auto-generated after final approval',
+      ipAddress: undefined,
+      userAgent: undefined
+    });
+  } catch (summaryError: any) {
+    // Log but don't fail - initiator can regenerate later
+    logger.error(`[Approval] Failed to auto-generate summary for ${level.requestId}:`, summaryError.message);
+  }
+
 } catch (aiError) {
-  logger.error(`[Approval] Background AI generation failed for ${level.requestId}:`, aiError);
+  logAIEvent('error', {
+    requestId: level.requestId,
+    action: 'conclusion_generation_failed',
+    error: aiError,
+  });
   // Silent failure - initiator can write manually
+
+  // Still try to generate summary even if AI conclusion failed
+  try {
+    const { summaryService } = await import('./summary.service');
+    const summary = await summaryService.createSummary(level.requestId, 'system', {
+      isSystemGeneration: true
+    });
+    logger.info(`[Approval] ✅ Auto-generated summary ${(summary as any).summaryId} for approved request ${level.requestId} (without AI conclusion)`);
+  } catch (summaryError: any) {
+    logger.error(`[Approval] Failed to auto-generate summary for ${level.requestId}:`, summaryError.message);
+  }
 }
 })().catch(err => {
   // Catch any unhandled promise rejections
@@ -448,7 +500,11 @@ export class ApprovalService {
       }
     );

-    logger.info(`Level ${level.levelNumber} rejected. Workflow ${level.requestId} marked as REJECTED. Awaiting closure from initiator.`);
+    logWorkflowEvent('rejected', level.requestId, {
+      level: level.levelNumber,
+      status: 'REJECTED',
+      message: 'Awaiting closure from initiator',
+    });

     // Log rejection activity first (so it's included in AI context)
     if (wf) {
@@ -621,6 +677,16 @@ export class ApprovalService {
     }

     logger.info(`Approval level ${levelId} ${action.action.toLowerCase()}ed`);
+
+    // Emit real-time update to all users viewing this request
+    emitToRequestRoom(level.requestId, 'request:updated', {
+      requestId: level.requestId,
+      requestNumber: (wf as any)?.requestNumber,
+      action: action.action,
+      levelNumber: level.levelNumber,
+      timestamp: now.toISOString()
+    });

     return updatedLevel;
   } catch (error) {
     logger.error(`Failed to ${action.action.toLowerCase()} level ${levelId}:`, error);
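A minimal sketch of a client consuming the `request:updated` event emitted above (socket.io client assumed; the room-join event name and auth handshake are assumptions, not shown in this diff):

```ts
import { io } from 'socket.io-client';

const socket = io('/', { auth: { token } }); // token from login

// Join the room for the request currently being viewed (event name assumed)
socket.emit('join:request', requestId);

socket.on('request:updated', (payload: {
  requestId: string;
  requestNumber?: string;
  action: string;
  levelNumber: number;
  timestamp: string;
}) => {
  // Refresh the detail view whenever a level is approved or rejected
  console.log(`Request ${payload.requestNumber} ${payload.action} at level ${payload.levelNumber}`);
});
```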
@@ -3,7 +3,7 @@ import { SSOUserData, ssoConfig } from '../config/sso';
 import jwt, { SignOptions } from 'jsonwebtoken';
 import type { StringValue } from 'ms';
 import { LoginResponse } from '../types/auth.types';
-import logger from '../utils/logger';
+import logger, { logAuthEvent } from '../utils/logger';
 import axios from 'axios';

 export class AuthService {
@@ -71,9 +71,9 @@ export class AuthService {
       // Reload to get updated data
       user = await user.reload();

-      logger.info(`User updated via SSO`, {
+      logAuthEvent('sso_callback', user.userId, {
         email: userData.email,
-        oktaSub: userData.oktaSub,
+        action: 'user_updated',
         updatedFields: Object.keys(userUpdateData),
       });
     } else {
@@ -93,10 +93,9 @@ export class AuthService {
         lastLogin: new Date()
       });

-      logger.info(`New user created via SSO`, {
+      logAuthEvent('sso_callback', user.userId, {
         email: userData.email,
-        oktaSub: userData.oktaSub,
-        employeeId: userData.employeeId || 'not provided',
+        action: 'user_created',
         displayName,
         hasDepartment: !!userData.department,
         hasDesignation: !!userData.designation,
@@ -123,9 +122,9 @@ export class AuthService {
       refreshToken
     };
   } catch (error) {
-    logger.error(`SSO callback failed`, {
+    logAuthEvent('auth_failure', undefined, {
       email: userData.email,
-      oktaSub: userData.oktaSub,
+      action: 'sso_callback_failed',
       error: error instanceof Error ? error.message : 'Unknown error',
     });
     const errorMessage = error instanceof Error ? error.message : 'Unknown error';
@@ -204,7 +203,10 @@ export class AuthService {

     return this.generateAccessToken(user);
   } catch (error) {
-    logger.error('Token refresh failed:', error);
+    logAuthEvent('auth_failure', undefined, {
+      action: 'token_refresh_failed',
+      error,
+    });
     throw new Error('Token refresh failed');
   }
 }
@@ -447,14 +449,13 @@ export class AuthService {
       oktaIdToken: id_token, // Include id_token for proper Okta logout
     };
   } catch (error: any) {
-    logger.error('Token exchange with Okta failed:', {
-      message: error.message,
-      response: error.response?.data,
+    logAuthEvent('auth_failure', undefined, {
+      action: 'okta_token_exchange_failed',
+      errorMessage: error.message,
       status: error.response?.status,
       statusText: error.response?.statusText,
-      headers: error.response?.headers,
-      code: error.code,
-      stack: error.stack,
+      oktaError: error.response?.data?.error,
+      oktaErrorDescription: error.response?.data?.error_description,
     });

     // Provide a more user-friendly error message
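The `logAuthEvent`, `logAIEvent`, and `logWorkflowEvent` helpers imported throughout these diffs are not shown; a plausible minimal shape, inferred purely from the call sites and assuming they are thin wrappers over the existing `logger` in `src/utils/logger.ts`:

```ts
// Sketch of the assumed helper shapes (signatures inferred from call sites only)
export function logAuthEvent(eventType: string, userId?: string, meta: Record<string, unknown> = {}): void {
  logger.info(`[Auth] ${eventType}`, { userId, ...meta });
}

export function logAIEvent(phase: 'request' | 'response' | 'error', meta: Record<string, unknown> = {}): void {
  logger.info(`[AI] ${phase}`, meta);
}

export function logWorkflowEvent(event: string, requestId: string, meta: Record<string, unknown> = {}): void {
  logger.info(`[Workflow] ${event}`, { requestId, ...meta });
}
```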
@@ -12,6 +12,30 @@ let configCache: Map<string, string> = new Map();
 let cacheExpiry: Date | null = null;
 const CACHE_DURATION_MS = 5 * 60 * 1000; // 5 minutes

+// Sensitive config keys that should be masked in logs
+const SENSITIVE_CONFIG_PATTERNS = [
+  'API_KEY', 'SECRET', 'PASSWORD', 'TOKEN', 'CREDENTIAL',
+  'PRIVATE', 'AUTH', 'KEY', 'VAPID'
+];
+
+/**
+ * Check if a config key contains sensitive data
+ */
+function isSensitiveConfig(configKey: string): boolean {
+  const upperKey = configKey.toUpperCase();
+  return SENSITIVE_CONFIG_PATTERNS.some(pattern => upperKey.includes(pattern));
+}
+
+/**
+ * Mask sensitive value for logging (show first 4 and last 2 chars)
+ */
+function maskSensitiveValue(value: string): string {
+  if (!value || value.length <= 8) {
+    return '***REDACTED***';
+  }
+  return `${value.substring(0, 4)}****${value.substring(value.length - 2)}`;
+}
+
 /**
  * Get a configuration value from database (with caching)
  */
@@ -40,12 +64,16 @@ export async function getConfigValue(configKey: string, defaultValue: string = '
       // Always update cache expiry when loading from database
       cacheExpiry = new Date(Date.now() + CACHE_DURATION_MS);

-      logger.info(`[ConfigReader] Loaded config '${configKey}' = '${value}' from database (cached for 5min)`);
+      // Mask sensitive values in logs for security
+      const logValue = isSensitiveConfig(configKey) ? maskSensitiveValue(value) : value;
+      logger.info(`[ConfigReader] Loaded config '${configKey}' = '${logValue}' from database (cached for 5min)`);

       return value;
     }

-    logger.warn(`[ConfigReader] Config key '${configKey}' not found, using default: ${defaultValue}`);
+    // Mask sensitive default values in logs for security
+    const logDefault = isSensitiveConfig(configKey) ? maskSensitiveValue(defaultValue) : defaultValue;
+    logger.warn(`[ConfigReader] Config key '${configKey}' not found, using default: ${logDefault}`);
     return defaultValue;
   } catch (error) {
     logger.error(`[ConfigReader] Error reading config '${configKey}':`, error);
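A quick illustration of what the masking above produces (the values are made up):

```ts
maskSensitiveValue('sk-abc123xyz789'); // -> 'sk-a****89' (longer than 8 chars)
maskSensitiveValue('short');           // -> '***REDACTED***' (8 chars or fewer)
isSensitiveConfig('OPENAI_API_KEY');   // -> true  (matches 'API_KEY')
isSensitiveConfig('AI_PROVIDER');      // -> false (matches no pattern)
```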
@@ -282,27 +282,6 @@ export async function seedDefaultConfigurations(): Promise<void> {
       NOW(),
       NOW()
     ),
-    (
-      gen_random_uuid(),
-      'AI_REMARK_MAX_CHARACTERS',
-      'AI_CONFIGURATION',
-      '500',
-      'NUMBER',
-      'AI Remark Maximum Characters',
-      'Maximum character limit for AI-generated conclusion remarks',
-      '500',
-      true,
-      false,
-      '{"min": 100, "max": 2000}'::jsonb,
-      'number',
-      NULL,
-      21,
-      false,
-      NULL,
-      NULL,
-      NOW(),
-      NOW()
-    ),
     (
       gen_random_uuid(),
       'AI_PROVIDER',
@@ -471,6 +450,27 @@ export async function seedDefaultConfigurations(): Promise<void> {
       NOW(),
       NOW()
     ),
+    (
+      gen_random_uuid(),
+      'AI_MAX_REMARK_LENGTH',
+      'AI_CONFIGURATION',
+      '2000',
+      'NUMBER',
+      'AI Max Remark Length',
+      'Maximum character length for AI-generated conclusion remarks',
+      '2000',
+      true,
+      false,
+      '{"min": 500, "max": 5000}'::jsonb,
+      'number',
+      NULL,
+      30,
+      false,
+      NULL,
+      NULL,
+      NOW(),
+      NOW()
+    ),
     -- Notification Rules
     (
       gen_random_uuid(),
@@ -486,7 +486,7 @@ export async function seedDefaultConfigurations(): Promise<void> {
       '{}'::jsonb,
       'toggle',
       NULL,
-      30,
+      31,
       false,
       NULL,
       NULL,
@@ -507,7 +507,7 @@ export async function seedDefaultConfigurations(): Promise<void> {
       '{}'::jsonb,
       'toggle',
       NULL,
-      31,
+      32,
       false,
       NULL,
       NULL,
@@ -528,7 +528,7 @@ export async function seedDefaultConfigurations(): Promise<void> {
       '{"min": 1000, "max": 30000}'::jsonb,
       'number',
       NULL,
-      32,
+      33,
       false,
       NULL,
       NULL,
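The renamed `AI_MAX_REMARK_LENGTH` key is read back the same way the AI service hunk above does, via `getConfigValue` with a fallback (the import path is an assumption):

```ts
import { getConfigValue } from './services/configReader.service'; // assumed path

// Reads from the seeded configuration, cached for 5 minutes; '2000' is the fallback
const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
const maxLength = parseInt(maxLengthStr || '2000', 10);
```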
|||||||
**`DashboardService.getApproverPerformance` — new priority and SLA filter parameters:**

```diff
@@ -907,8 +907,18 @@ export class DashboardService {

   /**
    * Get Approver Performance metrics with pagination
+   * Supports priority and SLA filters for stats calculation
    */
-  async getApproverPerformance(userId: string, dateRange?: string, page: number = 1, limit: number = 10, startDate?: string, endDate?: string) {
+  async getApproverPerformance(
+    userId: string,
+    dateRange?: string,
+    page: number = 1,
+    limit: number = 10,
+    startDate?: string,
+    endDate?: string,
+    priority?: string,
+    slaCompliance?: string
+  ) {
     const range = this.parseDateRange(dateRange, startDate, endDate);

     // Check if user is admin or management (has broader access)
@@ -929,22 +939,48 @@ export class DashboardService {
     // Calculate offset
     const offset = (page - 1) * limit;

+    // Build filter conditions
+    const replacements: any = { start: range.start, end: range.end };
+    let priorityFilter = '';
+    let slaFilter = '';
+
+    if (priority && priority !== 'all') {
+      priorityFilter = `AND wf.priority = :priority`;
+      replacements.priority = priority.toUpperCase();
+    }
+
+    // SLA filter logic - will be applied in main query
+    if (slaCompliance && slaCompliance !== 'all') {
+      if (slaCompliance === 'breached') {
+        slaFilter = `AND al.tat_breached = true`;
+      } else if (slaCompliance === 'compliant') {
+        slaFilter = `AND (al.tat_breached = false OR (al.tat_breached IS NULL AND al.elapsed_hours < al.tat_hours))`;
+      }
+    }
+
     // Get total count - only count distinct approvers who have completed approvals
+    // IMPORTANT: WHERE conditions must match the main query to avoid pagination mismatch
     const countResult = await sequelize.query(`
       SELECT COUNT(*) as total
       FROM (
         SELECT DISTINCT al.approver_id
         FROM approval_levels al
+        INNER JOIN workflow_requests wf ON al.request_id = wf.request_id
         WHERE al.action_date BETWEEN :start AND :end
           AND al.status IN ('APPROVED', 'REJECTED')
           AND al.action_date IS NOT NULL
+          AND al.level_start_time IS NOT NULL
           AND al.tat_hours > 0
           AND al.approver_id IS NOT NULL
+          AND al.elapsed_hours IS NOT NULL
+          AND al.elapsed_hours >= 0
+          ${priorityFilter}
+          ${slaFilter}
         GROUP BY al.approver_id
         HAVING COUNT(DISTINCT al.level_id) > 0
       ) AS distinct_approvers
     `, {
-      replacements: { start: range.start, end: range.end },
+      replacements,
       type: QueryTypes.SELECT
     });

```
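The filters are built once and interpolated into both the count query and the main query, with the actual values bound through `replacements` so user input never lands in the SQL string. A hedged sketch of how a controller might forward the new query parameters — the route shape, `dashboardService` binding, and auth-middleware `req.user` field are assumptions, not the project's confirmed wiring:

```typescript
import { Request, Response } from 'express';

// Hypothetical controller wiring; the real route/handler names may differ.
export async function approverPerformanceHandler(req: Request, res: Response) {
  const { dateRange, startDate, endDate, priority, slaCompliance } = req.query;
  const page = parseInt(String(req.query.page ?? '1'), 10);
  const limit = parseInt(String(req.query.limit ?? '10'), 10);

  const data = await dashboardService.getApproverPerformance(
    (req as any).user.userId,             // assumed to be set by the auth middleware
    dateRange as string | undefined,
    page,
    limit,
    startDate as string | undefined,
    endDate as string | undefined,
    priority as string | undefined,       // e.g. 'express' -> matched as EXPRESS
    slaCompliance as string | undefined   // 'all' | 'compliant' | 'breached'
  );
  res.json({ success: true, data });
}
```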
**`DashboardService` — approver performance aggregation, rewritten around the persisted `tat_breached` flag:**

```diff
@@ -962,57 +998,37 @@ export class DashboardService {
         al.approver_name,
         COUNT(DISTINCT al.level_id)::int AS total_approved,
         COUNT(DISTINCT CASE
-          WHEN al.elapsed_hours IS NOT NULL
-            AND al.elapsed_hours > 0
-            AND al.level_start_time IS NOT NULL
-            AND al.action_date IS NOT NULL
-            AND (
-              al.elapsed_hours < al.tat_hours
-              OR (al.elapsed_hours <= al.tat_hours AND (al.tat_breached IS NULL OR al.tat_breached = false))
-              OR (al.tat_breached IS NOT NULL AND al.tat_breached = false)
-            )
+          WHEN al.status = 'APPROVED'
+          THEN al.level_id
+        END)::int AS approved_count,
+        COUNT(DISTINCT CASE
+          WHEN al.status = 'REJECTED'
+          THEN al.level_id
+        END)::int AS rejected_count,
+        COUNT(DISTINCT CASE
+          WHEN wf.status = 'CLOSED'
+          THEN al.level_id
+        END)::int AS closed_count,
+        COUNT(DISTINCT CASE
+          WHEN al.tat_breached = false
+            OR (al.tat_breached IS NULL AND al.elapsed_hours < al.tat_hours)
           THEN al.level_id
         END)::int AS within_tat_count,
         COUNT(DISTINCT CASE
-          WHEN al.elapsed_hours IS NOT NULL
-            AND al.elapsed_hours > 0
-            AND al.level_start_time IS NOT NULL
-            AND al.action_date IS NOT NULL
-            AND (
-              al.elapsed_hours > al.tat_hours
-              OR (al.tat_breached IS NOT NULL AND al.tat_breached = true)
-            )
+          WHEN al.tat_breached = true
           THEN al.level_id
         END)::int AS breached_count,
         ROUND(
           ((COUNT(DISTINCT CASE
-            WHEN al.elapsed_hours IS NOT NULL
-              AND al.elapsed_hours > 0
-              AND al.level_start_time IS NOT NULL
-              AND al.action_date IS NOT NULL
-              AND (
-                al.elapsed_hours < al.tat_hours
-                OR (al.elapsed_hours <= al.tat_hours AND (al.tat_breached IS NULL OR al.tat_breached = false))
-                OR (al.tat_breached IS NOT NULL AND al.tat_breached = false)
-              )
+            WHEN al.tat_breached = false
+              OR (al.tat_breached IS NULL AND al.elapsed_hours < al.tat_hours)
             THEN al.level_id
-          END)::numeric / NULLIF(COUNT(DISTINCT CASE
-            WHEN al.elapsed_hours IS NOT NULL
-              AND al.elapsed_hours > 0
-              AND al.level_start_time IS NOT NULL
-              AND al.action_date IS NOT NULL
-            THEN al.level_id
-          END), 0)) * 100)::numeric,
+          END)::numeric / NULLIF(COUNT(DISTINCT al.level_id), 0)) * 100)::numeric,
           0
         )::int AS tat_compliance_percent,
-        ROUND(AVG(CASE
-          WHEN al.elapsed_hours IS NOT NULL
-            AND al.elapsed_hours > 0
-            AND al.level_start_time IS NOT NULL
-            AND al.action_date IS NOT NULL
-          THEN al.elapsed_hours
-        END)::numeric, 1) AS avg_response_hours
+        ROUND(AVG(COALESCE(al.elapsed_hours, 0))::numeric, 1) AS avg_response_hours
       FROM approval_levels al
+      INNER JOIN workflow_requests wf ON al.request_id = wf.request_id
       WHERE al.action_date BETWEEN :start AND :end
         AND al.status IN ('APPROVED', 'REJECTED')
         AND al.action_date IS NOT NULL
```
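The rewrite replaces per-branch `elapsed_hours` re-derivation with the persisted `al.tat_breached` flag, treating a NULL flag with `elapsed_hours < tat_hours` as compliant. A minimal TypeScript sketch of the same predicate, useful for keeping any client-side badge logic consistent with the SQL (the `Level` shape is an assumption mirroring the columns above):

```typescript
interface Level {
  tatBreached: boolean | null; // persisted flag; may be unset on older rows
  elapsedHours: number | null;
  tatHours: number;
}

// Mirrors the SQL: compliant when the flag says false, or when the flag is
// unset but elapsed time is still under the TAT; breached otherwise.
function isWithinTat(level: Level): boolean {
  if (level.tatBreached === false) return true;
  if (level.tatBreached === null && level.elapsedHours !== null) {
    return level.elapsedHours < level.tatHours;
  }
  return false;
}
```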
**`DashboardService` — main query filters, pending-levels CTE, and result mapping:**

```diff
@@ -1020,7 +1036,9 @@ export class DashboardService {
         AND al.tat_hours > 0
         AND al.approver_id IS NOT NULL
         AND al.elapsed_hours IS NOT NULL
-        AND al.elapsed_hours > 0
+        AND al.elapsed_hours >= 0
+        ${priorityFilter}
+        ${slaFilter}
       GROUP BY al.approver_id, al.approver_name
       HAVING COUNT(DISTINCT al.level_id) > 0
       ORDER BY
@@ -1029,7 +1047,7 @@ export class DashboardService {
         total_approved DESC -- More approvals as tie-breaker
       LIMIT :limit OFFSET :offset
     `, {
-      replacements: { start: range.start, end: range.end, limit, offset },
+      replacements: { ...replacements, limit, offset },
       type: QueryTypes.SELECT
     });

@@ -1050,7 +1068,8 @@ export class DashboardService {
         al.level_number,
         al.level_start_time,
         al.tat_hours,
-        wf.priority
+        wf.priority,
+        wf.initiator_id
       FROM approval_levels al
       JOIN workflow_requests wf ON al.request_id = wf.request_id
       WHERE al.status IN ('PENDING', 'IN_PROGRESS')
@@ -1058,6 +1077,7 @@ export class DashboardService {
         AND wf.is_draft = false
         AND al.level_start_time IS NOT NULL
         AND al.tat_hours > 0
+        AND wf.initiator_id != al.approver_id
       ORDER BY al.request_id, al.level_number ASC
     )
     SELECT
@@ -1155,9 +1175,14 @@ export class DashboardService {
           approverId: a.approver_id,
           approverName: a.approver_name,
           totalApproved: a.total_approved,
+          approvedCount: a.approved_count,
+          rejectedCount: a.rejected_count,
+          closedCount: a.closed_count,
           tatCompliancePercent,
           avgResponseHours: parseFloat(a.avg_response_hours || 0),
-          pendingCount: pendingCountMap.get(a.approver_id) || 0
+          pendingCount: pendingCountMap.get(a.approver_id) || 0,
+          withinTatCount: a.within_tat_count,
+          breachedCount: a.breached_count
         };
       }),
       currentPage: page,
```
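With the extra aggregates mapped through, each item in the paginated result carries the full split. A hedged TypeScript shape for consumers — the field names come straight from the mapping above, while the interface name itself is an illustration:

```typescript
interface ApproverPerformanceRow {
  approverId: string;
  approverName: string;
  totalApproved: number;        // distinct levels actioned (approved + rejected)
  approvedCount: number;
  rejectedCount: number;
  closedCount: number;          // levels belonging to CLOSED workflows
  tatCompliancePercent: number;
  avgResponseHours: number;
  pendingCount: number;         // from the pending-levels lookup, not the main query
  withinTatCount: number;
  breachedCount: number;
}
```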
**`DashboardService.getSingleApproverStats` — new dedicated stats method:**

```diff
@@ -2281,6 +2306,153 @@ export class DashboardService {
     };
   }

+  /**
+   * Get single approver stats only (dedicated API for performance)
+   * Only respects date, priority, and SLA filters
+   */
+  async getSingleApproverStats(
+    userId: string,
+    approverId: string,
+    dateRange?: string,
+    startDate?: string,
+    endDate?: string,
+    priority?: string,
+    slaCompliance?: string
+  ) {
+    const user = await User.findByPk(userId);
+    const isAdmin = user?.hasManagementAccess() || false;
+
+    // Allow users to view their own performance, or admins to view any approver's performance
+    if (!isAdmin && approverId !== userId) {
+      throw new Error('Unauthorized: You can only view your own performance');
+    }
+
+    // Parse date range if provided
+    let dateFilter = '';
+    const replacements: any = { approverId };
+
+    logger.info(`[Dashboard] Single approver stats - Received filters:`, {
+      dateRange,
+      startDate,
+      endDate,
+      priority,
+      slaCompliance
+    });
+
+    if (dateRange) {
+      const dateFilterObj = this.parseDateRange(dateRange, startDate, endDate);
+      dateFilter = `
+        AND (
+          (wf.submission_date IS NOT NULL AND wf.submission_date >= :dateStart AND wf.submission_date <= :dateEnd)
+          OR (al.action_date IS NOT NULL AND al.action_date >= :dateStart AND al.action_date <= :dateEnd)
+        )
+      `;
+      replacements.dateStart = dateFilterObj.start;
+      replacements.dateEnd = dateFilterObj.end;
+      logger.info(`[Dashboard] Date filter applied:`, {
+        start: dateFilterObj.start,
+        end: dateFilterObj.end
+      });
+    } else {
+      logger.info(`[Dashboard] No date filter applied - showing all data`);
+    }
+
+    // Priority filter
+    let priorityFilter = '';
+    if (priority && priority !== 'all') {
+      priorityFilter = `AND wf.priority = :priorityFilter`;
+      replacements.priorityFilter = priority.toUpperCase();
+    }
+
+    // SLA Compliance filter
+    let slaFilter = '';
+    if (slaCompliance && slaCompliance !== 'all') {
+      if (slaCompliance === 'breached') {
+        slaFilter = `AND al.tat_breached = true`;
+      } else if (slaCompliance === 'compliant') {
+        slaFilter = `AND (al.tat_breached = false OR (al.tat_breached IS NULL AND al.elapsed_hours < al.tat_hours))`;
+      }
+    }
+
+    // Calculate aggregated stats using approval_levels directly
+    // Count ALL approval levels assigned to this approver (like the All Requests pattern)
+    // TAT Compliance includes: completed + pending breached + levels from closed workflows
+    const statsQuery = `
+      SELECT
+        COUNT(DISTINCT al.level_id) as totalApproved,
+        SUM(CASE WHEN al.status = 'APPROVED' THEN 1 ELSE 0 END) as approvedCount,
+        SUM(CASE WHEN al.status = 'REJECTED' THEN 1 ELSE 0 END) as rejectedCount,
+        SUM(CASE WHEN al.status IN ('PENDING', 'IN_PROGRESS') THEN 1 ELSE 0 END) as pendingCount,
+        SUM(CASE
+          WHEN (al.status IN ('APPROVED', 'REJECTED') OR wf.status = 'CLOSED')
+            AND (al.tat_breached = false
+              OR (al.tat_breached IS NULL AND al.elapsed_hours IS NOT NULL AND al.elapsed_hours < al.tat_hours))
+          THEN 1 ELSE 0
+        END) as withinTatCount,
+        SUM(CASE
+          WHEN ((al.status IN ('APPROVED', 'REJECTED') OR wf.status = 'CLOSED') AND al.tat_breached = true)
+            OR (al.status IN ('PENDING', 'IN_PROGRESS') AND al.tat_breached = true)
+          THEN 1 ELSE 0
+        END) as breachedCount,
+        SUM(CASE
+          WHEN al.status IN ('PENDING', 'IN_PROGRESS')
+            AND al.tat_breached = true
+          THEN 1 ELSE 0
+        END) as pendingBreachedCount,
+        AVG(CASE
+          WHEN (al.status IN ('APPROVED', 'REJECTED') OR wf.status = 'CLOSED')
+            AND al.elapsed_hours IS NOT NULL
+            AND al.elapsed_hours >= 0
+          THEN al.elapsed_hours
+          ELSE NULL
+        END) as avgResponseHours,
+        SUM(CASE WHEN wf.status = 'CLOSED' THEN 1 ELSE 0 END) as closedCount
+      FROM approval_levels al
+      INNER JOIN workflow_requests wf ON al.request_id = wf.request_id
+      WHERE al.approver_id = :approverId
+        AND wf.is_draft = false
+        ${dateFilter}
+        ${priorityFilter}
+        ${slaFilter}
+    `;
+
+    const [statsResult] = await sequelize.query(statsQuery, {
+      replacements,
+      type: QueryTypes.SELECT
+    });
+
+    const stats = statsResult as any;
+
+    // Database returns lowercase column names
+    // TAT Compliance calculation includes pending breached requests
+    // Total for compliance = completed + pending breached
+    const totalCompleted = (parseInt(stats.approvedcount) || 0) + (parseInt(stats.rejectedcount) || 0);
+    const pendingBreached = parseInt(stats.pendingbreachedcount) || 0;
+    const totalForCompliance = totalCompleted + pendingBreached;
+    const tatCompliancePercent = totalForCompliance > 0
+      ? Math.round(((parseInt(stats.withintatcount) || 0) / totalForCompliance) * 100)
+      : 0;
+
+    // Get approver name
+    const approver = await User.findByPk(approverId);
+
+    const approverStats = {
+      approverId,
+      approverName: approver ? `${approver.firstName} ${approver.lastName}` : 'Unknown',
+      totalApproved: parseInt(stats.totalapproved) || 0,
+      approvedCount: parseInt(stats.approvedcount) || 0,
+      rejectedCount: parseInt(stats.rejectedcount) || 0,
+      closedCount: parseInt(stats.closedcount) || 0,
+      pendingCount: parseInt(stats.pendingcount) || 0,
+      withinTatCount: parseInt(stats.withintatcount) || 0,
+      breachedCount: parseInt(stats.breachedcount) || 0,
+      tatCompliancePercent,
+      avgResponseHours: parseFloat(stats.avgresponsehours) || 0
+    };
+
+    return approverStats;
+  }
+
   /**
    * Get requests filtered by approver ID with detailed filtering support
    */
```
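The compliance denominator deliberately includes pending-but-breached levels, so an approver cannot improve their percentage by sitting on overdue items. A small worked example of the same arithmetic (numbers are illustrative):

```typescript
// approved = 8, rejected = 2  -> totalCompleted = 10
// pendingBreached = 2         -> totalForCompliance = 12
// withinTat = 9               -> 9 / 12 = 75%
const totalCompleted = 8 + 2;
const pendingBreached = 2;
const totalForCompliance = totalCompleted + pendingBreached; // 12
const withinTat = 9;
const tatCompliancePercent =
  totalForCompliance > 0 ? Math.round((withinTat / totalForCompliance) * 100) : 0; // 75
```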
**`DashboardService` — per-approver status filtering and level visibility:**

```diff
@@ -2331,12 +2503,23 @@ export class DashboardService {
       replacements.dateEnd = dateFilterObj.end;
     }

-    // Status filter
+    // Status filter - Filter by the approver's action status, not overall workflow status
     let statusFilter = '';
     if (status && status !== 'all') {
       if (status === 'pending') {
-        statusFilter = `AND wf.status IN ('PENDING', 'IN_PROGRESS')`; // IN_PROGRESS legacy support
+        // Show requests where this approver is the current approver AND their level is pending
+        statusFilter = `AND al.status IN ('PENDING', 'IN_PROGRESS')`;
+      } else if (status === 'approved') {
+        // Show requests this approver has approved (regardless of overall workflow status)
+        statusFilter = `AND al.status = 'APPROVED'`;
+      } else if (status === 'rejected') {
+        // Show requests this approver has rejected
+        statusFilter = `AND al.status = 'REJECTED'`;
+      } else if (status === 'closed') {
+        // Show requests that are fully closed
+        statusFilter = `AND wf.status = 'CLOSED'`;
       } else {
+        // For other statuses, filter by workflow status
         statusFilter = `AND wf.status = :statusFilter`;
         replacements.statusFilter = status.toUpperCase();
       }
@@ -2400,6 +2583,10 @@ export class DashboardService {
       INNER JOIN approval_levels al ON wf.request_id = al.request_id
       WHERE al.approver_id = :approverId
         AND wf.is_draft = false
+        AND (
+          al.status IN ('APPROVED', 'REJECTED')
+          OR al.level_number <= wf.current_level
+        )
         ${dateFilter}
         ${statusFilter}
         ${priorityFilter}
@@ -2452,6 +2639,10 @@ export class DashboardService {
       LEFT JOIN users u ON wf.initiator_id = u.user_id
       WHERE al.approver_id = :approverId
         AND wf.is_draft = false
+        AND (
+          al.status IN ('APPROVED', 'REJECTED')
+          OR al.level_number <= wf.current_level
+        )
         ${dateFilter}
         ${statusFilter}
         ${priorityFilter}
```
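The new `al.level_number <= wf.current_level` guard keeps future, not-yet-activated levels out of an approver's request list while still showing anything they have already actioned. The same rule as a standalone predicate — a sketch only, with the row shapes assumed:

```typescript
interface LevelRow { status: string; levelNumber: number; }
interface WorkflowRow { currentLevel: number; }

// A level is visible to its approver once it has been actioned,
// or once the workflow has advanced far enough to reach it.
function isLevelVisible(level: LevelRow, wf: WorkflowRow): boolean {
  return ['APPROVED', 'REJECTED'].includes(level.status)
    || level.levelNumber <= wf.currentLevel;
}
```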
**`NotificationService` — logger import and new `getUserSubscriptions`:**

```diff
@@ -1,5 +1,5 @@
 import webpush from 'web-push';
-import logger from '@utils/logger';
+import logger, { logNotificationEvent } from '@utils/logger';
 import { Subscription } from '@models/Subscription';
 import { Notification } from '@models/Notification';

@@ -57,6 +57,22 @@ class NotificationService {
     logger.info(`Subscription stored for user ${userId}. Total: ${list.length}`);
   }

+  /**
+   * Get all subscriptions for a user
+   */
+  async getUserSubscriptions(userId: string) {
+    try {
+      const subscriptions = await Subscription.findAll({
+        where: { userId },
+        attributes: ['subscriptionId', 'endpoint', 'userAgent', 'createdAt']
+      });
+      return subscriptions;
+    } catch (error) {
+      logger.error(`[Notification] Failed to get subscriptions for user ${userId}:`, error);
+      return [];
+    }
+  }
+
   /**
    * Remove expired/invalid subscription from database and memory cache
    */
```
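`getUserSubscriptions` selects only non-secret attributes (no `p256dh`/`auth` keys), which makes it safe to expose through a read endpoint. A hedged sketch of such a route — the path, response envelope, and `notificationService` binding are assumptions:

```typescript
import { Router } from 'express';
// import { notificationService } from '@services/notification.service'; // assumed path

const router = Router();

// Hypothetical wiring; the real route lives wherever notification routes are registered.
router.get('/notifications/subscriptions', async (req, res) => {
  const userId = (req as any).user.userId; // assumed to be set by the auth middleware
  const subscriptions = await notificationService.getUserSubscriptions(userId);
  res.json({ success: true, data: subscriptions });
});
```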
**`NotificationService.sendToUsers` — now respects per-user notification preferences:**

```diff
@@ -92,77 +108,123 @@ class NotificationService {

   /**
    * Send notification to users - saves to DB and sends via push/socket
+   * Respects user notification preferences
    */
   async sendToUsers(userIds: string[], payload: NotificationPayload) {
     const message = JSON.stringify(payload);
-    const sentVia: string[] = ['IN_APP']; // Always save to DB for in-app display
+    const { User } = require('@models/User');

     for (const userId of userIds) {
       try {
-        // 1. Save notification to database for in-app display
-        const notification = await Notification.create({
-          userId,
-          requestId: payload.requestId,
-          notificationType: payload.type || 'general',
-          title: payload.title,
-          message: payload.body,
-          isRead: false,
-          priority: payload.priority || 'MEDIUM',
-          actionUrl: payload.url,
-          actionRequired: payload.actionRequired || false,
-          metadata: {
-            requestNumber: payload.requestNumber,
-            ...payload.metadata
-          },
-          sentVia,
-          emailSent: false,
-          smsSent: false,
-          pushSent: false
-        } as any);
-
-        logger.info(`[Notification] Created in-app notification for user ${userId}: ${payload.title}`);
-
-        // 2. Emit real-time socket event for immediate delivery
-        try {
-          const { emitToUser } = require('../realtime/socket');
-          if (emitToUser) {
-            emitToUser(userId, 'notification:new', {
-              notification: notification.toJSON(),
-              ...payload
-            });
-            logger.info(`[Notification] Emitted socket event to user ${userId}`);
-          }
-        } catch (socketError) {
-          logger.warn(`[Notification] Socket emit failed (not critical):`, socketError);
-        }
-
-        // 3. Send push notification (if user has subscriptions)
-        let subs = this.userIdToSubscriptions.get(userId) || [];
-        // Load from DB if memory empty
-        if (subs.length === 0) {
-          try {
-            const rows = await Subscription.findAll({ where: { userId } });
-            subs = rows.map((r: any) => ({ endpoint: r.endpoint, keys: { p256dh: r.p256dh, auth: r.auth } }));
-          } catch {}
-        }
-
-        if (subs.length > 0) {
-          for (const sub of subs) {
-            try {
-              await webpush.sendNotification(sub, message);
-              await notification.update({ pushSent: true });
-              logger.info(`[Notification] Push sent to user ${userId}`);
-            } catch (err: any) {
-              // Check if subscription is expired/invalid
-              if (this.isExpiredSubscriptionError(err)) {
-                logger.warn(`[Notification] Expired subscription detected for user ${userId}, removing...`);
-                await this.removeExpiredSubscription(userId, sub.endpoint);
-              } else {
-                logger.error(`[Notification] Failed to send push to user ${userId}:`, err);
-              }
-            }
-          }
-        }
+        // Fetch user preferences
+        const user = await User.findByPk(userId, {
+          attributes: [
+            'userId',
+            'emailNotificationsEnabled',
+            'pushNotificationsEnabled',
+            'inAppNotificationsEnabled'
+          ]
+        });
+
+        if (!user) {
+          logger.warn(`[Notification] User ${userId} not found, skipping notification`);
+          continue;
+        }
+
+        const sentVia: string[] = [];
+
+        // 1. Save notification to database for in-app display (if enabled)
+        if (user.inAppNotificationsEnabled) {
+          const notification = await Notification.create({
+            userId,
+            requestId: payload.requestId,
+            notificationType: payload.type || 'general',
+            title: payload.title,
+            message: payload.body,
+            isRead: false,
+            priority: payload.priority || 'MEDIUM',
+            actionUrl: payload.url,
+            actionRequired: payload.actionRequired || false,
+            metadata: {
+              requestNumber: payload.requestNumber,
+              ...payload.metadata
+            },
+            sentVia: ['IN_APP'],
+            emailSent: false,
+            smsSent: false,
+            pushSent: false
+          } as any);
+
+          sentVia.push('IN_APP');
+          logger.info(`[Notification] Created in-app notification for user ${userId}: ${payload.title}`);
+
+          // 2. Emit real-time socket event for immediate delivery
+          try {
+            const { emitToUser } = require('../realtime/socket');
+            if (emitToUser) {
+              emitToUser(userId, 'notification:new', {
+                notification: notification.toJSON(),
+                ...payload
+              });
+              logger.info(`[Notification] Emitted socket event to user ${userId}`);
+            }
+          } catch (socketError) {
+            logger.warn(`[Notification] Socket emit failed (not critical):`, socketError);
+          }
+
+          // 3. Send push notification (if enabled and user has subscriptions)
+          if (user.pushNotificationsEnabled) {
+            let subs = this.userIdToSubscriptions.get(userId) || [];
+            // Load from DB if memory empty
+            if (subs.length === 0) {
+              try {
+                const rows = await Subscription.findAll({ where: { userId } });
+                subs = rows.map((r: any) => ({ endpoint: r.endpoint, keys: { p256dh: r.p256dh, auth: r.auth } }));
+              } catch {}
+            }
+
+            if (subs.length > 0) {
+              for (const sub of subs) {
+                try {
+                  await webpush.sendNotification(sub, message);
+                  await notification.update({ pushSent: true });
+                  sentVia.push('PUSH');
+                  logNotificationEvent('sent', {
+                    userId,
+                    channel: 'push',
+                    type: payload.type,
+                    requestId: payload.requestId,
+                  });
+                } catch (err: any) {
+                  // Check if subscription is expired/invalid
+                  if (this.isExpiredSubscriptionError(err)) {
+                    logger.warn(`[Notification] Expired subscription detected for user ${userId}, removing...`);
+                    await this.removeExpiredSubscription(userId, sub.endpoint);
+                  } else {
+                    logNotificationEvent('failed', {
+                      userId,
+                      channel: 'push',
+                      type: payload.type,
+                      requestId: payload.requestId,
+                      error: err,
+                    });
+                  }
+                }
+              }
+            }
+          } else {
+            logger.info(`[Notification] Push notifications disabled for user ${userId}, skipping push`);
+          }
+        } else {
+          logger.info(`[Notification] In-app notifications disabled for user ${userId}, skipping notification`);
+        }
+
+        // TODO: Email notifications (when implemented)
+        // if (user.emailNotificationsEnabled) {
+        //   // Send email notification
+        //   sentVia.push('EMAIL');
+        // }
+
       } catch (error) {
         logger.error(`[Notification] Failed to create notification for user ${userId}:`, error);
         // Continue to next user even if one fails
```
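Callers only build the payload; preference checks happen inside `sendToUsers`, so a user who has disabled in-app notifications is silently skipped, and push is additionally gated by `pushNotificationsEnabled`. A representative call, with field values illustrative and `requestId`/`requestNumber` assumed in scope:

```typescript
await notificationService.sendToUsers([approverId], {
  title: 'Workflow Resumed',
  body: 'Request "Purchase Order Approval" has been resumed. Please continue with your review.',
  requestId,                        // stored on the notification row
  requestNumber,                    // surfaced via metadata.requestNumber
  url: `/request/${requestNumber}`,
  type: 'workflow_resumed',
  priority: 'HIGH',
  actionRequired: true
});
```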
**`PauseService.pauseWorkflow` — initiators may now pause; targeted notifications and a real-time emit:**

```diff
@@ -9,6 +9,7 @@ import { calculateElapsedWorkingHours } from '@utils/tatTimeUtils';
 import { notificationService } from './notification.service';
 import { activityService } from './activity.service';
 import dayjs from 'dayjs';
+import { emitToRequestRoom } from '../realtime/socket';

 export class PauseService {
   /**
@@ -70,9 +71,12 @@ export class PauseService {
       throw new Error('No active approval level found to pause');
     }

-    // Verify user is the approver for this level
-    if ((level as any).approverId !== userId) {
-      throw new Error('Only the assigned approver can pause this workflow');
+    // Verify user is either the approver for this level OR the initiator
+    const isApprover = (level as any).approverId === userId;
+    const isInitiator = (workflow as any).initiatorId === userId;
+
+    if (!isApprover && !isInitiator) {
+      throw new Error('Only the assigned approver or the initiator can pause this workflow');
     }

     // Check if level is already paused
@@ -167,7 +171,9 @@ export class PauseService {
     const requestNumber = (workflow as any).requestNumber;
     const title = (workflow as any).title;

-    // Notify initiator
+    // Notify initiator only if someone else (approver) paused the request
+    // Skip notification if initiator paused their own request
+    if (!isInitiator) {
       await notificationService.sendToUsers([(workflow as any).initiatorId], {
         title: 'Workflow Paused',
         body: `Your request "${title}" has been paused by ${userName}. Reason: ${reason}. Will resume on ${dayjs(resumeDate).format('MMM DD, YYYY')}.`,
@@ -178,8 +184,9 @@ export class PauseService {
         priority: 'HIGH',
         actionRequired: false
       });
+    }

-    // Notify approver (self)
+    // Notify the user who paused (confirmation)
     await notificationService.sendToUsers([userId], {
       title: 'Workflow Paused Successfully',
       body: `You have paused request "${title}". It will automatically resume on ${dayjs(resumeDate).format('MMM DD, YYYY')}.`,
@@ -191,6 +198,22 @@ export class PauseService {
       actionRequired: false
     });

+    // If initiator paused, notify the current approver
+    if (isInitiator && (level as any).approverId) {
+      const approver = await User.findByPk((level as any).approverId);
+      const approverUserId = (level as any).approverId;
+      await notificationService.sendToUsers([approverUserId], {
+        title: 'Workflow Paused by Initiator',
+        body: `Request "${title}" has been paused by the initiator (${userName}). Reason: ${reason}. Will resume on ${dayjs(resumeDate).format('MMM DD, YYYY')}.`,
+        requestId,
+        requestNumber,
+        url: `/request/${requestNumber}`,
+        type: 'workflow_paused',
+        priority: 'HIGH',
+        actionRequired: false
+      });
+    }
+
     // Log activity
     await activityService.log({
       requestId,
@@ -208,6 +231,15 @@ export class PauseService {

     logger.info(`[Pause] Workflow ${requestId} paused at level ${(level as any).levelNumber} by ${userId}`);

+    // Emit real-time update to all users viewing this request
+    emitToRequestRoom(requestId, 'request:updated', {
+      requestId,
+      requestNumber: (workflow as any).requestNumber,
+      action: 'PAUSE',
+      levelNumber: (level as any).levelNumber,
+      timestamp: now.toISOString()
+    });
+
     return { workflow, level };
   } catch (error: any) {
     logger.error(`[Pause] Failed to pause workflow:`, error);
```
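The pause permission is now a simple disjunction: the assigned approver of the active level or the workflow's initiator may pause. Extracted as a guard — a sketch of the same check, with `any`-typed parameters mirroring the service's loose casts:

```typescript
function canPause(level: any, workflow: any, userId: string): boolean {
  const isApprover = level.approverId === userId;
  const isInitiator = workflow.initiatorId === userId;
  return isApprover || isInitiator;
}

// Inside pauseWorkflow:
// if (!canPause(level, workflow, userId)) {
//   throw new Error('Only the assigned approver or the initiator can pause this workflow');
// }
```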
**`PauseService.resumeWorkflow` — initiator/approver resume, optional notes, confirmations, and a real-time emit:**

```diff
@@ -219,8 +251,9 @@ export class PauseService {
   * Resume a paused workflow
   * @param requestId - The workflow request ID
   * @param userId - The user ID who is resuming (optional, for manual resume)
+  * @param notes - Optional notes for the resume action
   */
-  async resumeWorkflow(requestId: string, userId?: string): Promise<{ workflow: WorkflowRequest; level: ApprovalLevel | null }> {
+  async resumeWorkflow(requestId: string, userId?: string, notes?: string): Promise<{ workflow: WorkflowRequest; level: ApprovalLevel | null }> {
     try {
       const now = new Date();

@@ -249,14 +282,13 @@ export class PauseService {
     }

     // Verify user has permission (if manual resume)
-    // Note: Initiators cannot resume directly - they must use retrigger to request approver to resume
-    // Exception: When skipping approver (requirement 3.7), initiator can cancel pause
+    // Both initiator and current approver can resume the workflow
     if (userId) {
-      const pausedBy = (workflow as any).pausedBy;
-      if (pausedBy !== userId) {
-        // Only the approver who paused can resume directly
-        // Initiators should use retrigger to request resume (requirement 3.5)
-        throw new Error('Only the approver who paused this workflow can resume it. Initiators should use the retrigger option to request the approver to resume.');
+      const isApprover = (level as any).approverId === userId;
+      const isInitiator = (workflow as any).initiatorId === userId;
+
+      if (!isApprover && !isInitiator) {
+        throw new Error('Only the assigned approver or the initiator can resume this workflow');
       }
     }

@@ -345,9 +377,15 @@ export class PauseService {

     const requestNumber = (workflow as any).requestNumber;
     const title = (workflow as any).title;
+    const initiatorId = (workflow as any).initiatorId;
+    const approverId = (level as any).approverId;
+    const isResumedByInitiator = userId === initiatorId;
+    const isResumedByApprover = userId === approverId;

-    // Notify initiator
-    await notificationService.sendToUsers([(workflow as any).initiatorId], {
+    // Notify initiator only if someone else resumed (or auto-resume)
+    // Skip if initiator resumed their own request
+    if (!isResumedByInitiator) {
+      await notificationService.sendToUsers([initiatorId], {
       title: 'Workflow Resumed',
       body: `Your request "${title}" has been resumed ${userId ? `by ${resumeUserName}` : 'automatically'}.`,
       requestId,
@@ -357,9 +395,12 @@ export class PauseService {
       priority: 'HIGH',
       actionRequired: false
     });
+    }

-    // Notify approver
-    await notificationService.sendToUsers([(level as any).approverId], {
+    // Notify approver only if someone else resumed (or auto-resume)
+    // Skip if approver resumed the request themselves
+    if (!isResumedByApprover && approverId) {
+      await notificationService.sendToUsers([approverId], {
       title: 'Workflow Resumed',
       body: `Request "${title}" has been resumed ${userId ? `by ${resumeUserName}` : 'automatically'}. Please continue with your review.`,
       requestId,
@@ -369,24 +410,53 @@ export class PauseService {
       priority: 'HIGH',
       actionRequired: true
     });
+    }

-    // Log activity
+    // Send confirmation to the user who resumed (if manual resume)
+    if (userId) {
+      await notificationService.sendToUsers([userId], {
+        title: 'Workflow Resumed Successfully',
+        body: `You have resumed request "${title}". ${isResumedByApprover ? 'Please continue with your review.' : ''}`,
+        requestId,
+        requestNumber,
+        url: `/request/${requestNumber}`,
+        type: 'workflow_resumed',
+        priority: 'MEDIUM',
+        actionRequired: isResumedByApprover
+      });
+    }
+
+    // Log activity with notes
+    const resumeDetails = notes
+      ? `Workflow resumed ${userId ? `by ${resumeUserName}` : 'automatically'} at level ${(level as any).levelNumber}. Notes: ${notes}`
+      : `Workflow resumed ${userId ? `by ${resumeUserName}` : 'automatically'} at level ${(level as any).levelNumber}.`;
+
     await activityService.log({
       requestId,
       type: 'resumed',
       user: userId ? { userId, name: resumeUserName } : undefined,
       timestamp: now.toISOString(),
       action: 'Workflow Resumed',
-      details: `Workflow resumed ${userId ? `by ${resumeUserName}` : 'automatically'} at level ${(level as any).levelNumber}.`,
+      details: resumeDetails,
       metadata: {
         levelId: (level as any).levelId,
         levelNumber: (level as any).levelNumber,
-        wasAutoResume: !userId
+        wasAutoResume: !userId,
+        notes: notes || null
       }
     });

     logger.info(`[Pause] Workflow ${requestId} resumed ${userId ? `by ${userId}` : 'automatically'}`);
+
+    // Emit real-time update to all users viewing this request
+    emitToRequestRoom(requestId, 'request:updated', {
+      requestId,
+      requestNumber: (workflow as any).requestNumber,
+      action: 'RESUME',
+      levelNumber: (level as any).levelNumber,
+      timestamp: now.toISOString()
+    });
+
     return { workflow, level };
   } catch (error: any) {
     logger.error(`[Pause] Failed to resume workflow:`, error);
```
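Because `notes` is a new trailing optional parameter, existing callers (including the auto-resume path, which passes no `userId`) keep working unchanged; only manual resumes need to supply it. A hedged controller sketch — the route path, body shape, and `pauseService` binding are assumptions:

```typescript
// POST /workflows/:requestId/resume  (hypothetical route)
async function resumeHandler(req: any, res: any) {
  const { requestId } = req.params;
  const { notes } = req.body || {};   // optional; logged into the activity entry
  const userId = req.user.userId;     // manual resume -> permission check applies

  const { workflow, level } = await pauseService.resumeWorkflow(requestId, userId, notes);
  res.json({ success: true, data: { workflow, level } });
}
```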
**`SummaryService.createSummary` — system/admin access and regeneration:**

```diff
@@ -1,4 +1,4 @@
-import { RequestSummary, SharedSummary, WorkflowRequest, ApprovalLevel, User, ConclusionRemark } from '@models/index';
+import { RequestSummary, SharedSummary, WorkflowRequest, ApprovalLevel, User, ConclusionRemark, Participant } from '@models/index';
 import '@models/index'; // Ensure associations are loaded
 import { Op } from 'sequelize';
 import logger from '@utils/logger';
@@ -8,9 +8,27 @@ export class SummaryService {
   /**
    * Create a summary for a closed request
    * Pulls data from workflow_requests, approval_levels, and conclusion_remarks
+   *
+   * Access Control:
+   * - 'system': Allows system-level auto-generation on final approval
+   * - initiator: The request initiator can create/regenerate
+   * - admin/management: Admin or management role users can create/regenerate via API
+   *
+   * @param requestId - The workflow request ID
+   * @param userId - The user ID requesting the summary (or 'system' for auto-generation)
+   * @param options - Optional parameters
+   * @param options.isSystemGeneration - Set to true for system-level auto-generation
+   * @param options.userRole - The role of the user (for admin access check)
+   * @param options.regenerate - Set to true to regenerate (delete existing and create new)
    */
-  async createSummary(requestId: string, initiatorId: string): Promise<RequestSummary> {
+  async createSummary(
+    requestId: string,
+    userId: string,
+    options?: { isSystemGeneration?: boolean; userRole?: string; regenerate?: boolean }
+  ): Promise<RequestSummary> {
     try {
+      const { isSystemGeneration = false, userRole, regenerate = false } = options || {};
+
       // Check if request exists and is closed
       const workflow = await WorkflowRequest.findByPk(requestId, {
         include: [
@@ -22,29 +40,36 @@ export class SummaryService {
         throw new Error('Workflow request not found');
       }

-      // Verify request is closed
+      // Verify request is closed (APPROVED, REJECTED, or CLOSED)
       const status = (workflow as any).status?.toUpperCase();
       if (status !== 'APPROVED' && status !== 'REJECTED' && status !== 'CLOSED') {
         throw new Error('Request must be closed (APPROVED, REJECTED, or CLOSED) before creating summary');
       }

-      // Verify initiator owns the request
-      if ((workflow as any).initiatorId !== initiatorId) {
-        throw new Error('Only the initiator can create a summary for this request');
+      const initiatorId = (workflow as any).initiatorId;
+      const isInitiator = initiatorId === userId;
+      const isAdmin = userRole && ['admin', 'super_admin', 'management'].includes(userRole.toLowerCase());
+
+      // Access control: Allow system generation, initiator, or admin users
+      if (!isSystemGeneration && !isInitiator && !isAdmin) {
+        throw new Error('Only the initiator or admin users can create a summary for this request');
       }

-      // Check if summary already exists - return it if it does (idempotent behavior)
+      // Check if summary already exists
       const existingSummary = await RequestSummary.findOne({
         where: { requestId }
       });

       if (existingSummary) {
-        // Verify the existing summary belongs to the current initiator
-        if ((existingSummary as any).initiatorId !== initiatorId) {
-          throw new Error('Only the initiator can create a summary for this request');
+        // If regenerate is requested by initiator or admin, delete existing and create new
+        if (regenerate && (isInitiator || isAdmin)) {
+          logger.info(`[Summary] Regenerating summary for request ${requestId}`);
+          await existingSummary.destroy();
+        } else {
+          // Return existing summary (idempotent behavior)
+          logger.info(`Summary already exists for request ${requestId}, returning existing summary`);
+          return existingSummary as RequestSummary;
         }
-        logger.info(`Summary already exists for request ${requestId}, returning existing summary`);
-        return existingSummary as RequestSummary;
       }

       // Get conclusion remarks
@@ -81,10 +106,10 @@ export class SummaryService {
         isAiGenerated = false;
       }

-      // Create summary
+      // Create summary - always use the actual initiator from the workflow
       const summary = await RequestSummary.create({
         requestId,
-        initiatorId,
+        initiatorId: initiatorId, // Use workflow's initiator, not the requesting user
         title: (workflow as any).title || '',
         description: (workflow as any).description || null,
         closingRemarks,
@@ -92,7 +117,8 @@ export class SummaryService {
         conclusionId
       });

-      logger.info(`[Summary] Created summary ${(summary as any).summaryId} for request ${requestId}`);
+      const generationType = isSystemGeneration ? 'system' : (isAdmin ? 'admin' : 'initiator');
+      logger.info(`[Summary] Created summary ${(summary as any).summaryId} for request ${requestId} (generated by: ${generationType})`);
       return summary;
     } catch (error) {
       logger.error(`[Summary] Failed to create summary for request ${requestId}:`, error);
```
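The widened signature supports three call sites, matching the access-control list in the doc comment. A sketch of each — the surrounding variables and the `summaryService` binding are assumptions:

```typescript
// 1. System auto-generation when the final level approves
await summaryService.createSummary(requestId, 'system', { isSystemGeneration: true });

// 2. Initiator creating (or re-using) their summary — idempotent without `regenerate`
await summaryService.createSummary(requestId, initiatorUserId);

// 3. Admin forcing a rebuild after conclusion remarks changed
await summaryService.createSummary(requestId, adminUserId, {
  userRole: 'admin',
  regenerate: true
});
```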
**`SummaryService` — shared-summary access checks and the effective final remark:**

```diff
@@ -216,15 +242,50 @@ export class SummaryService {
       const initiator = (request as any).initiator || {};
       const initiatorTimestamp = (request as any).submissionDate || (request as any).createdAt;

+      // Get conclusion remark if available
+      let conclusionRemark = (summary as any).ConclusionRemark || (summary as any).conclusionRemark;
+
+      // If not loaded and we have conclusionId, fetch by conclusionId
+      if (!conclusionRemark && (summary as any).conclusionId) {
+        conclusionRemark = await ConclusionRemark.findByPk((summary as any).conclusionId);
+      }
+
+      // If still not found, fetch by requestId (summary may have been created before conclusion)
+      if (!conclusionRemark) {
+        conclusionRemark = await ConclusionRemark.findOne({
+          where: { requestId: (request as any).requestId }
+        });
+      }
+
+      // Determine effective final remark:
+      // - If user edited: use finalRemark
+      // - If user closed without editing: use aiGeneratedRemark (becomes final)
+      // - Otherwise: use closingRemarks from summary snapshot
+      const effectiveFinalRemark = conclusionRemark?.finalRemark ||
+        conclusionRemark?.aiGeneratedRemark ||
+        (summary as any).closingRemarks ||
+        '—';
+
+      logger.info(`[Summary] SharedSummary ${sharedSummaryId}: Effective final remark length: ${effectiveFinalRemark?.length || 0} chars (isEdited: ${conclusionRemark?.isEdited}, hasAI: ${!!conclusionRemark?.aiGeneratedRemark}, hasFinal: ${!!conclusionRemark?.finalRemark})`);
+
       return {
         summaryId: (summary as any).summaryId,
         requestId: (request as any).requestId,
         requestNumber: (request as any).requestNumber || 'N/A',
         title: (summary as any).title || (request as any).title || '',
         description: (summary as any).description || (request as any).description || '',
-        closingRemarks: (summary as any).closingRemarks || '—',
+        closingRemarks: effectiveFinalRemark, // ✅ Effective final remark (edited or AI)
         isAiGenerated: (summary as any).isAiGenerated || false,
         createdAt: (summary as any).createdAt,
+        // Include conclusion remark data for detailed view
+        conclusionRemark: conclusionRemark ? {
+          aiGeneratedRemark: conclusionRemark.aiGeneratedRemark,
+          finalRemark: conclusionRemark.finalRemark,
+          effectiveFinalRemark: effectiveFinalRemark, // ✅ Computed field for convenience
+          isEdited: conclusionRemark.isEdited,
+          generatedAt: conclusionRemark.generatedAt,
+          finalizedAt: conclusionRemark.finalizedAt
+        } : null,
         initiator: {
           name: initiator.displayName || 'Unknown',
           designation: initiator.designation || 'N/A',
@@ -239,7 +300,8 @@ export class SummaryService {
           priority: (request as any).priority || 'STANDARD',
           status: (request as any).status || 'CLOSED',
           submissionDate: (request as any).submissionDate,
-          closureDate: (request as any).closureDate
+          closureDate: (request as any).closureDate,
+          conclusionRemark: effectiveFinalRemark // ✅ Use effective final remark
         }
       };
     } catch (error) {
@@ -262,8 +324,20 @@ export class SummaryService {
         return null;
       }

-      // Check access: user must be initiator or have been shared with
+      // Check access: initiator, participants, management, or explicitly shared users
       const isInitiator = (summary as any).initiatorId === userId;
+
+      // Check if user is a participant (approver or spectator)
+      const isParticipant = await Participant.findOne({
+        where: { requestId, userId }
+      });
+
+      // Check if user has management/admin role
+      const currentUser = await User.findByPk(userId);
+      const userRole = (currentUser as any)?.role?.toUpperCase();
+      const isManagement = userRole && ['ADMIN', 'SUPER_ADMIN', 'MANAGEMENT'].includes(userRole);
+
+      // Check if explicitly shared
       const isShared = await SharedSummary.findOne({
         where: {
           summaryId: (summary as any).summaryId,
@@ -271,7 +345,7 @@ export class SummaryService {
         }
       });

-      if (!isInitiator && !isShared) {
+      if (!isInitiator && !isParticipant && !isManagement && !isShared) {
         return null; // No access, return null instead of throwing error
       }

```
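The fallback chain means the remark shown to viewers is always the "most final" text available. The three cases, traced with sample values:

```typescript
// Case 1: user edited the AI draft before closing
//   finalRemark = 'Approved with a budget cap of $45k'  -> shown as-is

// Case 2: user closed without editing
//   finalRemark = null, aiGeneratedRemark = 'All levels approved...' -> AI text becomes final

// Case 3: legacy summary created before conclusion remarks existed
//   both null -> falls back to the summary snapshot, then to the em-dash placeholder
const effectiveFinalRemark =
  conclusionRemark?.finalRemark ||
  conclusionRemark?.aiGeneratedRemark ||
  summary.closingRemarks ||
  '—';
```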
@ -321,8 +395,23 @@ export class SummaryService {
|
|||||||
throw new Error('Associated workflow request not found');
|
throw new Error('Associated workflow request not found');
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check access: user must be initiator or have been shared with
|
// Check access: initiator, participants, management, or explicitly shared users
|
||||||
const isInitiator = (summary as any).initiatorId === userId;
|
const isInitiator = (summary as any).initiatorId === userId;
|
||||||
|
|
||||||
|
// Check if user is a participant (approver or spectator) in the request
|
||||||
|
const isParticipant = await Participant.findOne({
|
||||||
|
where: {
|
||||||
|
requestId: (request as any).requestId,
|
||||||
|
userId
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check if user has management/admin role
|
||||||
|
const currentUser = await User.findByPk(userId);
|
||||||
|
const userRole = (currentUser as any)?.role?.toUpperCase();
|
||||||
|
const isManagement = userRole && ['ADMIN', 'SUPER_ADMIN', 'MANAGEMENT'].includes(userRole);
|
||||||
|
|
||||||
|
// Check if explicitly shared
|
||||||
     const isShared = await SharedSummary.findOne({
       where: {
         summaryId,

@@ -330,7 +419,7 @@ export class SummaryService {
       }
     });

-    if (!isInitiator && !isShared) {
+    if (!isInitiator && !isParticipant && !isManagement && !isShared) {
       throw new Error('Access denied: You do not have permission to view this summary');
     }

@@ -389,15 +478,50 @@ export class SummaryService {
     const initiator = (request as any).initiator || {};
     const initiatorTimestamp = (request as any).submissionDate || (request as any).createdAt;

+    // Get conclusion remark if available
+    let conclusionRemark = (summary as any).ConclusionRemark || (summary as any).conclusionRemark;
+
+    // If not loaded and we have conclusionId, fetch by conclusionId
+    if (!conclusionRemark && (summary as any).conclusionId) {
+      conclusionRemark = await ConclusionRemark.findByPk((summary as any).conclusionId);
+    }
+
+    // If still not found, fetch by requestId (summary may have been created before conclusion)
+    if (!conclusionRemark) {
+      conclusionRemark = await ConclusionRemark.findOne({
+        where: { requestId: (request as any).requestId }
+      });
+    }
+
+    // Determine effective final remark:
+    // - If user edited: use finalRemark
+    // - If user closed without editing: use aiGeneratedRemark (becomes final)
+    // - Otherwise: use closingRemarks from summary snapshot
+    const effectiveFinalRemark = conclusionRemark?.finalRemark ||
+      conclusionRemark?.aiGeneratedRemark ||
+      (summary as any).closingRemarks ||
+      '—';
+
+    logger.info(`[Summary] Summary ${summaryId}: Effective final remark length: ${effectiveFinalRemark?.length || 0} chars (isEdited: ${conclusionRemark?.isEdited}, hasAI: ${!!conclusionRemark?.aiGeneratedRemark}, hasFinal: ${!!conclusionRemark?.finalRemark})`);
+
     return {
       summaryId: (summary as any).summaryId,
       requestId: (request as any).requestId,
       requestNumber: (request as any).requestNumber || 'N/A',
       title: (summary as any).title || (request as any).title || '',
       description: (summary as any).description || (request as any).description || '',
-      closingRemarks: (summary as any).closingRemarks || '—',
+      closingRemarks: effectiveFinalRemark, // ✅ Effective final remark (edited or AI)
       isAiGenerated: (summary as any).isAiGenerated || false,
       createdAt: (summary as any).createdAt,
+      // Include conclusion remark data for detailed view
+      conclusionRemark: conclusionRemark ? {
+        aiGeneratedRemark: conclusionRemark.aiGeneratedRemark,
+        finalRemark: conclusionRemark.finalRemark,
+        effectiveFinalRemark: effectiveFinalRemark, // ✅ Computed field: finalRemark || aiGeneratedRemark
+        isEdited: conclusionRemark.isEdited,
+        generatedAt: conclusionRemark.generatedAt,
+        finalizedAt: conclusionRemark.finalizedAt
+      } : null,
       initiator: {
         name: initiator.displayName || 'Unknown',
         designation: initiator.designation || 'N/A',

@@ -412,7 +536,8 @@ export class SummaryService {
         priority: (request as any).priority || 'STANDARD',
         status: (request as any).status || 'CLOSED',
         submissionDate: (request as any).submissionDate,
-        closureDate: (request as any).closureDate
+        closureDate: (request as any).closureDate,
+        conclusionRemark: effectiveFinalRemark // ✅ Use effective final remark
       }
     };
   } catch (error) {
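The fallback chain above is worth spelling out. A minimal sketch of the resolution order with hypothetical inputs (the object shapes mirror the fields used in the diff; values are illustrative):

```typescript
// Sketch of the effectiveFinalRemark fallback: finalRemark -> aiGeneratedRemark -> closingRemarks -> '—'
type Remark = { finalRemark?: string | null; aiGeneratedRemark?: string | null } | null;

const resolve = (remark: Remark, closingRemarks?: string | null): string =>
  remark?.finalRemark || remark?.aiGeneratedRemark || closingRemarks || '—';

console.log(resolve({ finalRemark: 'Edited text', aiGeneratedRemark: 'AI text' })); // 'Edited text' (user edited)
console.log(resolve({ finalRemark: null, aiGeneratedRemark: 'AI text' }));          // 'AI text' (closed without editing)
console.log(resolve(null, 'Snapshot remarks'));                                     // 'Snapshot remarks' (no ConclusionRemark row)
console.log(resolve(null));                                                         // '—'
```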
@@ -2,7 +2,7 @@ import { tatQueue } from '../queues/tatQueue';
 import { calculateDelay, addWorkingHours, addWorkingHoursExpress } from '@utils/tatTimeUtils';
 import { getTatThresholds } from './configReader.service';
 import dayjs from 'dayjs';
-import logger from '@utils/logger';
+import logger, { logTATEvent } from '@utils/logger';
 import { Priority } from '../types/common.types';

 export class TatSchedulerService {

@@ -140,7 +140,12 @@ export class TatSchedulerService {
       jobIndex++;
     }

-    logger.info(`[TAT Scheduler] ✅ TAT jobs scheduled for request ${requestId}`);
+    logTATEvent('warning', requestId, {
+      level: parseInt(levelId.split('-').pop() || '1'),
+      tatHours: tatDurationHours,
+      priority,
+      message: 'TAT jobs scheduled',
+    });
   } catch (error) {
     logger.error(`[TAT Scheduler] Failed to schedule TAT jobs:`, error);
     throw error;
@@ -220,61 +220,134 @@ export class UserService {
     }
   }

+  /**
+   * Fetch user from Okta by email
+   */
+  async fetchUserFromOktaByEmail(email: string): Promise<OktaUser | null> {
+    try {
+      const oktaDomain = process.env.OKTA_DOMAIN;
+      const oktaApiToken = process.env.OKTA_API_TOKEN;
+
+      if (!oktaDomain || !oktaApiToken) {
+        return null;
+      }
+
+      // Search Okta users by email (exact match)
+      const response = await axios.get(`${oktaDomain}/api/v1/users`, {
+        params: { search: `profile.email eq "${email}"`, limit: 1 },
+        headers: {
+          'Authorization': `SSWS ${oktaApiToken}`,
+          'Accept': 'application/json'
+        },
+        timeout: 5000
+      });
+
+      const users: OktaUser[] = response.data || [];
+      return users.length > 0 ? users[0] : null;
+    } catch (error: any) {
+      console.error(`Failed to fetch user from Okta by email ${email}:`, error.message);
+      return null;
+    }
+  }
+
   /**
    * Ensure user exists in database (create if not exists)
-   * Used when tagging users from Okta search results
+   * Used when tagging users from Okta search results or when only email is provided
+   *
+   * @param oktaUserData - Can be just { email } or full user data
    */
   async ensureUserExists(oktaUserData: {
-    userId: string;
+    userId?: string;
     email: string;
     displayName?: string;
     firstName?: string;
     lastName?: string;
     department?: string;
     phone?: string;
+    designation?: string;
+    jobTitle?: string;
+    manager?: string;
+    employeeId?: string;
+    employeeNumber?: string;
+    secondEmail?: string;
+    mobilePhone?: string;
+    location?: string;
   }): Promise<UserModel> {
     const email = oktaUserData.email.toLowerCase();

-    // Check if user already exists
+    // Check if user already exists in database
     let user = await UserModel.findOne({
       where: {
         [Op.or]: [
           { email },
-          { oktaSub: oktaUserData.userId }
+          ...(oktaUserData.userId ? [{ oktaSub: oktaUserData.userId }] : [])
         ]
       }
     });

     if (user) {
-      // Update existing user with latest info from Okta
-      await user.update({
-        oktaSub: oktaUserData.userId,
+      // Update existing user with latest info from Okta (if provided)
+      const updateData: any = {
         email,
-        firstName: oktaUserData.firstName || user.firstName,
-        lastName: oktaUserData.lastName || user.lastName,
-        displayName: oktaUserData.displayName || user.displayName,
-        department: oktaUserData.department || user.department,
-        phone: oktaUserData.phone || user.phone,
         isActive: true,
         updatedAt: new Date()
-      });
+      };
+
+      if (oktaUserData.userId) updateData.oktaSub = oktaUserData.userId;
+      if (oktaUserData.firstName) updateData.firstName = oktaUserData.firstName;
+      if (oktaUserData.lastName) updateData.lastName = oktaUserData.lastName;
+      if (oktaUserData.displayName) updateData.displayName = oktaUserData.displayName;
+      if (oktaUserData.department) updateData.department = oktaUserData.department;
+      if (oktaUserData.phone) updateData.phone = oktaUserData.phone;
+      if (oktaUserData.designation) updateData.designation = oktaUserData.designation;
+      if (oktaUserData.employeeId) updateData.employeeId = oktaUserData.employeeId;
+
+      await user.update(updateData);
       return user;
     }

-    // Create new user
+    // User not found in DB - try to fetch from Okta
+    if (!oktaUserData.userId) {
+      const oktaUser = await this.fetchUserFromOktaByEmail(email);
+      if (oktaUser) {
+        // Found in Okta - create with Okta data
+        user = await UserModel.create({
+          oktaSub: oktaUser.id,
+          email,
+          employeeId: null,
+          firstName: oktaUser.profile.firstName || null,
+          lastName: oktaUser.profile.lastName || null,
+          displayName: oktaUser.profile.displayName || `${oktaUser.profile.firstName || ''} ${oktaUser.profile.lastName || ''}`.trim() || email.split('@')[0],
+          department: oktaUser.profile.department || null,
+          designation: null,
+          phone: oktaUser.profile.mobilePhone || null,
+          isActive: oktaUser.status === 'ACTIVE',
+          role: 'USER',
+          lastLogin: undefined,
+          createdAt: new Date(),
+          updatedAt: new Date()
+        });
+        return user;
+      } else {
+        // Not found in Okta either
+        throw new Error(`User with email '${email}' not found in organization directory`);
+      }
+    }
+
+    // Create new user with provided data
     user = await UserModel.create({
       oktaSub: oktaUserData.userId,
       email,
-      employeeId: null, // Will be updated on first login
+      employeeId: oktaUserData.employeeId || null,
       firstName: oktaUserData.firstName || null,
       lastName: oktaUserData.lastName || null,
       displayName: oktaUserData.displayName || email.split('@')[0],
       department: oktaUserData.department || null,
-      designation: null,
-      phone: oktaUserData.phone || null,
+      designation: oktaUserData.designation || oktaUserData.jobTitle || null,
+      phone: oktaUserData.phone || oktaUserData.mobilePhone || null,
       isActive: true,
       role: 'USER',
-      lastLogin: undefined, // Not logged in yet, just created for tagging
+      lastLogin: undefined,
       createdAt: new Date(),
       updatedAt: new Date()
     });
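With the relaxed signature, callers can now resolve a user from an email alone: the service checks the local database first and falls back to the Okta lookup. A usage sketch (the email value and error handling are illustrative, not from the source):

```typescript
import { UserService } from './user.service';

const userService = new UserService();

try {
  // Only the email is required; oktaSub, name, and department are filled in
  // from the DB row or from the Okta profile if the user has to be created.
  const user = await userService.ensureUserExists({ email: 'manager@royalenfield.com' });
  console.log(user.displayName, user.department);
} catch (err) {
  // Thrown when the email exists neither in the DB nor in Okta
  console.error((err as Error).message);
}
```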
src/services/userEnrichment.service.ts (new file, 207 lines)
@@ -0,0 +1,207 @@
+/**
+ * User Enrichment Service
+ *
+ * Handles automatic user lookup/creation and data enrichment for workflow creation
+ */
+
+import { User } from '@models/User';
+import logger from '@utils/logger';
+import { UserService } from './user.service';
+
+const userService = new UserService();
+
+interface SimplifiedApprovalLevel {
+  email: string;
+  tatHours: number;
+  isFinalApprover?: boolean;
+  levelNumber?: number;
+  levelName?: string;
+  approverId?: string;
+  approverEmail?: string;
+  approverName?: string;
+}
+
+interface EnrichedApprovalLevel {
+  levelNumber: number;
+  levelName: string;
+  approverId: string;
+  approverEmail: string;
+  approverName: string;
+  tatHours: number;
+  isFinalApprover: boolean;
+}
+
+interface SimplifiedSpectator {
+  email: string;
+  userId?: string;
+  userEmail?: string;
+  userName?: string;
+}
+
+interface EnrichedSpectator {
+  userId: string;
+  userEmail: string;
+  userName: string;
+  participantType: 'SPECTATOR';
+  canComment: boolean;
+  canViewDocuments: boolean;
+  canDownloadDocuments: boolean;
+  notificationEnabled: boolean;
+}
+
+/**
+ * Enrich approval levels with user data from database/AD
+ * @param approvalLevels - Simplified approval levels (only email + tatHours required)
+ * @returns Enriched approval levels with full user data
+ */
+export async function enrichApprovalLevels(
+  approvalLevels: SimplifiedApprovalLevel[]
+): Promise<EnrichedApprovalLevel[]> {
+  const enriched: EnrichedApprovalLevel[] = [];
+  const processedEmails = new Set<string>();
+
+  for (let i = 0; i < approvalLevels.length; i++) {
+    const level = approvalLevels[i];
+    const email = level.email.toLowerCase();
+
+    // Check for duplicate emails
+    if (processedEmails.has(email)) {
+      throw new Error(`Duplicate approver email found: ${email}. Each approver must have a unique email.`);
+    }
+    processedEmails.add(email);
+
+    try {
+      // Find or create user from AD
+      let user = await User.findOne({ where: { email } });
+
+      if (!user) {
+        logger.info(`[UserEnrichment] User not found in DB, attempting to sync from AD: ${email}`);
+        // Try to fetch and create user from AD
+        try {
+          user = await userService.ensureUserExists({ email }) as any;
+        } catch (adError: any) {
+          logger.error(`[UserEnrichment] Failed to sync user from AD: ${email}`, adError);
+          throw new Error(`Approver email '${email}' not found in organization directory. Please verify the email address.`);
+        }
+      }
+
+      const userId = (user as any).userId;
+      const displayName = (user as any).displayName || (user as any).email;
+      const designation = (user as any).designation || (user as any).jobTitle;
+      const department = (user as any).department;
+
+      // Auto-generate level name
+      let levelName = level.levelName;
+      if (!levelName) {
+        if (designation) {
+          levelName = `${designation} Approval`;
+        } else if (department) {
+          levelName = `${department} Approval`;
+        } else {
+          levelName = `Level ${i + 1} Approval`;
+        }
+      }
+
+      // Auto-detect final approver (last level)
+      const isFinalApprover = level.isFinalApprover !== undefined
+        ? level.isFinalApprover
+        : (i === approvalLevels.length - 1);
+
+      enriched.push({
+        levelNumber: level.levelNumber || (i + 1),
+        levelName,
+        approverId: userId,
+        approverEmail: email,
+        approverName: displayName,
+        tatHours: level.tatHours,
+        isFinalApprover,
+      });
+
+      logger.info(`[UserEnrichment] Enriched approval level ${i + 1}: ${email} -> ${displayName} (${levelName})`);
+    } catch (error: any) {
+      logger.error(`[UserEnrichment] Failed to enrich approval level for ${email}:`, error);
+      throw error;
+    }
+  }
+
+  return enriched;
+}
+
+/**
+ * Enrich spectators with user data from database/AD
+ * @param spectators - Simplified spectators (only email required)
+ * @returns Enriched spectators with full user data
+ */
+export async function enrichSpectators(
+  spectators: SimplifiedSpectator[]
+): Promise<EnrichedSpectator[]> {
+  if (!spectators || spectators.length === 0) {
+    return [];
+  }
+
+  const enriched: EnrichedSpectator[] = [];
+  const processedEmails = new Set<string>();
+
+  for (const spectator of spectators) {
+    const email = spectator.email.toLowerCase();
+
+    // Check for duplicate emails
+    if (processedEmails.has(email)) {
+      throw new Error(`Duplicate spectator email found: ${email}. Each spectator must have a unique email.`);
+    }
+    processedEmails.add(email);
+
+    try {
+      // Find or create user from AD
+      let user = await User.findOne({ where: { email } });
+
+      if (!user) {
+        logger.info(`[UserEnrichment] User not found in DB, attempting to sync from AD: ${email}`);
+        try {
+          user = await userService.ensureUserExists({ email }) as any;
+        } catch (adError: any) {
+          logger.error(`[UserEnrichment] Failed to sync user from AD: ${email}`, adError);
+          throw new Error(`Spectator email '${email}' not found in organization directory. Please verify the email address.`);
+        }
+      }
+
+      const userId = (user as any).userId;
+      const displayName = (user as any).displayName || (user as any).email;
+
+      enriched.push({
+        userId,
+        userEmail: email,
+        userName: displayName,
+        participantType: 'SPECTATOR',
+        canComment: true,
+        canViewDocuments: true,
+        canDownloadDocuments: false,
+        notificationEnabled: true,
+      });
+
+      logger.info(`[UserEnrichment] Enriched spectator: ${email} -> ${displayName}`);
+    } catch (error: any) {
+      logger.error(`[UserEnrichment] Failed to enrich spectator ${email}:`, error);
+      throw error;
+    }
+  }
+
+  return enriched;
+}
+
+/**
+ * Validate and ensure initiator exists in database
+ * @param initiatorId - User ID of the initiator
+ * @returns User object if valid
+ * @throws Error if initiator not found or invalid
+ */
+export async function validateInitiator(initiatorId: string): Promise<any> {
+  const user = await User.findByPk(initiatorId);
+
+  if (!user) {
+    throw new Error(`Invalid initiator: User with ID '${initiatorId}' not found. Please ensure you are logged in with a valid account.`);
+  }
+
+  return user;
+}
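A minimal usage sketch of the enrichment step. The input shape is what the simplified API accepts; the commented output is hypothetical, since the actual names and designations come from the DB/AD lookup:

```typescript
import { enrichApprovalLevels } from './userEnrichment.service';

// Simplified input as sent by the client (email + tatHours only)
const levels = await enrichApprovalLevels([
  { email: 'manager@royalenfield.com', tatHours: 24 },
  { email: 'director@royalenfield.com', tatHours: 48 },
]);

// Hypothetical result: the last level is auto-flagged as final approver,
// and levelName falls back designation -> department -> "Level N Approval".
// [
//   { levelNumber: 1, levelName: 'Manager Approval',  approverEmail: 'manager@...',  tatHours: 24, isFinalApprover: false, ... },
//   { levelNumber: 2, levelName: 'Director Approval', approverEmail: 'director@...', tatHours: 48, isFinalApprover: true,  ... }
// ]
console.log(levels.map(l => `${l.levelNumber}: ${l.levelName} (final: ${l.isFinalApprover})`));
```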
@@ -8,7 +8,7 @@ import { Document } from '@models/Document';
 import '@models/index';
 import { CreateWorkflowRequest, UpdateWorkflowRequest } from '../types/workflow.types';
 import { generateRequestNumber, calculateTATDays } from '@utils/helpers';
-import logger from '@utils/logger';
+import logger, { logWorkflowEvent, logWithContext } from '@utils/logger';
 import { WorkflowStatus, ParticipantType, ApprovalStatus } from '../types/common.types';
 import { Op, QueryTypes } from 'sequelize';
 import { sequelize } from '@config/database';

@@ -18,6 +18,7 @@ import dayjs from 'dayjs';
 import { notificationService } from './notification.service';
 import { activityService } from './activity.service';
 import { tatSchedulerService } from './tatScheduler.service';
+import { emitToRequestRoom } from '../realtime/socket';

 export class WorkflowService {
   /**
@@ -40,13 +41,24 @@ export class WorkflowService {

   /**
    * Add a new approver to an existing workflow
+   * Auto-creates user from Okta/AD if not in database
    */
   async addApprover(requestId: string, email: string, addedBy: string): Promise<any> {
     try {
-      // Find user by email
-      const user = await User.findOne({ where: { email: email.toLowerCase() } });
+      const emailLower = email.toLowerCase();
+
+      // Find or create user from AD
+      let user = await User.findOne({ where: { email: emailLower } });
       if (!user) {
-        throw new Error('User not found with this email');
+        logger.info(`[Workflow] User not found in DB, syncing from AD: ${emailLower}`);
+        const { UserService } = await import('./user.service');
+        const userService = new UserService();
+        try {
+          user = await userService.ensureUserExists({ email: emailLower }) as any;
+        } catch (adError: any) {
+          logger.error(`[Workflow] Failed to sync user from AD: ${emailLower}`, adError);
+          throw new Error(`Approver email '${email}' not found in organization directory. Please verify the email address.`);
+        }
       }

       const userId = (user as any).userId;
@@ -143,52 +155,9 @@ export class WorkflowService {
         throw new Error('Cannot skip future approval levels');
       }

-      // Cancel pause if workflow is paused (requirement 3.7)
-      // When initiator skips a paused approver, the pause is negated and workflow resumes automatically
+      // Block skip if workflow is paused - must resume first
       if ((workflow as any).isPaused || (workflow as any).status === 'PAUSED') {
-        try {
-          // Get the paused level (should be the level being skipped)
-          const pausedLevel = await ApprovalLevel.findOne({
-            where: {
-              requestId,
-              isPaused: true
-            }
-          });
-
-          // Cancel pause on the workflow (the level will be marked as skipped below)
-          const previousStatus = (workflow as any).pauseTatSnapshot?.previousStatus || WorkflowStatus.PENDING;
-          await workflow.update({
-            isPaused: false,
-            pausedAt: null as any,
-            pausedBy: null as any,
-            pauseReason: null as any,
-            pauseResumeDate: null as any,
-            pauseTatSnapshot: null as any,
-            status: previousStatus // Restore previous status (should be PENDING)
-          });
-
-          // If the paused level is the one being skipped, clear its pause fields
-          // (it will be marked as SKIPPED below, so no need to restore to PENDING)
-          if (pausedLevel && (pausedLevel as any).levelId === levelId) {
-            await pausedLevel.update({
-              isPaused: false,
-              pausedAt: null as any,
-              pausedBy: null as any,
-              pauseReason: null as any,
-              pauseResumeDate: null as any,
-              pauseTatStartTime: null as any,
-              pauseElapsedHours: null as any
-            });
-          }
-
-          logger.info(`[Workflow] Pause cancelled and workflow resumed when approver was skipped for request ${requestId}`);
-
-          // Reload workflow to get updated state after resume
-          await workflow.reload();
-        } catch (pauseError) {
-          logger.warn(`[Workflow] Failed to cancel pause when skipping approver:`, pauseError);
-          // Continue with skip even if pause cancellation fails
-        }
+        throw new Error('Cannot skip approver while workflow is paused. Please resume the workflow first before skipping.');
       }

       // Mark as skipped
@@ -277,6 +246,17 @@ export class WorkflowService {
       });

       logger.info(`[Workflow] Skipped approver at level ${levelNumber} for request ${requestId}`);
+
+      // Emit real-time update to all users viewing this request
+      const wfForEmit = await WorkflowRequest.findByPk(requestId);
+      emitToRequestRoom(requestId, 'request:updated', {
+        requestId,
+        requestNumber: (wfForEmit as any)?.requestNumber,
+        action: 'SKIP',
+        levelNumber: levelNumber,
+        timestamp: new Date().toISOString()
+      });
+
       return level;
     } catch (error) {
       logger.error(`[Workflow] Failed to skip approver:`, error);
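For consumers of this event, a minimal client-side sketch, assuming a standard Socket.IO client and that the server joins viewers to a per-request room (the room-joining handshake is not shown in this diff, so that part is an assumption):

```typescript
import { io } from 'socket.io-client';

// Hypothetical client: listen for live updates on a request detail page.
const socket = io('https://api.example.com', { auth: { token: '<jwt>' } });

// The payload fields mirror what emitToRequestRoom sends above.
socket.on('request:updated', (payload: {
  requestId: string;
  requestNumber?: string;
  action: string;        // e.g. 'SKIP'
  levelNumber?: number;
  timestamp: string;
}) => {
  console.log(`Request ${payload.requestNumber} updated: ${payload.action}`);
  // e.g. refetch the workflow detail here
});
```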
@@ -286,6 +266,7 @@ export class WorkflowService {

   /**
    * Add a new approver at specific level (with level shifting)
+   * Auto-creates user from Okta/AD if not in database
    */
   async addApproverAtLevel(
     requestId: string,

@@ -295,14 +276,26 @@ export class WorkflowService {
     addedBy: string
   ): Promise<any> {
     try {
-      // Find user by email
-      const user = await User.findOne({ where: { email: email.toLowerCase() } });
+      const emailLower = email.toLowerCase();
+
+      // Find or create user from AD
+      let user = await User.findOne({ where: { email: emailLower } });
       if (!user) {
-        throw new Error('User not found with this email');
+        logger.info(`[Workflow] User not found in DB, syncing from AD: ${emailLower}`);
+        const { UserService } = await import('./user.service');
+        const userService = new UserService();
+        try {
+          user = await userService.ensureUserExists({ email: emailLower }) as any;
+        } catch (adError: any) {
+          logger.error(`[Workflow] Failed to sync user from AD: ${emailLower}`, adError);
+          throw new Error(`Approver email '${email}' not found in organization directory. Please verify the email address.`);
+        }
       }

       const userId = (user as any).userId;
       const userName = (user as any).displayName || (user as any).email;
+      const designation = (user as any).designation || (user as any).jobTitle;
+      const department = (user as any).department;

       // Check if user is already a participant
       const existing = await Participant.findOne({

@@ -338,27 +331,39 @@ export class WorkflowService {
       }

       // Shift existing levels at and after target level
-      const levelsToShift = allLevels.filter(l => (l as any).levelNumber >= targetLevel);
+      // IMPORTANT: Shift in REVERSE order to avoid unique constraint violations
+      const levelsToShift = allLevels
+        .filter(l => (l as any).levelNumber >= targetLevel)
+        .sort((a, b) => (b as any).levelNumber - (a as any).levelNumber); // Sort descending
+
       for (const levelToShift of levelsToShift) {
-        const newLevelNumber = (levelToShift as any).levelNumber + 1;
+        const oldLevelNumber = (levelToShift as any).levelNumber;
+        const newLevelNumber = oldLevelNumber + 1;
         await levelToShift.update({
           levelNumber: newLevelNumber,
           levelName: `Level ${newLevelNumber}`
         });
-        logger.info(`[Workflow] Shifted level ${(levelToShift as any).levelNumber - 1} → ${newLevelNumber}`);
+        logger.info(`[Workflow] Shifted level ${oldLevelNumber} → ${newLevelNumber}`);
       }

       // Update total levels in workflow
       await workflow.update({ totalLevels: allLevels.length + 1 });

+      // Auto-generate smart level name
+      let levelName = `Level ${targetLevel}`;
+      if (designation) {
+        levelName = `${designation} Approval`;
+      } else if (department) {
+        levelName = `${department} Approval`;
+      }
+
       // Create new approval level at target position
       const newLevel = await ApprovalLevel.create({
         requestId,
         levelNumber: targetLevel,
-        levelName: `Level ${targetLevel}`,
+        levelName,
         approverId: userId,
-        approverEmail: email.toLowerCase(),
+        approverEmail: emailLower,
         approverName: userName,
         tatHours,
         // tatDays is auto-calculated by database as a generated column
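Why the descending sort matters: shifting levels in ascending order would try to set level 2 → 3 while an existing row still holds 3, tripping a unique index on (requestId, levelNumber) — which this table appears to have, given the comment in the diff. A minimal self-contained illustration of the ordering constraint (the set stands in for the indexed column):

```typescript
// Levels currently occupy 1, 2, 3 and we insert a new approver at level 2.
// Ascending updates collide (2→3 while 3 exists); descending never do.
const occupied = new Set([1, 2, 3]);
const toShift = [3, 2]; // descending — shift the highest level first

for (const n of toShift) {
  if (occupied.has(n + 1)) throw new Error(`collision: level ${n + 1} still occupied`);
  occupied.delete(n);
  occupied.add(n + 1); // 3→4 (4 is free), then 2→3 (3 was freed by the previous step)
}
occupied.add(2); // slot 2 is now free for the new approver
console.log([...occupied].sort()); // [1, 2, 3, 4]
```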
@@ -437,13 +442,24 @@ export class WorkflowService {

   /**
    * Add a new spectator to an existing workflow
+   * Auto-creates user from Okta/AD if not in database
    */
   async addSpectator(requestId: string, email: string, addedBy: string): Promise<any> {
     try {
-      // Find user by email
-      const user = await User.findOne({ where: { email: email.toLowerCase() } });
+      const emailLower = email.toLowerCase();
+
+      // Find or create user from AD
+      let user = await User.findOne({ where: { email: emailLower } });
       if (!user) {
-        throw new Error('User not found with this email');
+        logger.info(`[Workflow] User not found in DB, syncing from AD: ${emailLower}`);
+        const { UserService } = await import('./user.service');
+        const userService = new UserService();
+        try {
+          user = await userService.ensureUserExists({ email: emailLower }) as any;
+        } catch (adError: any) {
+          logger.error(`[Workflow] Failed to sync user from AD: ${emailLower}`, adError);
+          throw new Error(`Spectator email '${email}' not found in organization directory. Please verify the email address.`);
+        }
       }

       const userId = (user as any).userId;
@@ -2271,7 +2287,12 @@ export class WorkflowService {
         }
       }

-      logger.info(`Workflow created: ${requestNumber}`);
+      logWorkflowEvent('created', workflow.requestId, {
+        requestNumber,
+        priority: workflowData.priority,
+        userId: initiatorId,
+        status: workflow.status,
+      });

       // Get initiator details
       const initiator = await User.findByPk(initiatorId);

@@ -2326,7 +2347,11 @@ export class WorkflowService {

       return workflow;
     } catch (error) {
-      logger.error('Failed to create workflow:', error);
+      logWithContext('error', 'Failed to create workflow', {
+        userId: initiatorId,
+        priority: workflowData.priority,
+        error,
+      });
       throw new Error('Failed to create workflow');
     }
   }
@@ -1,50 +1,442 @@
 import winston from 'winston';
 import path from 'path';
+import os from 'os';

 const logDir = process.env.LOG_FILE_PATH || './logs';
+const isProduction = process.env.NODE_ENV === 'production';

-// Create logger instance
-const logger = winston.createLogger({
-  level: process.env.LOG_LEVEL || 'info',
-  format: winston.format.combine(
-    winston.format.timestamp({
-      format: 'YYYY-MM-DD HH:mm:ss',
-    }),
-    winston.format.errors({ stack: true }),
-    winston.format.json()
-  ),
-  defaultMeta: { service: 're-workflow-backend' },
-  transports: [
-    // Write all logs with level 'error' and below to error.log
-    new winston.transports.File({
-      filename: path.join(logDir, 'error.log'),
-      level: 'error',
-      maxsize: 5242880, // 5MB
-      maxFiles: 5,
-    }),
-    // Write all logs with level 'info' and below to combined.log
-    new winston.transports.File({
-      filename: path.join(logDir, 'combined.log'),
-      maxsize: 5242880, // 5MB
-      maxFiles: 5,
-    }),
-  ],
-});
-
-// If we're not in production, log to the console as well
-if (process.env.NODE_ENV !== 'production') {
-  logger.add(
+// ============ SENSITIVE DATA PATTERNS ============
+const SENSITIVE_KEYS = [
+  'password', 'secret', 'token', 'key', 'apikey', 'api_key', 'api-key',
+  'authorization', 'auth', 'credential', 'private', 'access_token',
+  'refresh_token', 'jwt', 'bearer', 'session', 'cookie', 'csrf',
+  'vapid', 'smtp_password', 'db_password', 'redis_url', 'connection_string'
+];
+
+const SENSITIVE_PATTERN = new RegExp(
+  `(${SENSITIVE_KEYS.join('|')})\\s*[=:]\\s*['"]?([^'\"\\s,}\\]]+)['"]?`,
+  'gi'
+);
+
+/**
+ * Mask sensitive values in strings (API keys, passwords, tokens)
+ */
+const maskSensitiveData = (value: any): any => {
+  if (typeof value === 'string') {
+    // Mask patterns like "API_KEY = abc123" or "password: secret"
+    let masked = value.replace(SENSITIVE_PATTERN, (match, key, val) => {
+      if (val && val.length > 0) {
+        const maskedVal = val.length > 4
+          ? val.substring(0, 2) + '***' + val.substring(val.length - 2)
+          : '***';
+        return `${key}=${maskedVal}`;
+      }
+      return match;
+    });
+
+    // Mask standalone tokens/keys (long alphanumeric strings that look like secrets)
+    // e.g., "sk-abc123xyz789..." or "ghp_xxxx..."
+    masked = masked.replace(
+      /\b(sk-|ghp_|gho_|github_pat_|xox[baprs]-|Bearer\s+)([A-Za-z0-9_-]{20,})/gi,
+      (match, prefix, token) => `${prefix}${'*'.repeat(8)}...`
+    );
+
+    return masked;
+  }
+
+  if (Array.isArray(value)) {
+    return value.map(maskSensitiveData);
+  }
+
+  if (value && typeof value === 'object') {
+    const masked: any = {};
+    for (const [k, v] of Object.entries(value)) {
+      const keyLower = k.toLowerCase();
+      // Check if key itself is sensitive
+      if (SENSITIVE_KEYS.some(sk => keyLower.includes(sk))) {
+        masked[k] = typeof v === 'string' && v.length > 0 ? '***REDACTED***' : v;
+      } else {
+        masked[k] = maskSensitiveData(v);
+      }
+    }
+    return masked;
+  }
+
+  return value;
+};
+
+// ============ COMMON LABELS/METADATA ============
+const appMeta = {
+  app: 're-workflow',
+  service: 'backend',
+  environment: process.env.NODE_ENV || 'development',
+  version: process.env.APP_VERSION || '1.2.0',
+};
+
+// ============ TRANSPORTS ============
+const transports: winston.transport[] = [
+  // Local file transport - Error logs
+  new winston.transports.File({
+    filename: path.join(logDir, 'error.log'),
+    level: 'error',
+    maxsize: 10 * 1024 * 1024, // 10MB
+    maxFiles: 10,
+    tailable: true,
+  }),
+  // Local file transport - Combined logs
+  new winston.transports.File({
+    filename: path.join(logDir, 'combined.log'),
+    maxsize: 10 * 1024 * 1024, // 10MB
+    maxFiles: 10,
+    tailable: true,
+  }),
+];
+
+// ============ LOKI TRANSPORT (Grafana) ============
+if (process.env.LOKI_HOST) {
+  try {
+    const LokiTransport = require('winston-loki');
+
+    const lokiTransportOptions: any = {
+      host: process.env.LOKI_HOST,
+      labels: appMeta,
+      json: true,
+      format: winston.format.combine(
+        winston.format.timestamp(),
+        winston.format.json()
+      ),
+      replaceTimestamp: true,
+      onConnectionError: (err: Error) => {
+        console.error('[Loki] Connection error:', err.message);
+      },
+      batching: true,
+      interval: 5,
+    };
+
+    if (process.env.LOKI_USER && process.env.LOKI_PASSWORD) {
+      lokiTransportOptions.basicAuth = `${process.env.LOKI_USER}:${process.env.LOKI_PASSWORD}`;
+    }
+
+    transports.push(new LokiTransport(lokiTransportOptions));
+    console.log(`[Logger] ✅ Loki transport enabled: ${process.env.LOKI_HOST}`);
+  } catch (error) {
+    console.warn('[Logger] ⚠️ Failed to initialize Loki transport:', (error as Error).message);
+  }
+}
+
+// ============ CONSOLE TRANSPORT (Development) ============
+if (!isProduction) {
+  transports.push(
     new winston.transports.Console({
       format: winston.format.combine(
         winston.format.colorize(),
-        winston.format.simple()
+        winston.format.printf(({ level, message, timestamp, ...meta }) => {
+          const metaStr = Object.keys(meta).length && !meta.service
+            ? ` ${JSON.stringify(meta)}`
+            : '';
+          return `${timestamp} [${level}]: ${message}${metaStr}`;
+        })
       ),
     })
   );
 }
+
+// ============ ERROR SANITIZER ============
+/**
+ * Sanitize error objects for logging - prevents huge Axios error dumps
+ */
+const sanitizeError = (error: any): object => {
+  // Handle Axios errors specifically
+  if (error?.isAxiosError || error?.name === 'AxiosError') {
+    return {
+      name: error.name,
+      message: error.message,
+      code: error.code,
+      status: error.response?.status,
+      statusText: error.response?.statusText,
+      url: error.config?.url,
+      method: error.config?.method,
+      responseData: error.response?.data,
+    };
+  }
+
+  // Handle standard errors
+  if (error instanceof Error) {
+    return {
+      name: error.name,
+      message: error.message,
+      stack: error.stack,
+      ...(error as any).statusCode && { statusCode: (error as any).statusCode },
+    };
+  }
+
+  // Fallback for unknown error types
+  return {
+    message: String(error),
+    type: typeof error,
+  };
+};
+
+// Custom format to sanitize errors and mask sensitive data before logging
+const sanitizeFormat = winston.format((info) => {
+  // Sanitize error objects
+  if (info.error && typeof info.error === 'object') {
+    info.error = sanitizeError(info.error);
+  }
+
+  // If message is an error object, sanitize it
+  if (info.message && typeof info.message === 'object' && (info.message as any).stack) {
+    info.error = sanitizeError(info.message);
+    info.message = (info.message as Error).message;
+  }
+
+  // Mask sensitive data in message
+  if (typeof info.message === 'string') {
+    info.message = maskSensitiveData(info.message);
+  }
+
+  // Mask sensitive data in all metadata
+  for (const key of Object.keys(info)) {
+    if (key !== 'level' && key !== 'timestamp' && key !== 'service') {
+      info[key] = maskSensitiveData(info[key]);
+    }
+  }
+
+  return info;
+});
+
+// ============ CREATE LOGGER ============
+const logger = winston.createLogger({
+  level: process.env.LOG_LEVEL || (isProduction ? 'info' : 'debug'),
+  format: winston.format.combine(
+    winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
+    winston.format.errors({ stack: true }),
+    sanitizeFormat(),
+    winston.format.json()
+  ),
+  defaultMeta: {
+    service: 're-workflow-backend',
+    hostname: os.hostname(),
+  },
+  transports,
+});
+
+// ============ HELPER METHODS FOR STRUCTURED LOGGING ============
+
+/**
+ * Log with additional context labels (will appear in Grafana)
+ */
+export const logWithContext = (
+  level: 'info' | 'warn' | 'error' | 'debug',
+  message: string,
+  context: {
+    // Domain labels
+    requestId?: string;
+    userId?: string;
+    priority?: 'STANDARD' | 'EXPRESS';
+    status?: string;
+    department?: string;
+    // API labels
+    endpoint?: string;
+    method?: string;
+    statusCode?: number;
+    duration?: number;
+    // Error context
+    errorType?: string;
+    error?: any;
+    stack?: string;
+    // Custom data
+    [key: string]: any;
+  }
+) => {
+  // Sanitize error if present
+  const sanitizedContext = { ...context };
+  if (sanitizedContext.error) {
+    sanitizedContext.error = sanitizeError(sanitizedContext.error);
+  }
+
+  logger.log(level, message, sanitizedContext);
+};
+
+/**
+ * Log API request (use in middleware or controllers)
+ */
+export const logApiRequest = (
+  method: string,
+  endpoint: string,
+  statusCode: number,
+  duration: number,
+  userId?: string,
+  error?: string
+) => {
+  const level = statusCode >= 500 ? 'error' : statusCode >= 400 ? 'warn' : 'info';
+  logger.log(level, `${method} ${endpoint} ${statusCode} ${duration}ms`, {
+    endpoint,
+    method,
+    statusCode,
+    duration,
+    userId,
+    ...(error && { error }),
+  });
+};
+
+/**
+ * Log workflow events
+ */
+export const logWorkflowEvent = (
+  event: 'created' | 'submitted' | 'approved' | 'rejected' | 'closed' | 'paused' | 'resumed' | 'updated',
+  requestId: string,
+  details: {
+    priority?: string;
+    status?: string;
+    department?: string;
+    userId?: string;
+    userName?: string;
+    message?: string;
+    level?: number;
+    [key: string]: any;
+  } = {}
+) => {
+  logger.info(`Workflow ${event}: ${requestId}`, {
+    workflowEvent: event,
+    requestId,
+    ...details,
+  });
+};
+
+/**
+ * Log TAT/SLA events
+ */
+export const logTATEvent = (
+  event: 'approaching' | 'breached' | 'resolved' | 'warning',
+  requestId: string,
+  details: {
+    priority?: string;
+    threshold?: number;
+    elapsedHours?: number;
+    tatHours?: number;
+    level?: number;
+    [key: string]: any;
+  } = {}
+) => {
+  const level = event === 'breached' ? 'error' : event === 'approaching' || event === 'warning' ? 'warn' : 'info';
+  logger.log(level, `TAT ${event}: ${requestId}`, {
+    tatEvent: event,
+    requestId,
+    ...details,
+  });
+};
+
+/**
+ * Log authentication events
+ */
+export const logAuthEvent = (
+  event: 'login' | 'logout' | 'token_refresh' | 'token_exchange' | 'auth_failure' | 'sso_callback',
+  userId: string | undefined,
+  details: {
+    email?: string;
+    role?: string;
+    ip?: string;
+    userAgent?: string;
+    error?: any;
+    [key: string]: any;
+  } = {}
+) => {
+  const level = event === 'auth_failure' ? 'warn' : 'info';
+
+  // Sanitize error if present
+  const sanitizedDetails = { ...details };
+  if (sanitizedDetails.error) {
+    sanitizedDetails.error = sanitizeError(sanitizedDetails.error);
+  }
+
+  logger.log(level, `Auth ${event}${userId ? `: ${userId}` : ''}`, {
+    authEvent: event,
+    userId,
+    ...sanitizedDetails,
+  });
+};
+
+/**
+ * Log document events
+ */
+export const logDocumentEvent = (
+  event: 'uploaded' | 'downloaded' | 'deleted' | 'previewed',
+  documentId: string,
+  details: {
+    requestId?: string;
+    userId?: string;
+    fileName?: string;
+    fileType?: string;
+    fileSize?: number;
+    [key: string]: any;
+  } = {}
+) => {
+  logger.info(`Document ${event}: ${documentId}`, {
+    documentEvent: event,
+    documentId,
+    ...details,
+  });
+};
+
+/**
+ * Log notification events
+ */
+export const logNotificationEvent = (
+  event: 'sent' | 'failed' | 'queued',
+  details: {
+    type?: string;
+    userId?: string;
+    requestId?: string;
+    channel?: 'push' | 'email' | 'in-app';
+    error?: any;
+    [key: string]: any;
+  } = {}
+) => {
+  const level = event === 'failed' ? 'error' : 'info';
+
+  // Sanitize error if present
+  const sanitizedDetails = { ...details };
+  if (sanitizedDetails.error) {
+    sanitizedDetails.error = sanitizeError(sanitizedDetails.error);
+  }
+
+  logger.log(level, `Notification ${event}`, {
+    notificationEvent: event,
+    ...sanitizedDetails,
+  });
+};
+
+/**
+ * Log AI service events
+ */
+export const logAIEvent = (
+  event: 'request' | 'response' | 'error' | 'fallback',
+  details: {
+    provider?: string;
+    model?: string;
+    requestId?: string;
+    duration?: number;
+    error?: any;
+    [key: string]: any;
+  } = {}
+) => {
+  const level = event === 'error' ? 'error' : 'info';
+
+  // Sanitize error if present
+  const sanitizedDetails = { ...details };
+  if (sanitizedDetails.error) {
+    sanitizedDetails.error = sanitizeError(sanitizedDetails.error);
+  }
+
+  logger.log(level, `AI ${event}`, {
+    aiEvent: event,
+    ...sanitizedDetails,
+  });
+};
+
+// ============ MORGAN STREAM ============
 // Create a stream object for Morgan HTTP logging
-// Use type assertion to bypass TypeScript's strict checking for the stream property
 const loggerWithStream = logger as any;
 loggerWithStream.stream = {
   write: (message: string) => {

@@ -52,4 +444,6 @@ loggerWithStream.stream = {
   },
 };

+// Export helper functions and logger
+export { sanitizeError };
 export default loggerWithStream as winston.Logger;
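A quick usage sketch of the new structured-logging helpers and the masking behavior. The event values and field names come from the definitions above; the masked output shown is approximate, since the exact masking depends on which pattern matches first:

```typescript
import logger, { logWorkflowEvent, logTATEvent, logWithContext } from '@utils/logger';

// Structured events become queryable fields in Grafana/Loki:
logWorkflowEvent('approved', 'REQ-123', { level: 2, userId: 'u-42', priority: 'EXPRESS' });
logTATEvent('breached', 'REQ-123', { level: 2, tatHours: 24, elapsedHours: 26 }); // logged at error level

// Arbitrary context travels as metadata and is sanitized on the way through:
logWithContext('warn', 'Slow upstream call', { endpoint: '/workflows', duration: 4200 });

// Sensitive values are masked before they reach any transport, e.g.:
logger.info('connecting with api_key: sk-abcdefghij1234567890');
// -> stored roughly as "connecting with api_key=sk***90" (illustrative)
```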
src/validators/userPreference.validator.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
+import { z } from 'zod';
+
+export const updateNotificationPreferencesSchema = z.object({
+  emailNotificationsEnabled: z.boolean().optional(),
+  pushNotificationsEnabled: z.boolean().optional(),
+  inAppNotificationsEnabled: z.boolean().optional()
+}).refine(
+  (data) => Object.keys(data).length > 0,
+  { message: 'At least one notification preference must be provided' }
+);
+
+export type UpdateNotificationPreferencesRequest = z.infer<typeof updateNotificationPreferencesSchema>;
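Since every field is optional, the `.refine()` guard is what rejects an empty body. A usage sketch:

```typescript
import { updateNotificationPreferencesSchema } from './userPreference.validator';

// Valid: at least one preference present
updateNotificationPreferencesSchema.parse({ pushNotificationsEnabled: false });

// Invalid: an empty object passes the field checks but fails the refine
const result = updateNotificationPreferencesSchema.safeParse({});
if (!result.success) {
  console.log(result.error.issues[0].message);
  // "At least one notification preference must be provided"
}
```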
@@ -1,29 +1,49 @@
 import { z } from 'zod';

+// Simplified approval level schema - only requires email and tatHours
+// Backend will enrich with user details (approverId, approverName, levelName)
+const simplifiedApprovalLevelSchema = z.object({
+  email: z.string().email('Valid email is required'),
+  tatHours: z.number().positive('TAT hours must be positive'),
+  isFinalApprover: z.boolean().optional(),
+  // Optional fields that backend will auto-populate if not provided
+  levelNumber: z.number().int().min(1).max(10).optional(),
+  levelName: z.string().optional(),
+  approverId: z.string().uuid().optional(),
+  approverEmail: z.string().email().optional(),
+  approverName: z.string().optional(),
+});
+
+// Simplified spectator schema - only requires email
+const simplifiedSpectatorSchema = z.object({
+  email: z.string().email('Valid email is required').optional(),
+  // Optional fields that backend will auto-populate if not provided
+  userId: z.string().uuid().optional(),
+  userEmail: z.string().email().optional(),
+  userName: z.string().optional(),
+  participantType: z.enum(['INITIATOR', 'APPROVER', 'SPECTATOR'] as const).optional(),
+  canComment: z.boolean().optional(),
+  canViewDocuments: z.boolean().optional(),
+  canDownloadDocuments: z.boolean().optional(),
+  notificationEnabled: z.boolean().optional(),
+});
+
 export const createWorkflowSchema = z.object({
   templateType: z.enum(['CUSTOM', 'TEMPLATE']),
   title: z.string().min(1, 'Title is required').max(500, 'Title too long'),
   description: z.string().min(1, 'Description is required'),
   priority: z.enum(['STANDARD', 'EXPRESS'] as const),
-  approvalLevels: z.array(z.object({
-    levelNumber: z.number().int().min(1).max(10),
-    levelName: z.string().optional(),
-    approverId: z.string().uuid(),
-    approverEmail: z.string().email(),
-    approverName: z.string().min(1),
-    tatHours: z.number().positive(),
-    isFinalApprover: z.boolean().optional(),
-  })).min(1, 'At least one approval level is required').max(10, 'Maximum 10 approval levels allowed'),
-  participants: z.array(z.object({
-    userId: z.string().uuid(),
-    userEmail: z.string().email(),
-    userName: z.string().min(1),
-    participantType: z.enum(['INITIATOR', 'APPROVER', 'SPECTATOR'] as const),
-    canComment: z.boolean().optional(),
-    canViewDocuments: z.boolean().optional(),
-    canDownloadDocuments: z.boolean().optional(),
-    notificationEnabled: z.boolean().optional(),
-  })).optional(),
+  approvalLevels: z.array(simplifiedApprovalLevelSchema)
+    .min(1, 'At least one approval level is required')
+    .max(10, 'Maximum 10 approval levels allowed'),
+  participants: z.array(simplifiedSpectatorSchema).optional(),
+  spectators: z.array(simplifiedSpectatorSchema).optional(), // Alias for participants
+  // Additional frontend compatibility fields
+  approverCount: z.number().optional(),
+  approvers: z.array(z.any()).optional(),
+  priorityUi: z.string().optional(),
+  templateId: z.string().optional(),
+  ccList: z.array(z.any()).optional(),
 });

 export const updateWorkflowSchema = z.object({
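The custom messages in the simplified schema surface directly in validation errors. A sketch of a deliberately failing payload (values chosen to trip both checks):

```typescript
import { createWorkflowSchema } from './workflow.validator';

const result = createWorkflowSchema.safeParse({
  templateType: 'CUSTOM',
  title: 'PO Approval',
  description: 'Test',
  priority: 'STANDARD',
  approvalLevels: [{ email: 'not-an-email', tatHours: -5 }],
});

if (!result.success) {
  for (const issue of result.error.issues) {
    console.log(issue.path.join('.'), '->', issue.message);
  }
  // approvalLevels.0.email -> Valid email is required
  // approvalLevels.0.tatHours -> TAT hours must be positive
}
```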