Compare commits

...

4 Commits

27 changed files with 1652 additions and 720 deletions

View File

@@ -0,0 +1,63 @@
# Migration Merge Complete ✅
## Status: All Conflicts Resolved
Both migration files have been successfully merged with all conflicts resolved.
## Files Merged
### 1. `src/scripts/auto-setup.ts`
- **Status**: Clean, no conflict markers
- **Migrations**: All 40 migrations in correct order
- **Format**: Uses `require()` for CommonJS compatibility
### 2. `src/scripts/migrate.ts`
- **Status**: Clean, no conflict markers
- **Migrations**: All 40 migrations in correct order
- **Format**: Uses ES6 `import * as` syntax
## Migration Order (Final)
### Base Branch Migrations (m0-m29)
1. m0-m27: Core system migrations
2. m28: `20250130-migrate-to-vertex-ai`
3. m29: `20251203-add-user-notification-preferences`
### Dealer Claim Branch Migrations (m30-m39)
4. m30: `20251210-add-workflow-type-support`
5. m31: `20251210-enhance-workflow-templates`
6. m32: `20251210-add-template-id-foreign-key`
7. m33: `20251210-create-dealer-claim-tables`
8. m34: `20251210-create-proposal-cost-items-table`
9. m35: `20251211-create-internal-orders-table`
10. m36: `20251211-create-claim-budget-tracking-table`
11. m37: `20251213-drop-claim-details-invoice-columns`
12. m38: `20251213-create-claim-invoice-credit-note-tables`
13. m39: `20251214-create-dealer-completion-expenses`
## Verification
✅ No conflict markers (`<<<<<<<`, `=======`, `>>>>>>>`) found
✅ All migrations properly ordered
✅ Base branch migrations come first
✅ Dealer claim migrations follow
✅ Both files synchronized
## Next Steps
1. **If you see conflicts in your IDE/Git client:**
- Refresh your IDE/editor
- Run `git status` to check Git state
- If conflicts show in Git, run: `git add src/scripts/auto-setup.ts src/scripts/migrate.ts`
2. **Test the migrations:**
```bash
npm run migrate
# or
npm run setup
```
## Files Are Ready ✅
Both files are properly merged and ready to use. All 40 migrations are in the correct order with base branch migrations first, followed by dealer claim branch migrations.

View File

@@ -1,2 +1,7 @@
<<<<<<<< HEAD:build/assets/conclusionApi-uNxtglEr.js
import{a as t}from"./index-9cOIFSn9.js";import"./radix-vendor-C2EbRL2a.js";import"./charts-vendor-Cji9-Yri.js";import"./utils-vendor-DHm03ykU.js";import"./ui-vendor-BmvKDhMD.js";import"./socket-vendor-TjCxX7sJ.js";import"./redux-vendor-tbZCm13o.js";import"./router-vendor-CRr9x_Jp.js";async function m(n){return(await t.post(`/conclusions/${n}/generate`)).data.data}async function d(n,o){return(await t.post(`/conclusions/${n}/finalize`,{finalRemark:o})).data.data}async function f(n){return(await t.get(`/conclusions/${n}`)).data.data}export{d as finalizeConclusion,m as generateConclusion,f as getConclusion};
//# sourceMappingURL=conclusionApi-uNxtglEr.js.map
========
import{a as t}from"./index-CogACwP9.js";import"./radix-vendor-C2EbRL2a.js";import"./charts-vendor-Cji9-Yri.js";import"./utils-vendor-DHm03ykU.js";import"./ui-vendor-BpFwwBOf.js";import"./socket-vendor-TjCxX7sJ.js";import"./redux-vendor-tbZCm13o.js";import"./router-vendor-CRr9x_Jp.js";async function m(n){return(await t.post(`/conclusions/${n}/generate`)).data.data}async function d(n,o){return(await t.post(`/conclusions/${n}/finalize`,{finalRemark:o})).data.data}async function f(n){return(await t.get(`/conclusions/${n}`)).data.data}export{d as finalizeConclusion,m as generateConclusion,f as getConclusion};
//# sourceMappingURL=conclusionApi-CGl-93sb.js.map
>>>>>>>> ee361a0c4ba611c87efe7f97d044a7c711024d1b:build/assets/conclusionApi-CGl-93sb.js

View File

@@ -1 +1,5 @@
<<<<<<<< HEAD:build/assets/conclusionApi-uNxtglEr.js.map
{"version":3,"file":"conclusionApi-uNxtglEr.js","sources":["../../src/services/conclusionApi.ts"],"sourcesContent":["import apiClient from './authApi';\r\n\r\nexport interface ConclusionRemark {\r\n conclusionId: string;\r\n requestId: string;\r\n aiGeneratedRemark: string | null;\r\n aiModelUsed: string | null;\r\n aiConfidenceScore: number | null;\r\n finalRemark: string | null;\r\n editedBy: string | null;\r\n isEdited: boolean;\r\n editCount: number;\r\n approvalSummary: any;\r\n documentSummary: any;\r\n keyDiscussionPoints: string[];\r\n generatedAt: string | null;\r\n finalizedAt: string | null;\r\n createdAt: string;\r\n updatedAt: string;\r\n}\r\n\r\n/**\r\n * Generate AI-powered conclusion remark\r\n */\r\nexport async function generateConclusion(requestId: string): Promise<{\r\n conclusionId: string;\r\n aiGeneratedRemark: string;\r\n keyDiscussionPoints: string[];\r\n confidence: number;\r\n generatedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/generate`);\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Update conclusion remark (edit by initiator)\r\n */\r\nexport async function updateConclusion(requestId: string, finalRemark: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.put(`/conclusions/${requestId}`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Finalize conclusion and close request\r\n */\r\nexport async function finalizeConclusion(requestId: string, finalRemark: string): Promise<{\r\n conclusionId: string;\r\n requestNumber: string;\r\n status: string;\r\n finalRemark: string;\r\n finalizedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/finalize`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Get conclusion for a request\r\n */\r\nexport async function getConclusion(requestId: string): Promise<ConclusionRemark> {\r\n const response = await 
apiClient.get(`/conclusions/${requestId}`);\r\n return response.data.data;\r\n}\r\n\r\n"],"names":["generateConclusion","requestId","apiClient","finalizeConclusion","finalRemark","getConclusion"],"mappings":"6RAwBA,eAAsBA,EAAmBC,EAMtC,CAED,OADiB,MAAMC,EAAU,KAAK,gBAAgBD,CAAS,WAAW,GAC1D,KAAK,IACvB,CAaA,eAAsBE,EAAmBF,EAAmBG,EAMzD,CAED,OADiB,MAAMF,EAAU,KAAK,gBAAgBD,CAAS,YAAa,CAAE,YAAAG,EAAa,GAC3E,KAAK,IACvB,CAKA,eAAsBC,EAAcJ,EAA8C,CAEhF,OADiB,MAAMC,EAAU,IAAI,gBAAgBD,CAAS,EAAE,GAChD,KAAK,IACvB"}
========
{"version":3,"file":"conclusionApi-CGl-93sb.js","sources":["../../src/services/conclusionApi.ts"],"sourcesContent":["import apiClient from './authApi';\r\n\r\nexport interface ConclusionRemark {\r\n conclusionId: string;\r\n requestId: string;\r\n aiGeneratedRemark: string | null;\r\n aiModelUsed: string | null;\r\n aiConfidenceScore: number | null;\r\n finalRemark: string | null;\r\n editedBy: string | null;\r\n isEdited: boolean;\r\n editCount: number;\r\n approvalSummary: any;\r\n documentSummary: any;\r\n keyDiscussionPoints: string[];\r\n generatedAt: string | null;\r\n finalizedAt: string | null;\r\n createdAt: string;\r\n updatedAt: string;\r\n}\r\n\r\n/**\r\n * Generate AI-powered conclusion remark\r\n */\r\nexport async function generateConclusion(requestId: string): Promise<{\r\n conclusionId: string;\r\n aiGeneratedRemark: string;\r\n keyDiscussionPoints: string[];\r\n confidence: number;\r\n generatedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/generate`);\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Update conclusion remark (edit by initiator)\r\n */\r\nexport async function updateConclusion(requestId: string, finalRemark: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.put(`/conclusions/${requestId}`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Finalize conclusion and close request\r\n */\r\nexport async function finalizeConclusion(requestId: string, finalRemark: string): Promise<{\r\n conclusionId: string;\r\n requestNumber: string;\r\n status: string;\r\n finalRemark: string;\r\n finalizedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/finalize`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Get conclusion for a request\r\n */\r\nexport async function getConclusion(requestId: string): Promise<ConclusionRemark> {\r\n const response = await 
apiClient.get(`/conclusions/${requestId}`);\r\n return response.data.data;\r\n}\r\n\r\n"],"names":["generateConclusion","requestId","apiClient","finalizeConclusion","finalRemark","getConclusion"],"mappings":"6RAwBA,eAAsBA,EAAmBC,EAMtC,CAED,OADiB,MAAMC,EAAU,KAAK,gBAAgBD,CAAS,WAAW,GAC1D,KAAK,IACvB,CAaA,eAAsBE,EAAmBF,EAAmBG,EAMzD,CAED,OADiB,MAAMF,EAAU,KAAK,gBAAgBD,CAAS,YAAa,CAAE,YAAAG,EAAa,GAC3E,KAAK,IACvB,CAKA,eAAsBC,EAAcJ,EAA8C,CAEhF,OADiB,MAAMC,EAAU,IAAI,gBAAgBD,CAAS,EAAE,GAChD,KAAK,IACvB"}
>>>>>>>> ee361a0c4ba611c87efe7f97d044a7c711024d1b:build/assets/conclusionApi-CGl-93sb.js.map

View File

@@ -0,0 +1,7 @@
<<<<<<<< HEAD:build/assets/conclusionApi-uNxtglEr.js
import{a as t}from"./index-9cOIFSn9.js";import"./radix-vendor-C2EbRL2a.js";import"./charts-vendor-Cji9-Yri.js";import"./utils-vendor-DHm03ykU.js";import"./ui-vendor-BmvKDhMD.js";import"./socket-vendor-TjCxX7sJ.js";import"./redux-vendor-tbZCm13o.js";import"./router-vendor-CRr9x_Jp.js";async function m(n){return(await t.post(`/conclusions/${n}/generate`)).data.data}async function d(n,o){return(await t.post(`/conclusions/${n}/finalize`,{finalRemark:o})).data.data}async function f(n){return(await t.get(`/conclusions/${n}`)).data.data}export{d as finalizeConclusion,m as generateConclusion,f as getConclusion};
//# sourceMappingURL=conclusionApi-uNxtglEr.js.map
========
import{a as t}from"./index-CogACwP9.js";import"./radix-vendor-C2EbRL2a.js";import"./charts-vendor-Cji9-Yri.js";import"./utils-vendor-DHm03ykU.js";import"./ui-vendor-BpFwwBOf.js";import"./socket-vendor-TjCxX7sJ.js";import"./redux-vendor-tbZCm13o.js";import"./router-vendor-CRr9x_Jp.js";async function m(n){return(await t.post(`/conclusions/${n}/generate`)).data.data}async function d(n,o){return(await t.post(`/conclusions/${n}/finalize`,{finalRemark:o})).data.data}async function f(n){return(await t.get(`/conclusions/${n}`)).data.data}export{d as finalizeConclusion,m as generateConclusion,f as getConclusion};
//# sourceMappingURL=conclusionApi-CGl-93sb.js.map
>>>>>>>> ee361a0c4ba611c87efe7f97d044a7c711024d1b:build/assets/conclusionApi-CGl-93sb.js

View File

@@ -0,0 +1,5 @@
<<<<<<<< HEAD:build/assets/conclusionApi-uNxtglEr.js.map
{"version":3,"file":"conclusionApi-uNxtglEr.js","sources":["../../src/services/conclusionApi.ts"],"sourcesContent":["import apiClient from './authApi';\r\n\r\nexport interface ConclusionRemark {\r\n conclusionId: string;\r\n requestId: string;\r\n aiGeneratedRemark: string | null;\r\n aiModelUsed: string | null;\r\n aiConfidenceScore: number | null;\r\n finalRemark: string | null;\r\n editedBy: string | null;\r\n isEdited: boolean;\r\n editCount: number;\r\n approvalSummary: any;\r\n documentSummary: any;\r\n keyDiscussionPoints: string[];\r\n generatedAt: string | null;\r\n finalizedAt: string | null;\r\n createdAt: string;\r\n updatedAt: string;\r\n}\r\n\r\n/**\r\n * Generate AI-powered conclusion remark\r\n */\r\nexport async function generateConclusion(requestId: string): Promise<{\r\n conclusionId: string;\r\n aiGeneratedRemark: string;\r\n keyDiscussionPoints: string[];\r\n confidence: number;\r\n generatedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/generate`);\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Update conclusion remark (edit by initiator)\r\n */\r\nexport async function updateConclusion(requestId: string, finalRemark: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.put(`/conclusions/${requestId}`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Finalize conclusion and close request\r\n */\r\nexport async function finalizeConclusion(requestId: string, finalRemark: string): Promise<{\r\n conclusionId: string;\r\n requestNumber: string;\r\n status: string;\r\n finalRemark: string;\r\n finalizedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/finalize`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Get conclusion for a request\r\n */\r\nexport async function getConclusion(requestId: string): Promise<ConclusionRemark> {\r\n const response = await 
apiClient.get(`/conclusions/${requestId}`);\r\n return response.data.data;\r\n}\r\n\r\n"],"names":["generateConclusion","requestId","apiClient","finalizeConclusion","finalRemark","getConclusion"],"mappings":"6RAwBA,eAAsBA,EAAmBC,EAMtC,CAED,OADiB,MAAMC,EAAU,KAAK,gBAAgBD,CAAS,WAAW,GAC1D,KAAK,IACvB,CAaA,eAAsBE,EAAmBF,EAAmBG,EAMzD,CAED,OADiB,MAAMF,EAAU,KAAK,gBAAgBD,CAAS,YAAa,CAAE,YAAAG,EAAa,GAC3E,KAAK,IACvB,CAKA,eAAsBC,EAAcJ,EAA8C,CAEhF,OADiB,MAAMC,EAAU,IAAI,gBAAgBD,CAAS,EAAE,GAChD,KAAK,IACvB"}
========
{"version":3,"file":"conclusionApi-CGl-93sb.js","sources":["../../src/services/conclusionApi.ts"],"sourcesContent":["import apiClient from './authApi';\r\n\r\nexport interface ConclusionRemark {\r\n conclusionId: string;\r\n requestId: string;\r\n aiGeneratedRemark: string | null;\r\n aiModelUsed: string | null;\r\n aiConfidenceScore: number | null;\r\n finalRemark: string | null;\r\n editedBy: string | null;\r\n isEdited: boolean;\r\n editCount: number;\r\n approvalSummary: any;\r\n documentSummary: any;\r\n keyDiscussionPoints: string[];\r\n generatedAt: string | null;\r\n finalizedAt: string | null;\r\n createdAt: string;\r\n updatedAt: string;\r\n}\r\n\r\n/**\r\n * Generate AI-powered conclusion remark\r\n */\r\nexport async function generateConclusion(requestId: string): Promise<{\r\n conclusionId: string;\r\n aiGeneratedRemark: string;\r\n keyDiscussionPoints: string[];\r\n confidence: number;\r\n generatedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/generate`);\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Update conclusion remark (edit by initiator)\r\n */\r\nexport async function updateConclusion(requestId: string, finalRemark: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.put(`/conclusions/${requestId}`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Finalize conclusion and close request\r\n */\r\nexport async function finalizeConclusion(requestId: string, finalRemark: string): Promise<{\r\n conclusionId: string;\r\n requestNumber: string;\r\n status: string;\r\n finalRemark: string;\r\n finalizedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/finalize`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Get conclusion for a request\r\n */\r\nexport async function getConclusion(requestId: string): Promise<ConclusionRemark> {\r\n const response = await 
apiClient.get(`/conclusions/${requestId}`);\r\n return response.data.data;\r\n}\r\n\r\n"],"names":["generateConclusion","requestId","apiClient","finalizeConclusion","finalRemark","getConclusion"],"mappings":"6RAwBA,eAAsBA,EAAmBC,EAMtC,CAED,OADiB,MAAMC,EAAU,KAAK,gBAAgBD,CAAS,WAAW,GAC1D,KAAK,IACvB,CAaA,eAAsBE,EAAmBF,EAAmBG,EAMzD,CAED,OADiB,MAAMF,EAAU,KAAK,gBAAgBD,CAAS,YAAa,CAAE,YAAAG,EAAa,GAC3E,KAAK,IACvB,CAKA,eAAsBC,EAAcJ,EAA8C,CAEhF,OADiB,MAAMC,EAAU,IAAI,gBAAgBD,CAAS,EAAE,GAChD,KAAK,IACvB"}
>>>>>>>> ee361a0c4ba611c87efe7f97d044a7c711024d1b:build/assets/conclusionApi-CGl-93sb.js.map

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -52,15 +52,15 @@
transition: transform 0.2s ease;
}
</style>
<script type="module" crossorigin src="/assets/index-CogACwP9.js"></script>
<script type="module" crossorigin src="/assets/index-9cOIFSn9.js"></script>
<link rel="modulepreload" crossorigin href="/assets/charts-vendor-Cji9-Yri.js">
<link rel="modulepreload" crossorigin href="/assets/radix-vendor-C2EbRL2a.js">
<link rel="modulepreload" crossorigin href="/assets/utils-vendor-DHm03ykU.js">
<link rel="modulepreload" crossorigin href="/assets/ui-vendor-BpFwwBOf.js">
<link rel="modulepreload" crossorigin href="/assets/ui-vendor-BmvKDhMD.js">
<link rel="modulepreload" crossorigin href="/assets/socket-vendor-TjCxX7sJ.js">
<link rel="modulepreload" crossorigin href="/assets/redux-vendor-tbZCm13o.js">
<link rel="modulepreload" crossorigin href="/assets/router-vendor-CRr9x_Jp.js">
<link rel="stylesheet" crossorigin href="/assets/index-BDQMGM0H.css">
<link rel="stylesheet" crossorigin href="/assets/index-BmOYs32D.css">
</head>
<body>
<div id="root"></div>

View File

@@ -0,0 +1,202 @@
# Vertex AI Gemini Integration
## Overview
The AI service has been migrated from multi-provider support (Claude, OpenAI, Gemini API) to **Google Cloud Vertex AI Gemini** using service account authentication. This provides better enterprise-grade security and uses the same credentials as Google Cloud Storage.
## Changes Made
### 1. Package Dependencies
**Removed:**
- `@anthropic-ai/sdk` (Claude SDK)
- `@google/generative-ai` (Gemini API SDK)
- `openai` (OpenAI SDK)
**Added:**
- `@google-cloud/vertexai` (Vertex AI SDK)
### 2. Service Account Authentication
The AI service now uses the same service account JSON file as GCS:
- **Location**: `credentials/re-platform-workflow-dealer-3d5738fcc1f9.json`
- **Project ID**: `re-platform-workflow-dealer`
- **Default Region**: `us-central1`
### 3. Configuration
**Environment Variables:**
```env
# Required (already configured for GCS)
GCP_PROJECT_ID=re-platform-workflow-dealer
GCP_KEY_FILE=./credentials/re-platform-workflow-dealer-3d5738fcc1f9.json
# Optional (defaults provided; note this example sets asia-south1, overriding the default region us-central1)
VERTEX_AI_MODEL=gemini-2.5-flash
VERTEX_AI_LOCATION=asia-south1
AI_ENABLED=true
```
**Admin Panel Configuration:**
- `AI_ENABLED` - Enable/disable AI features
- `VERTEX_AI_MODEL` - Model name (default: `gemini-2.5-flash`)
- `AI_MAX_REMARK_LENGTH` - Maximum characters for conclusion remarks (default: 2000)
### 4. Available Models
| Model Name | Description | Use Case |
|------------|-------------|----------|
| `gemini-2.5-flash` | Latest fast model (default) | General purpose, quick responses |
| `gemini-1.5-flash` | Previous fast model | General purpose |
| `gemini-1.5-pro` | Advanced model | Complex tasks, better quality |
| `gemini-1.5-pro-latest` | Latest Pro version | Best quality, complex reasoning |
### 5. Supported Regions
| Region Code | Location | Availability |
|-------------|----------|--------------|
| `us-central1` | Iowa, USA | ✅ Default |
| `us-east1` | South Carolina, USA | ✅ |
| `us-west1` | Oregon, USA | ✅ |
| `europe-west1` | Belgium | ✅ |
| `asia-south1` | Mumbai, India | ✅ |
## Setup Instructions
### Step 1: Install Dependencies
```bash
cd Re_Backend
npm install
```
This will install `@google-cloud/vertexai` and remove old AI SDKs.
### Step 2: Verify Service Account Permissions
Ensure your service account (`re-bridge-workflow@re-platform-workflow-dealer.iam.gserviceaccount.com`) has:
- **Vertex AI User** role (`roles/aiplatform.user`)
### Step 3: Enable Vertex AI API
1. Go to [Google Cloud Console](https://console.cloud.google.com/)
2. Navigate to **APIs & Services** > **Library**
3. Search for **"Vertex AI API"**
4. Click **Enable**
### Step 4: Verify Configuration
Check that your `.env` file has:
```env
GCP_PROJECT_ID=re-platform-workflow-dealer
GCP_KEY_FILE=./credentials/re-platform-workflow-dealer-3d5738fcc1f9.json
VERTEX_AI_MODEL=gemini-2.5-flash
VERTEX_AI_LOCATION=us-central1
```
### Step 5: Test the Integration
Start the backend and check logs for:
```
[AI Service] ✅ Vertex AI provider initialized successfully with model: gemini-2.5-flash
```
## API Interface (Unchanged)
The public API remains the same - no changes needed in controllers or routes:
```typescript
// Check if AI is available
aiService.isAvailable(): boolean
// Get provider name
aiService.getProviderName(): string
// Generate conclusion remark
aiService.generateConclusionRemark(context): Promise<{
remark: string;
confidence: number;
keyPoints: string[];
provider: string;
}>
// Reinitialize (after config changes)
aiService.reinitialize(): Promise<void>
```
## Troubleshooting
### Error: "Module not found: @google-cloud/vertexai"
**Solution:**
```bash
npm install @google-cloud/vertexai
```
### Error: "Service account key file not found"
**Solution:**
- Verify file exists at: `credentials/re-platform-workflow-dealer-3d5738fcc1f9.json`
- Check `GCP_KEY_FILE` path in `.env` is correct
- Ensure file has read permissions
### Error: "Model was not found or your project does not have access"
**Solution:**
- Verify Vertex AI API is enabled in Google Cloud Console
- Check model name is correct (e.g., `gemini-2.5-flash`)
- Ensure model is available in your selected region
- Verify service account has `roles/aiplatform.user` role
### Error: "Permission denied"
**Solution:**
- Verify service account has Vertex AI User role
- Check service account key hasn't been revoked
- Regenerate service account key if needed
### Error: "API not enabled"
**Solution:**
- Enable Vertex AI API in Google Cloud Console
- Wait a few minutes for propagation
- Restart the backend service
## Migration Notes
### What Changed
- ✅ Removed multi-provider support (Claude, OpenAI, Gemini API)
- ✅ Now uses Vertex AI Gemini exclusively
- ✅ Uses service account authentication (same as GCS)
- ✅ Simplified configuration (no API keys needed)
### What Stayed the Same
- ✅ Public API interface (`aiService` methods)
- ✅ Conclusion generation functionality
- ✅ Admin panel configuration structure
- ✅ Error handling and logging
### Backward Compatibility
- ✅ Existing code using `aiService` will work without changes
- ✅ Conclusion controller unchanged
- ✅ Admin panel can still enable/disable AI features
- ✅ Configuration cache system still works
## Verification Checklist
- [ ] `@google-cloud/vertexai` package installed
- [ ] Service account key file exists and is valid
- [ ] Vertex AI API is enabled in Google Cloud Console
- [ ] Service account has `Vertex AI User` role
- [ ] `.env` file has correct `GCP_PROJECT_ID` and `GCP_KEY_FILE`
- [ ] Backend logs show successful initialization
- [ ] AI conclusion generation works for test requests
## Support
For issues or questions:
1. Check backend logs for detailed error messages
2. Verify Google Cloud Console settings
3. Ensure service account permissions are correct
4. Test with a simple request to isolate issues

View File

@@ -38,16 +38,12 @@ SMTP_USER=notifications@royalenfield.com
SMTP_PASSWORD=your_smtp_password
EMAIL_FROM=RE Workflow System <notifications@royalenfield.com>
# AI Service (for conclusion generation)
# Note: API keys are configured in the admin panel (database), not in environment variables
# AI Provider: claude, openai, or gemini
AI_PROVIDER=claude
# AI Model Configuration (optional - defaults used if not set)
# These can be overridden via environment variables or admin panel
CLAUDE_MODEL=claude-sonnet-4-20250514
OPENAI_MODEL=gpt-4o
GEMINI_MODEL=gemini-2.0-flash-lite
# AI Service (for conclusion generation) - Vertex AI Gemini
# Uses service account credentials from GCP_KEY_FILE
# Vertex AI Model Configuration (optional - defaults used if not set)
VERTEX_AI_MODEL=gemini-2.5-flash
VERTEX_AI_LOCATION=asia-south1
# Note: GCP_PROJECT_ID and GCP_KEY_FILE are already configured above for GCS
# Logging
LOG_LEVEL=info

62
package-lock.json generated
View File

@@ -8,9 +8,8 @@
"name": "re-workflow-backend",
"version": "1.0.0",
"dependencies": {
"@anthropic-ai/sdk": "^0.68.0",
"@google-cloud/storage": "^7.18.0",
"@google/generative-ai": "^0.24.1",
"@google-cloud/vertexai": "^1.10.0",
"@types/nodemailer": "^7.0.4",
"@types/uuid": "^8.3.4",
"axios": "^1.7.9",
@@ -78,26 +77,6 @@
"npm": ">=10.0.0"
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.68.0",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.68.0.tgz",
"integrity": "sha512-SMYAmbbiprG8k1EjEPMTwaTqssDT7Ae+jxcR5kWXiqTlbwMR2AthXtscEVWOHkRfyAV5+y3PFYTJRNa3OJWIEw==",
"license": "MIT",
"dependencies": {
"json-schema-to-ts": "^3.1.1"
},
"bin": {
"anthropic-ai-sdk": "bin/cli"
},
"peerDependencies": {
"zod": "^3.25.0 || ^4.0.0"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
}
},
"node_modules/@aws-crypto/sha256-browser": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/@aws-crypto/sha256-browser/-/sha256-browser-5.2.0.tgz",
@@ -1296,15 +1275,6 @@
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/runtime": {
"version": "7.28.4",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
"integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/template": {
"version": "7.27.2",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
@@ -1681,11 +1651,14 @@
"node": ">=14"
}
},
"node_modules/@google/generative-ai": {
"version": "0.24.1",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.24.1.tgz",
"integrity": "sha512-MqO+MLfM6kjxcKoy0p1wRzG3b4ZZXtPI+z2IE26UogS2Cm/XHO+7gGRBh6gcJsOiIVoH93UwKvW4HdgiOZCy9Q==",
"node_modules/@google-cloud/vertexai": {
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@google-cloud/vertexai/-/vertexai-1.10.0.tgz",
"integrity": "sha512-HqYqoivNtkq59po8m7KI0n+lWKdz4kabENncYQXZCX/hBWJfXtKAfR/2nUQsP+TwSfHKoA7zDL2RrJYIv/j3VQ==",
"license": "Apache-2.0",
"dependencies": {
"google-auth-library": "^9.1.0"
},
"engines": {
"node": ">=18.0.0"
}
@@ -8185,19 +8158,6 @@
"dev": true,
"license": "MIT"
},
"node_modules/json-schema-to-ts": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-3.1.1.tgz",
"integrity": "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.18.3",
"ts-algebra": "^2.0.0"
},
"engines": {
"node": ">=16"
}
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
@@ -11100,12 +11060,6 @@
"node": ">= 14.0.0"
}
},
"node_modules/ts-algebra": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-2.0.0.tgz",
"integrity": "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==",
"license": "MIT"
},
"node_modules/ts-api-utils": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz",

View File

@@ -22,9 +22,8 @@
"cleanup:dealer-claims": "ts-node -r tsconfig-paths/register src/scripts/cleanup-dealer-claims.ts"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.68.0",
"@google-cloud/storage": "^7.18.0",
"@google/generative-ai": "^0.24.1",
"@google-cloud/vertexai": "^1.10.0",
"@types/nodemailer": "^7.0.4",
"@types/uuid": "^8.3.4",
"axios": "^1.7.9",

View File

@@ -112,8 +112,15 @@ create_env_file() {
read -p "Enter GCP_PROJECT_ID (or press Enter to skip): " GCP_PROJECT_ID
read -p "Enter GCP_BUCKET_NAME (or press Enter to skip): " GCP_BUCKET_NAME
read -p "Enter CLAUDE_MODEL [default: claude-sonnet-4-20250514]: " CLAUDE_MODEL
CLAUDE_MODEL=${CLAUDE_MODEL:-claude-sonnet-4-20250514}
# Vertex AI Configuration
echo ""
echo "--- Vertex AI Gemini Configuration (Optional) ---"
echo "Note: These have defaults and are optional. Service account credentials are required."
read -p "Enter VERTEX_AI_MODEL [default: gemini-2.5-flash]: " VERTEX_AI_MODEL
VERTEX_AI_MODEL=${VERTEX_AI_MODEL:-gemini-2.5-flash}
read -p "Enter VERTEX_AI_LOCATION [default: us-central1]: " VERTEX_AI_LOCATION
VERTEX_AI_LOCATION=${VERTEX_AI_LOCATION:-us-central1}
# Create .env file
cat > "$file_name" << EOF
@@ -157,8 +164,10 @@ SMTP_USER=${SMTP_USER}
SMTP_PASSWORD=${SMTP_PASSWORD}
EMAIL_FROM=RE Workflow System <notifications@royalenfield.com>
# AI Service (for conclusion generation) mandatory for claude
CLAUDE_MODEL=${CLAUDE_MODEL}
# Vertex AI Gemini Configuration (for conclusion generation)
# Service account credentials should be placed in ./credentials/ folder
VERTEX_AI_MODEL=${VERTEX_AI_MODEL}
VERTEX_AI_LOCATION=${VERTEX_AI_LOCATION}
# Logging
LOG_LEVEL=info

View File

@@ -433,7 +433,7 @@ export const updateConfiguration = async (req: Request, res: Response): Promise<
}
// If AI config was updated, reinitialize AI service
const aiConfigKeys = ['AI_PROVIDER', 'CLAUDE_API_KEY', 'OPENAI_API_KEY', 'GEMINI_API_KEY', 'CLAUDE_MODEL', 'OPENAI_MODEL', 'GEMINI_MODEL', 'AI_ENABLED'];
const aiConfigKeys = ['AI_ENABLED'];
if (aiConfigKeys.includes(configKey)) {
try {
const { aiService } = require('../services/ai.service');

View File

@@ -65,7 +65,7 @@ export class ConclusionController {
logger.warn(`[Conclusion] AI service unavailable for request ${requestId}`);
return res.status(503).json({
error: 'AI service not available',
message: 'AI features are currently unavailable. Please configure an AI provider (Claude, OpenAI, or Gemini) in the admin panel, or write the conclusion manually.',
message: 'AI features are currently unavailable. Please verify Vertex AI configuration and service account credentials, or write the conclusion manually.',
canContinueManually: true
});
}

View File

@@ -0,0 +1,199 @@
import { QueryInterface, QueryTypes } from 'sequelize';
/**
* Migration to migrate from multi-provider AI to Vertex AI Gemini
*
* Removes:
* - AI_PROVIDER
* - CLAUDE_API_KEY, OPENAI_API_KEY, GEMINI_API_KEY
* - CLAUDE_MODEL, OPENAI_MODEL, GEMINI_MODEL
* - VERTEX_AI_MODEL (moved to environment variable only)
* - VERTEX_AI_LOCATION (moved to environment variable only)
*
* Note: Both VERTEX_AI_MODEL and VERTEX_AI_LOCATION are now configured via
* environment variables only (not in admin settings).
*
* This migration is idempotent - it will only delete configs that exist.
*/
export async function up(queryInterface: QueryInterface): Promise<void> {
// Delete the legacy multi-provider AI config rows (plus the two Vertex AI
// keys that moved to environment-only configuration) in a single statement.
// Raw SQL is used because this targets seeded rows, not schema.
// Idempotent: WHERE ... IN simply matches zero rows on a re-run.
await queryInterface.sequelize.query(`
DELETE FROM admin_configurations
WHERE config_key IN (
'AI_PROVIDER',
'CLAUDE_API_KEY',
'OPENAI_API_KEY',
'GEMINI_API_KEY',
'CLAUDE_MODEL',
'OPENAI_MODEL',
'GEMINI_MODEL',
'VERTEX_AI_MODEL',
'VERTEX_AI_LOCATION'
)
`, { type: QueryTypes.DELETE }); // QueryTypes.DELETE: no result set expected
}
/**
 * Rollback: re-insert the legacy multi-provider AI configuration rows so the
 * pre-Vertex admin panel works again.
 *
 * Notes:
 * - API-key rows are restored with EMPTY values; operators must re-enter
 *   their secrets after rolling back.
 * - VERTEX_AI_MODEL / VERTEX_AI_LOCATION are intentionally NOT restored —
 *   they are environment-variable-only settings now.
 * - ON CONFLICT (config_key) DO NOTHING keeps this rollback idempotent.
 */
export async function down(queryInterface: QueryInterface): Promise<void> {
  // The previous log message claimed the rollback was skipped, which
  // contradicted the INSERT below; it now describes what actually happens.
  console.log('[Migration] Restoring legacy AI provider configurations (API keys restored empty; VERTEX_AI_* remain environment-only)');
  // Restore old configurations (for rollback)
  await queryInterface.sequelize.query(`
    INSERT INTO admin_configurations (
      config_id, config_key, config_category, config_value, value_type,
      display_name, description, default_value, is_editable, is_sensitive,
      validation_rules, ui_component, options, sort_order, requires_restart,
      last_modified_by, last_modified_at, created_at, updated_at
    ) VALUES
    (
      gen_random_uuid(),
      'AI_PROVIDER',
      'AI_CONFIGURATION',
      'claude',
      'STRING',
      'AI Provider',
      'Active AI provider for conclusion generation (claude, openai, or gemini)',
      'claude',
      true,
      false,
      '{"enum": ["claude", "openai", "gemini"], "required": true}'::jsonb,
      'select',
      '["claude", "openai", "gemini"]'::jsonb,
      22,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    ),
    (
      gen_random_uuid(),
      'CLAUDE_API_KEY',
      'AI_CONFIGURATION',
      '',
      'STRING',
      'Claude API Key',
      'API key for Claude (Anthropic) - Get from console.anthropic.com',
      '',
      true,
      true,
      '{"pattern": "^sk-ant-", "minLength": 40}'::jsonb,
      'input',
      NULL,
      23,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    ),
    (
      gen_random_uuid(),
      'OPENAI_API_KEY',
      'AI_CONFIGURATION',
      '',
      'STRING',
      'OpenAI API Key',
      'API key for OpenAI (GPT-4) - Get from platform.openai.com',
      '',
      true,
      true,
      '{"pattern": "^sk-", "minLength": 40}'::jsonb,
      'input',
      NULL,
      24,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    ),
    (
      gen_random_uuid(),
      'GEMINI_API_KEY',
      'AI_CONFIGURATION',
      '',
      'STRING',
      'Gemini API Key',
      'API key for Gemini (Google) - Get from ai.google.dev',
      '',
      true,
      true,
      '{"minLength": 20}'::jsonb,
      'input',
      NULL,
      25,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    ),
    (
      gen_random_uuid(),
      'CLAUDE_MODEL',
      'AI_CONFIGURATION',
      'claude-sonnet-4-20250514',
      'STRING',
      'Claude Model',
      'Claude (Anthropic) model to use for AI generation',
      'claude-sonnet-4-20250514',
      true,
      false,
      '{}'::jsonb,
      'input',
      NULL,
      27,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    ),
    (
      gen_random_uuid(),
      'OPENAI_MODEL',
      'AI_CONFIGURATION',
      'gpt-4o',
      'STRING',
      'OpenAI Model',
      'OpenAI model to use for AI generation',
      'gpt-4o',
      true,
      false,
      '{}'::jsonb,
      'input',
      NULL,
      28,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    ),
    (
      gen_random_uuid(),
      'GEMINI_MODEL',
      'AI_CONFIGURATION',
      'gemini-2.0-flash-lite',
      'STRING',
      'Gemini Model',
      'Gemini (Google) model to use for AI generation',
      'gemini-2.0-flash-lite',
      true,
      false,
      '{}'::jsonb,
      'input',
      NULL,
      29,
      false,
      NULL,
      NULL,
      NOW(),
      NOW()
    )
    ON CONFLICT (config_key) DO NOTHING
  `, { type: QueryTypes.INSERT });
}

View File

@ -1,12 +1,42 @@
import { QueryInterface, DataTypes } from 'sequelize';
/**
* Helper function to check if a column exists in a table
*/
/**
 * Report whether `columnName` is present on `tableName`.
 * Resolves to false when the table itself cannot be described
 * (e.g. it does not exist), so callers never need their own try/catch.
 */
async function columnExists(
  queryInterface: QueryInterface,
  tableName: string,
  columnName: string
): Promise<boolean> {
  let description;
  try {
    description = await queryInterface.describeTable(tableName);
  } catch {
    // describeTable throws for missing tables — treat as "column absent".
    return false;
  }
  return columnName in description;
}
/**
 * Drop the invoice / credit-note columns from dealer_claim_details.
 * That data now lives in dedicated invoice and credit-note tables.
 *
 * Idempotent: each column is checked for existence before removal, so the
 * migration is safe to re-run and safe when dealer_claim_details was created
 * without these columns in the first place.
 */
export async function up(queryInterface: QueryInterface): Promise<void> {
  // NOTE: the previous version also issued unconditional removeColumn calls
  // ahead of this loop (merge residue); those would throw when a column was
  // already absent, defeating the idempotency the loop provides.
  const columnsToRemove = [
    'dms_number',
    'e_invoice_number',
    'e_invoice_date',
    'credit_note_number',
    'credit_note_date',
    'credit_note_amount',
  ];

  // Only remove columns if they exist
  // This handles the case where dealer_claim_details was created without these columns
  for (const columnName of columnsToRemove) {
    const exists = await columnExists(queryInterface, 'dealer_claim_details', columnName);
    if (exists) {
      await queryInterface.removeColumn('dealer_claim_details', columnName);
      console.log(`  ✅ Removed column: ${columnName}`);
    } else {
      console.log(`  ⏭️ Column ${columnName} does not exist, skipping...`);
    }
  }
}
export async function down(queryInterface: QueryInterface): Promise<void> {

View File

@ -118,14 +118,20 @@ async function runMigrations(): Promise<void> {
const m25 = require('../migrations/20250126-add-pause-fields-to-workflow-requests');
const m26 = require('../migrations/20250126-add-pause-fields-to-approval-levels');
const m27 = require('../migrations/20250127-migrate-in-progress-to-pending');
const m28 = require('../migrations/20251203-add-user-notification-preferences');
const m29 = require('../migrations/20251210-add-workflow-type-support');
const m30 = require('../migrations/20251210-enhance-workflow-templates');
const m31 = require('../migrations/20251210-add-template-id-foreign-key');
const m32 = require('../migrations/20251210-create-dealer-claim-tables');
const m33 = require('../migrations/20251210-create-proposal-cost-items-table');
const m34 = require('../migrations/20251211-create-internal-orders-table');
const m35 = require('../migrations/20251211-create-claim-budget-tracking-table');
// Base branch migrations (m28-m29)
const m28 = require('../migrations/20250130-migrate-to-vertex-ai');
const m29 = require('../migrations/20251203-add-user-notification-preferences');
// Dealer claim branch migrations (m30-m39)
const m30 = require('../migrations/20251210-add-workflow-type-support');
const m31 = require('../migrations/20251210-enhance-workflow-templates');
const m32 = require('../migrations/20251210-add-template-id-foreign-key');
const m33 = require('../migrations/20251210-create-dealer-claim-tables');
const m34 = require('../migrations/20251210-create-proposal-cost-items-table');
const m35 = require('../migrations/20251211-create-internal-orders-table');
const m36 = require('../migrations/20251211-create-claim-budget-tracking-table');
const m37 = require('../migrations/20251213-drop-claim-details-invoice-columns');
const m38 = require('../migrations/20251213-create-claim-invoice-credit-note-tables');
const m39 = require('../migrations/20251214-create-dealer-completion-expenses');
const migrations = [
{ name: '2025103000-create-users', module: m0 },
@ -156,14 +162,20 @@ async function runMigrations(): Promise<void> {
{ name: '20250126-add-pause-fields-to-workflow-requests', module: m25 },
{ name: '20250126-add-pause-fields-to-approval-levels', module: m26 },
{ name: '20250127-migrate-in-progress-to-pending', module: m27 },
{ name: '20251203-add-user-notification-preferences', module: m28 },
{ name: '20251210-add-workflow-type-support', module: m29 },
{ name: '20251210-enhance-workflow-templates', module: m30 },
{ name: '20251210-add-template-id-foreign-key', module: m31 },
{ name: '20251210-create-dealer-claim-tables', module: m32 },
{ name: '20251210-create-proposal-cost-items-table', module: m33 },
{ name: '20251211-create-internal-orders-table', module: m34 },
{ name: '20251211-create-claim-budget-tracking-table', module: m35 },
// Base branch migrations (m28-m29)
{ name: '20250130-migrate-to-vertex-ai', module: m28 },
{ name: '20251203-add-user-notification-preferences', module: m29 },
// Dealer claim branch migrations (m30-m39)
{ name: '20251210-add-workflow-type-support', module: m30 },
{ name: '20251210-enhance-workflow-templates', module: m31 },
{ name: '20251210-add-template-id-foreign-key', module: m32 },
{ name: '20251210-create-dealer-claim-tables', module: m33 },
{ name: '20251210-create-proposal-cost-items-table', module: m34 },
{ name: '20251211-create-internal-orders-table', module: m35 },
{ name: '20251211-create-claim-budget-tracking-table', module: m36 },
{ name: '20251213-drop-claim-details-invoice-columns', module: m37 },
{ name: '20251213-create-claim-invoice-credit-note-tables', module: m38 },
{ name: '20251214-create-dealer-completion-expenses', module: m39 },
];
const queryInterface = sequelize.getQueryInterface();

View File

@ -22,15 +22,26 @@ import * as m18 from '../migrations/20251118-add-breach-reason-to-approval-level
import * as m19 from '../migrations/20251121-add-ai-model-configs';
import * as m20 from '../migrations/20250122-create-request-summaries';
import * as m21 from '../migrations/20250122-create-shared-summaries';
import * as m22 from '../migrations/20251210-add-workflow-type-support';
import * as m23 from '../migrations/20251210-enhance-workflow-templates';
import * as m24 from '../migrations/20251210-add-template-id-foreign-key';
import * as m25 from '../migrations/20251210-create-dealer-claim-tables';
import * as m26 from '../migrations/20251210-create-proposal-cost-items-table';
import * as m27 from '../migrations/20251211-create-internal-orders-table';
import * as m28 from '../migrations/20251211-create-claim-budget-tracking-table';
import * as m29 from '../migrations/20251213-create-claim-invoice-credit-note-tables';
import * as m30 from '../migrations/20251214-create-dealer-completion-expenses';
import * as m22 from '../migrations/20250123-update-request-number-format';
import * as m23 from '../migrations/20250126-add-paused-to-enum';
import * as m24 from '../migrations/20250126-add-paused-to-workflow-status-enum';
import * as m25 from '../migrations/20250126-add-pause-fields-to-workflow-requests';
import * as m26 from '../migrations/20250126-add-pause-fields-to-approval-levels';
import * as m27 from '../migrations/20250127-migrate-in-progress-to-pending';
// Base branch migrations (m28-m29)
import * as m28 from '../migrations/20250130-migrate-to-vertex-ai';
import * as m29 from '../migrations/20251203-add-user-notification-preferences';
// Dealer claim branch migrations (m30-m39)
import * as m30 from '../migrations/20251210-add-workflow-type-support';
import * as m31 from '../migrations/20251210-enhance-workflow-templates';
import * as m32 from '../migrations/20251210-add-template-id-foreign-key';
import * as m33 from '../migrations/20251210-create-dealer-claim-tables';
import * as m34 from '../migrations/20251210-create-proposal-cost-items-table';
import * as m35 from '../migrations/20251211-create-internal-orders-table';
import * as m36 from '../migrations/20251211-create-claim-budget-tracking-table';
import * as m37 from '../migrations/20251213-drop-claim-details-invoice-columns';
import * as m38 from '../migrations/20251213-create-claim-invoice-credit-note-tables';
import * as m39 from '../migrations/20251214-create-dealer-completion-expenses';
interface Migration {
name: string;
@ -67,15 +78,26 @@ const migrations: Migration[] = [
{ name: '20251121-add-ai-model-configs', module: m19 },
{ name: '20250122-create-request-summaries', module: m20 },
{ name: '20250122-create-shared-summaries', module: m21 },
{ name: '20251210-add-workflow-type-support', module: m22 },
{ name: '20251210-enhance-workflow-templates', module: m23 },
{ name: '20251210-add-template-id-foreign-key', module: m24 },
{ name: '20251210-create-dealer-claim-tables', module: m25 },
{ name: '20251210-create-proposal-cost-items-table', module: m26 },
{ name: '20251211-create-internal-orders-table', module: m27 },
{ name: '20251211-create-claim-budget-tracking-table', module: m28 },
{ name: '20251213-create-claim-invoice-credit-note-tables', module: m29 },
{ name: '20251214-create-dealer-completion-expenses', module: m30 },
{ name: '20250123-update-request-number-format', module: m22 },
{ name: '20250126-add-paused-to-enum', module: m23 },
{ name: '20250126-add-paused-to-workflow-status-enum', module: m24 },
{ name: '20250126-add-pause-fields-to-workflow-requests', module: m25 },
{ name: '20250126-add-pause-fields-to-approval-levels', module: m26 },
{ name: '20250127-migrate-in-progress-to-pending', module: m27 },
// Base branch migrations (m28-m29)
{ name: '20250130-migrate-to-vertex-ai', module: m28 },
{ name: '20251203-add-user-notification-preferences', module: m29 },
// Dealer claim branch migrations (m30-m39)
{ name: '20251210-add-workflow-type-support', module: m30 },
{ name: '20251210-enhance-workflow-templates', module: m31 },
{ name: '20251210-add-template-id-foreign-key', module: m32 },
{ name: '20251210-create-dealer-claim-tables', module: m33 },
{ name: '20251210-create-proposal-cost-items-table', module: m34 },
{ name: '20251211-create-internal-orders-table', module: m35 },
{ name: '20251211-create-claim-budget-tracking-table', module: m36 },
{ name: '20251213-drop-claim-details-invoice-columns', module: m37 },
{ name: '20251213-create-claim-invoice-credit-note-tables', module: m38 },
{ name: '20251214-create-dealer-completion-expenses', module: m39 },
];
/**

View File

@ -368,79 +368,7 @@ async function seedAdminConfigurations() {
NOW()
),
-- AI Configuration (from migration 20251111-add-ai-provider-configs)
(
gen_random_uuid(),
'AI_PROVIDER',
'AI_CONFIGURATION',
'claude',
'STRING',
'AI Provider',
'Active AI provider for conclusion generation (claude, openai, or gemini)',
'claude',
true,
false,
'{"enum": ["claude", "openai", "gemini"], "required": true}'::jsonb,
'select',
100,
false,
NOW(),
NOW()
),
(
gen_random_uuid(),
'CLAUDE_API_KEY',
'AI_CONFIGURATION',
'',
'STRING',
'Claude API Key',
'API key for Claude (Anthropic) - Get from console.anthropic.com',
'',
true,
true,
'{"pattern": "^sk-ant-", "minLength": 40}'::jsonb,
'input',
101,
false,
NOW(),
NOW()
),
(
gen_random_uuid(),
'OPENAI_API_KEY',
'AI_CONFIGURATION',
'',
'STRING',
'OpenAI API Key',
'API key for OpenAI (GPT-4) - Get from platform.openai.com',
'',
true,
true,
'{"pattern": "^sk-", "minLength": 40}'::jsonb,
'input',
102,
false,
NOW(),
NOW()
),
(
gen_random_uuid(),
'GEMINI_API_KEY',
'AI_CONFIGURATION',
'',
'STRING',
'Gemini API Key',
'API key for Gemini (Google) - Get from ai.google.dev',
'',
true,
true,
'{"minLength": 20}'::jsonb,
'input',
103,
false,
NOW(),
NOW()
),
-- AI Configuration (Vertex AI Gemini)
(
gen_random_uuid(),
'AI_ENABLED',
@ -454,61 +382,7 @@ async function seedAdminConfigurations() {
false,
'{"type": "boolean"}'::jsonb,
'toggle',
104,
false,
NOW(),
NOW()
),
(
gen_random_uuid(),
'CLAUDE_MODEL',
'AI_CONFIGURATION',
'claude-sonnet-4-20250514',
'STRING',
'Claude Model',
'Claude (Anthropic) model to use for AI generation',
'claude-sonnet-4-20250514',
true,
false,
'{}'::jsonb,
'input',
105,
false,
NOW(),
NOW()
),
(
gen_random_uuid(),
'OPENAI_MODEL',
'AI_CONFIGURATION',
'gpt-4o',
'STRING',
'OpenAI Model',
'OpenAI model to use for AI generation',
'gpt-4o',
true,
false,
'{}'::jsonb,
'input',
106,
false,
NOW(),
NOW()
),
(
gen_random_uuid(),
'GEMINI_MODEL',
'AI_CONFIGURATION',
'gemini-2.0-flash-lite',
'STRING',
'Gemini Model',
'Gemini (Google) model to use for AI generation',
'gemini-2.0-flash-lite',
true,
false,
'{}'::jsonb,
'input',
107,
100,
false,
NOW(),
NOW()
@ -526,7 +400,7 @@ async function seedAdminConfigurations() {
false,
'{"type": "boolean"}'::jsonb,
'toggle',
108,
101,
false,
NOW(),
NOW()
@ -544,7 +418,7 @@ async function seedAdminConfigurations() {
false,
'{"type": "number", "min": 500, "max": 5000}'::jsonb,
'number',
109,
104,
false,
NOW(),
NOW()

View File

@ -1,186 +1,19 @@
import logger, { logAIEvent } from '@utils/logger';
import { getAIProviderConfig } from './configReader.service';
import { getConfigValue } from './configReader.service';
import { VertexAI } from '@google-cloud/vertexai';
import { resolve } from 'path';
// Provider-specific interfaces
/**
 * Common contract implemented by each text-generation backend
 * (Claude, OpenAI, Gemini below).
 */
interface AIProvider {
  // Sends `prompt` to the backing model and resolves with the generated text.
  generateText(prompt: string): Promise<string>;
  // True only after the underlying SDK client was constructed successfully.
  isAvailable(): boolean;
  // Human-readable provider label used in logs and API responses.
  getProviderName(): string;
}
// Claude Provider
/**
 * Claude (Anthropic) backend. The SDK is loaded lazily via require() so the
 * dependency stays optional; a missing API key or missing package leaves the
 * provider unavailable instead of crashing startup.
 */
class ClaudeProvider implements AIProvider {
  private client: any = null;
  private model: string;

  constructor(apiKey?: string, model?: string) {
    // Model resolution order: explicit argument, CLAUDE_MODEL env var, default
    // (claude-sonnet-4-20250514, the latest Claude Sonnet 4 as of Nov 2025).
    this.model = model || process.env.CLAUDE_MODEL || 'claude-sonnet-4-20250514';

    // Key resolution order: explicit argument, then either supported env var.
    const resolvedKey = apiKey || process.env.CLAUDE_API_KEY || process.env.ANTHROPIC_API_KEY;
    if (!resolvedKey || resolvedKey.trim() === '') {
      return; // No key configured — stay silently unavailable.
    }

    try {
      // Dynamic require keeps @anthropic-ai/sdk an optional dependency.
      const Anthropic = require('@anthropic-ai/sdk');
      this.client = new Anthropic({ apiKey: resolvedKey });
      logger.info(`[AI Service] ✅ Claude provider initialized with model: ${this.model}`);
    } catch (err: any) {
      if (err.code === 'MODULE_NOT_FOUND') {
        logger.warn('[AI Service] Claude SDK not installed. Run: npm install @anthropic-ai/sdk');
      } else {
        logger.error('[AI Service] Failed to initialize Claude:', err.message);
      }
    }
  }

  async generateText(prompt: string): Promise<string> {
    if (!this.client) {
      throw new Error('Claude client not initialized');
    }
    logAIEvent('request', { provider: 'claude', model: this.model });
    const response = await this.client.messages.create({
      model: this.model,
      max_tokens: 2048, // headroom for longer conclusions
      temperature: 0.3,
      messages: [{ role: 'user', content: prompt }],
    });
    const firstPart = response.content[0];
    if (firstPart.type === 'text') {
      return firstPart.text;
    }
    return '';
  }

  isAvailable(): boolean {
    return this.client !== null;
  }

  getProviderName(): string {
    return 'Claude (Anthropic)';
  }
}
// OpenAI Provider
/**
 * OpenAI (GPT-4) backend. Mirrors ClaudeProvider: the SDK is an optional
 * dependency loaded via require(), and a missing key or package simply leaves
 * the provider unavailable.
 */
class OpenAIProvider implements AIProvider {
  private client: any = null;
  private model: string;

  constructor(apiKey?: string, model?: string) {
    // Model resolution order: explicit argument, OPENAI_MODEL env var, default
    // (gpt-4o, the latest GPT-4 Optimized as of Nov 2025).
    this.model = model || process.env.OPENAI_MODEL || 'gpt-4o';

    // Key resolution order: explicit argument, then env var.
    const resolvedKey = apiKey || process.env.OPENAI_API_KEY;
    if (!resolvedKey || resolvedKey.trim() === '') {
      return; // No key configured — stay silently unavailable.
    }

    try {
      // Dynamic require keeps the openai package an optional dependency.
      const OpenAI = require('openai');
      this.client = new OpenAI({ apiKey: resolvedKey });
      logger.info(`[AI Service] ✅ OpenAI provider initialized with model: ${this.model}`);
    } catch (err: any) {
      if (err.code === 'MODULE_NOT_FOUND') {
        logger.warn('[AI Service] OpenAI SDK not installed. Run: npm install openai');
      } else {
        logger.error('[AI Service] Failed to initialize OpenAI:', err.message);
      }
    }
  }

  async generateText(prompt: string): Promise<string> {
    if (!this.client) {
      throw new Error('OpenAI client not initialized');
    }
    logAIEvent('request', { provider: 'openai', model: this.model });
    const completion = await this.client.chat.completions.create({
      model: this.model,
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 1024,
      temperature: 0.3,
    });
    return completion.choices[0]?.message?.content || '';
  }

  isAvailable(): boolean {
    return this.client !== null;
  }

  getProviderName(): string {
    return 'OpenAI (GPT-4)';
  }
}
// Gemini Provider (Google)
/**
 * Gemini (Google) backend. Same optional-dependency pattern as the other
 * providers: require() the SDK at construction time and stay unavailable if
 * either the key or the package is missing.
 */
class GeminiProvider implements AIProvider {
  private client: any = null;
  private model: string;

  constructor(apiKey?: string, model?: string) {
    // Model resolution order: explicit argument, GEMINI_MODEL env var, default
    // (gemini-2.0-flash-lite, the latest Gemini Flash Lite as of Nov 2025).
    this.model = model || process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite';

    // Key resolution order: explicit argument, then either supported env var.
    const resolvedKey = apiKey || process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY;
    if (!resolvedKey || resolvedKey.trim() === '') {
      return; // No key configured — stay silently unavailable.
    }

    try {
      // Dynamic require keeps @google/generative-ai an optional dependency.
      const { GoogleGenerativeAI } = require('@google/generative-ai');
      this.client = new GoogleGenerativeAI(resolvedKey);
      logger.info(`[AI Service] ✅ Gemini provider initialized with model: ${this.model}`);
    } catch (err: any) {
      if (err.code === 'MODULE_NOT_FOUND') {
        logger.warn('[AI Service] Gemini SDK not installed. Run: npm install @google/generative-ai');
      } else {
        logger.error('[AI Service] Failed to initialize Gemini:', err.message);
      }
    }
  }

  async generateText(prompt: string): Promise<string> {
    if (!this.client) {
      throw new Error('Gemini client not initialized');
    }
    logAIEvent('request', { provider: 'gemini', model: this.model });
    const generativeModel = this.client.getGenerativeModel({ model: this.model });
    const generation = await generativeModel.generateContent(prompt);
    const response = await generation.response;
    return response.text();
  }

  isAvailable(): boolean {
    return this.client !== null;
  }

  getProviderName(): string {
    return 'Gemini (Google)';
  }
}
// Vertex AI Configuration
// GCP project that hosts the Vertex AI resources; overridable via GCP_PROJECT_ID.
const PROJECT_ID = process.env.GCP_PROJECT_ID || 're-platform-workflow-dealer';
// Default Vertex AI region; overridable via VERTEX_AI_LOCATION.
const LOCATION = process.env.VERTEX_AI_LOCATION || 'asia-south1';
// Path to the service-account key file, overridable via GCP_KEY_FILE.
// NOTE(review): a specific key filename is hard-coded as the fallback —
// confirm this credential file is not committed to the repository; prefer
// requiring GCP_KEY_FILE or Application Default Credentials.
const KEY_FILE_PATH = process.env.GCP_KEY_FILE || resolve(__dirname, '../../credentials/re-platform-workflow-dealer-3d5738fcc1f9.json');
// Fallback model used when VERTEX_AI_MODEL is not set.
const DEFAULT_MODEL = 'gemini-2.5-flash';
class AIService {
private provider: AIProvider | null = null;
private providerName: string = 'None';
private vertexAI: VertexAI | null = null;
private model: string = DEFAULT_MODEL;
private isInitialized: boolean = false;
private providerName: string = 'Vertex AI (Gemini)';
constructor() {
// Initialization happens asynchronously
@ -188,110 +21,52 @@ class AIService {
}
/**
* Initialize AI provider from database configuration
* Initialize Vertex AI client
*/
async initialize(): Promise<void> {
try {
// Read AI configuration from database (with env fallback)
const config = await getAIProviderConfig();
// Check if AI is enabled from config
const { getConfigBoolean } = require('./configReader.service');
const enabled = await getConfigBoolean('AI_ENABLED', true);
if (!config.enabled) {
if (!enabled) {
logger.warn('[AI Service] AI features disabled in admin configuration');
this.isInitialized = true;
return;
}
const preferredProvider = config.provider.toLowerCase();
logger.info(`[AI Service] Preferred provider from config: ${preferredProvider}`);
// Get model and location from environment variables only
this.model = process.env.VERTEX_AI_MODEL || DEFAULT_MODEL;
const location = process.env.VERTEX_AI_LOCATION || LOCATION;
// Try to initialize the preferred provider first
let initialized = false;
logger.info(`[AI Service] Initializing Vertex AI with project: ${PROJECT_ID}, location: ${location}, model: ${this.model}`);
switch (preferredProvider) {
case 'openai':
case 'gpt':
initialized = this.tryProvider(new OpenAIProvider(config.openaiKey, config.openaiModel));
break;
case 'gemini':
case 'google':
initialized = this.tryProvider(new GeminiProvider(config.geminiKey, config.geminiModel));
break;
case 'claude':
case 'anthropic':
default:
initialized = this.tryProvider(new ClaudeProvider(config.claudeKey, config.claudeModel));
break;
}
// Fallback: Try other providers if preferred one failed
if (!initialized) {
logger.warn('[AI Service] Preferred provider unavailable. Trying fallbacks...');
const fallbackProviders = [
new ClaudeProvider(config.claudeKey, config.claudeModel),
new OpenAIProvider(config.openaiKey, config.openaiModel),
new GeminiProvider(config.geminiKey, config.geminiModel)
];
for (const provider of fallbackProviders) {
if (this.tryProvider(provider)) {
logger.info(`[AI Service] ✅ Using fallback provider: ${this.providerName}`);
break;
}
}
}
if (!this.provider) {
logger.warn('[AI Service] ⚠️ No AI provider available. AI features will be disabled.');
logger.warn('[AI Service] To enable AI: Configure API keys in admin panel or set environment variables.');
logger.warn('[AI Service] Supported providers: Claude (CLAUDE_API_KEY), OpenAI (OPENAI_API_KEY), Gemini (GEMINI_API_KEY)');
}
// Initialize Vertex AI client with service account credentials
this.vertexAI = new VertexAI({
project: PROJECT_ID,
location: location,
googleAuthOptions: {
keyFilename: KEY_FILE_PATH,
},
});
logger.info(`[AI Service] ✅ Vertex AI provider initialized successfully with model: ${this.model}`);
this.isInitialized = true;
} catch (error) {
logger.error('[AI Service] Failed to initialize from config:', error);
// Fallback to environment variables
try {
this.initializeFromEnv();
} catch (envError) {
logger.error('[AI Service] Environment fallback also failed:', envError);
this.isInitialized = true; // Mark as initialized even if failed
}
}
}
} catch (error: any) {
logger.error('[AI Service] Failed to initialize Vertex AI:', error);
/**
* Fallback initialization from environment variables
*/
private initializeFromEnv(): void {
try {
const preferredProvider = (process.env.AI_PROVIDER || 'claude').toLowerCase();
logger.info(`[AI Service] Using environment variable configuration`);
switch (preferredProvider) {
case 'openai':
case 'gpt':
this.tryProvider(new OpenAIProvider(undefined, process.env.OPENAI_MODEL));
break;
case 'gemini':
case 'google':
this.tryProvider(new GeminiProvider(undefined, process.env.GEMINI_MODEL));
break;
case 'claude':
case 'anthropic':
default:
this.tryProvider(new ClaudeProvider(undefined, process.env.CLAUDE_MODEL));
break;
if (error.code === 'MODULE_NOT_FOUND') {
logger.warn('[AI Service] @google-cloud/vertexai package not installed. Run: npm install @google-cloud/vertexai');
} else if (error.message?.includes('ENOENT') || error.message?.includes('not found')) {
logger.error(`[AI Service] Service account key file not found at: ${KEY_FILE_PATH}`);
logger.error('[AI Service] Please ensure the credentials file exists and GCP_KEY_FILE path is correct');
} else if (error.message?.includes('Could not load the default credentials')) {
logger.error('[AI Service] Failed to load service account credentials. Please verify the key file is valid.');
} else {
logger.error(`[AI Service] Initialization error: ${error.message}`);
}
if (!this.provider) {
logger.warn('[AI Service] ⚠️ No provider available from environment variables either.');
}
this.isInitialized = true;
} catch (error) {
logger.error('[AI Service] Environment initialization failed:', error);
this.isInitialized = true; // Still mark as initialized to prevent infinite loops
this.isInitialized = true; // Mark as initialized even if failed to prevent infinite loops
}
}
@ -299,23 +74,12 @@ class AIService {
* Reinitialize AI provider (call after admin updates config)
*/
async reinitialize(): Promise<void> {
logger.info('[AI Service] Reinitializing AI provider from updated configuration...');
this.provider = null;
this.providerName = 'None';
logger.info('[AI Service] Reinitializing Vertex AI provider from updated configuration...');
this.vertexAI = null;
this.isInitialized = false;
await this.initialize();
}
private tryProvider(provider: AIProvider): boolean {
if (provider.isAvailable()) {
this.provider = provider;
this.providerName = provider.getProviderName();
logger.info(`[AI Service] ✅ Active provider: ${this.providerName}`);
return true;
}
return false;
}
/**
* Get current AI provider name
*/
@ -323,6 +87,58 @@ class AIService {
return this.providerName;
}
/**
* Generate text using Vertex AI Gemini
*/
  private async generateText(prompt: string): Promise<string> {
    if (!this.vertexAI) {
      throw new Error('Vertex AI client not initialized');
    }
    logAIEvent('request', { provider: 'vertex-ai', model: this.model });
    try {
      // Get the generative model with fixed generation settings
      // (2048 output tokens, low temperature for consistent conclusions).
      const generativeModel = this.vertexAI.getGenerativeModel({
        model: this.model,
        generationConfig: {
          maxOutputTokens: 2048,
          temperature: 0.3,
        },
      });
      // Generate content — single user turn containing the whole prompt.
      const request = {
        contents: [{ role: 'user', parts: [{ text: prompt }] }],
      };
      // NOTE(review): despite the variable name, this is the non-streaming
      // generateContent() call — the full response is awaited in one shot.
      const streamingResp = await generativeModel.generateContent(request);
      const response = streamingResp.response;
      // Extract text from the first candidate's first part; anything missing
      // along the optional chain collapses to '' and is treated as an error.
      const text = response.candidates?.[0]?.content?.parts?.[0]?.text || '';
      if (!text) {
        throw new Error('Empty response from Vertex AI');
      }
      return text;
    } catch (error: any) {
      logger.error('[AI Service] Vertex AI generation error:', error);
      // Provide more specific error messages by matching known failure text.
      // NOTE(review): the region reported below is the module-level LOCATION
      // default, which can differ from a VERTEX_AI_LOCATION override applied
      // at initialization — confirm which region was actually used.
      if (error.message?.includes('Model was not found')) {
        throw new Error(`Model ${this.model} not found or not available in region ${LOCATION}. Please check model name and region.`);
      } else if (error.message?.includes('Permission denied')) {
        throw new Error('Permission denied. Please verify service account has Vertex AI User role.');
      } else if (error.message?.includes('API not enabled')) {
        throw new Error('Vertex AI API is not enabled. Please enable it in Google Cloud Console.');
      }
      throw new Error(`Vertex AI generation failed: ${error.message}`);
    }
  }
/**
* Generate conclusion remark for a workflow request
* @param context - All relevant data for generating the conclusion
@ -358,29 +174,28 @@ class AIService {
details: string;
timestamp: string;
}>;
}): Promise<{ remark: string; confidence: number; keyPoints: string[]; provider: string }> {
}): Promise<{ remark: string; confidence: number; keyPoints: string[]; provider: string }> {
// Ensure initialization is complete
if (!this.isInitialized) {
logger.warn('[AI Service] Not yet initialized, attempting initialization...');
await this.initialize();
}
if (!this.provider) {
logger.error('[AI Service] No AI provider available');
throw new Error('AI features are currently unavailable. Please configure an AI provider (Claude, OpenAI, or Gemini) in the admin panel, or write the conclusion manually.');
if (!this.vertexAI) {
logger.error('[AI Service] Vertex AI not available');
throw new Error('AI features are currently unavailable. Please verify Vertex AI configuration and service account credentials.');
}
try {
// Build context prompt with max length from config
const prompt = await this.buildConclusionPrompt(context);
logger.info(`[AI Service] Generating conclusion for request ${context.requestNumber} using ${this.providerName}...`);
logger.info(`[AI Service] Generating conclusion for request ${context.requestNumber} using ${this.providerName} (${this.model})...`);
// Use provider's generateText method
let remarkText = await this.provider.generateText(prompt);
// Use Vertex AI to generate text
let remarkText = await this.generateText(prompt);
// Get max length from config for logging
const { getConfigValue } = require('./configReader.service');
const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
const maxLength = parseInt(maxLengthStr || '2000', 10);
@ -410,7 +225,7 @@ class AIService {
}
/**
* Build the prompt for Claude to generate a professional conclusion remark
* Build the prompt for Vertex AI to generate a professional conclusion remark
*/
private async buildConclusionPrompt(context: any): Promise<string> {
const {
@ -427,7 +242,6 @@ class AIService {
} = context;
// Get max remark length from admin configuration
const { getConfigValue } = require('./configReader.service');
const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
const maxLength = parseInt(maxLengthStr || '2000', 10);
const targetWordCount = Math.floor(maxLength / 6); // Approximate words (avg 6 chars per word)
@ -600,7 +414,7 @@ Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters m
* Calculate confidence score based on response quality
*/
private calculateConfidence(remark: string, context: any): number {
let score = 0.6; // Base score (slightly higher for new prompt)
let score = 0.6; // Base score
// Check if remark has good length (100-400 chars - more realistic)
if (remark.length >= 100 && remark.length <= 400) {
@ -624,9 +438,8 @@ Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters m
* Check if AI service is available
*/
isAvailable(): boolean {
return this.provider !== null;
return this.vertexAI !== null;
}
}
export const aiService = new AIService();

View File

@ -148,28 +148,13 @@ export async function preloadConfigurations(): Promise<void> {
}
/**
* Get AI provider configurations
* Get Vertex AI configurations
*/
export async function getAIProviderConfig(): Promise<{
provider: string;
claudeKey: string;
openaiKey: string;
geminiKey: string;
claudeModel: string;
openaiModel: string;
geminiModel: string;
export async function getVertexAIConfig(): Promise<{
enabled: boolean;
}> {
const provider = await getConfigValue('AI_PROVIDER', 'claude');
const claudeKey = await getConfigValue('CLAUDE_API_KEY', '');
const openaiKey = await getConfigValue('OPENAI_API_KEY', '');
const geminiKey = await getConfigValue('GEMINI_API_KEY', '');
// Get models from database config, fallback to env, then to defaults
const claudeModel = await getConfigValue('CLAUDE_MODEL', process.env.CLAUDE_MODEL || 'claude-sonnet-4-20250514');
const openaiModel = await getConfigValue('OPENAI_MODEL', process.env.OPENAI_MODEL || 'gpt-4o');
const geminiModel = await getConfigValue('GEMINI_MODEL', process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite');
const enabled = await getConfigBoolean('AI_ENABLED', true);
return { provider, claudeKey, openaiKey, geminiKey, claudeModel, openaiModel, geminiModel, enabled };
return { enabled };
}

View File

@ -260,112 +260,7 @@ export async function seedDefaultConfigurations(): Promise<void> {
NOW(),
NOW()
),
-- AI Configuration
(
gen_random_uuid(),
'AI_REMARK_GENERATION_ENABLED',
'AI_CONFIGURATION',
'true',
'BOOLEAN',
'Enable AI Remark Generation',
'Toggle AI-generated conclusion remarks for workflow closures',
'true',
true,
false,
'{}'::jsonb,
'toggle',
NULL,
20,
false,
NULL,
NULL,
NOW(),
NOW()
),
(
gen_random_uuid(),
'AI_PROVIDER',
'AI_CONFIGURATION',
'claude',
'STRING',
'AI Provider',
'Active AI provider for conclusion generation (claude, openai, or gemini)',
'claude',
true,
false,
'{"enum": ["claude", "openai", "gemini"], "required": true}'::jsonb,
'select',
'["claude", "openai", "gemini"]'::jsonb,
22,
false,
NULL,
NULL,
NOW(),
NOW()
),
(
gen_random_uuid(),
'CLAUDE_API_KEY',
'AI_CONFIGURATION',
'',
'STRING',
'Claude API Key',
'API key for Claude (Anthropic) - Get from console.anthropic.com',
'',
true,
true,
'{"pattern": "^sk-ant-", "minLength": 40}'::jsonb,
'input',
NULL,
23,
false,
NULL,
NULL,
NOW(),
NOW()
),
(
gen_random_uuid(),
'OPENAI_API_KEY',
'AI_CONFIGURATION',
'',
'STRING',
'OpenAI API Key',
'API key for OpenAI (GPT-4) - Get from platform.openai.com',
'',
true,
true,
'{"pattern": "^sk-", "minLength": 40}'::jsonb,
'input',
NULL,
24,
false,
NULL,
NULL,
NOW(),
NOW()
),
(
gen_random_uuid(),
'GEMINI_API_KEY',
'AI_CONFIGURATION',
'',
'STRING',
'Gemini API Key',
'API key for Gemini (Google) - Get from ai.google.dev',
'',
true,
true,
'{"minLength": 20}'::jsonb,
'input',
NULL,
25,
false,
NULL,
NULL,
NOW(),
NOW()
),
-- AI Configuration (Vertex AI Gemini)
(
gen_random_uuid(),
'AI_ENABLED',
@ -380,7 +275,7 @@ export async function seedDefaultConfigurations(): Promise<void> {
'{"type": "boolean"}'::jsonb,
'toggle',
NULL,
26,
20,
false,
NULL,
NULL,
@ -389,61 +284,19 @@ export async function seedDefaultConfigurations(): Promise<void> {
),
(
gen_random_uuid(),
'CLAUDE_MODEL',
'AI_REMARK_GENERATION_ENABLED',
'AI_CONFIGURATION',
'claude-sonnet-4-20250514',
'STRING',
'Claude Model',
'Claude (Anthropic) model to use for AI generation',
'claude-sonnet-4-20250514',
'true',
'BOOLEAN',
'Enable AI Remark Generation',
'Toggle AI-generated conclusion remarks for workflow closures',
'true',
true,
false,
'{}'::jsonb,
'input',
'toggle',
NULL,
27,
false,
NULL,
NULL,
NOW(),
NOW()
),
(
gen_random_uuid(),
'OPENAI_MODEL',
'AI_CONFIGURATION',
'gpt-4o',
'STRING',
'OpenAI Model',
'OpenAI model to use for AI generation',
'gpt-4o',
true,
false,
'{}'::jsonb,
'input',
NULL,
28,
false,
NULL,
NULL,
NOW(),
NOW()
),
(
gen_random_uuid(),
'GEMINI_MODEL',
'AI_CONFIGURATION',
'gemini-2.0-flash-lite',
'STRING',
'Gemini Model',
'Gemini (Google) model to use for AI generation',
'gemini-2.0-flash-lite',
true,
false,
'{}'::jsonb,
'input',
NULL,
29,
21,
false,
NULL,
NULL,
@ -464,7 +317,7 @@ export async function seedDefaultConfigurations(): Promise<void> {
'{"min": 500, "max": 5000}'::jsonb,
'number',
NULL,
30,
24,
false,
NULL,
NULL,