added email service re-initialization and AI token limit increase

laxmanhalaki 2026-01-21 18:41:10 +05:30
parent 9285c97d4b
commit d1ae0ffaec
10 changed files with 552 additions and 521 deletions

View File

@@ -1,2 +1,2 @@
-import{a as t}from"./index-XMUlTorM.js";import"./radix-vendor-C2EbRL2a.js";import"./charts-vendor-Cji9-Yri.js";import"./utils-vendor-DHm03ykU.js";import"./ui-vendor-BmvKDhMD.js";import"./socket-vendor-TjCxX7sJ.js";import"./redux-vendor-tbZCm13o.js";import"./router-vendor-CRr9x_Jp.js";async function m(n){return(await t.post(`/conclusions/${n}/generate`)).data.data}async function d(n,o){return(await t.post(`/conclusions/${n}/finalize`,{finalRemark:o})).data.data}async function f(n){return(await t.get(`/conclusions/${n}`)).data.data}export{d as finalizeConclusion,m as generateConclusion,f as getConclusion};
+import{a as t}from"./index-D5U31xpx.js";import"./radix-vendor-C2EbRL2a.js";import"./charts-vendor-Cji9-Yri.js";import"./utils-vendor-DHm03ykU.js";import"./ui-vendor-BmvKDhMD.js";import"./socket-vendor-TjCxX7sJ.js";import"./redux-vendor-tbZCm13o.js";import"./router-vendor-CRr9x_Jp.js";async function m(n){return(await t.post(`/conclusions/${n}/generate`)).data.data}async function d(n,o){return(await t.post(`/conclusions/${n}/finalize`,{finalRemark:o})).data.data}async function f(n){return(await t.get(`/conclusions/${n}`)).data.data}export{d as finalizeConclusion,m as generateConclusion,f as getConclusion};
-//# sourceMappingURL=conclusionApi-muWiQD3D.js.map
+//# sourceMappingURL=conclusionApi-xBwvOJP0.js.map

View File

@@ -1 +1 @@
{"version":3,"file":"conclusionApi-muWiQD3D.js","sources":["../../src/services/conclusionApi.ts"],"sourcesContent":["import apiClient from './authApi';\r\n\r\nexport interface ConclusionRemark {\r\n conclusionId: string;\r\n requestId: string;\r\n aiGeneratedRemark: string | null;\r\n aiModelUsed: string | null;\r\n aiConfidenceScore: number | null;\r\n finalRemark: string | null;\r\n editedBy: string | null;\r\n isEdited: boolean;\r\n editCount: number;\r\n approvalSummary: any;\r\n documentSummary: any;\r\n keyDiscussionPoints: string[];\r\n generatedAt: string | null;\r\n finalizedAt: string | null;\r\n createdAt: string;\r\n updatedAt: string;\r\n}\r\n\r\n/**\r\n * Generate AI-powered conclusion remark\r\n */\r\nexport async function generateConclusion(requestId: string): Promise<{\r\n conclusionId: string;\r\n aiGeneratedRemark: string;\r\n keyDiscussionPoints: string[];\r\n confidence: number;\r\n generatedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/generate`);\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Update conclusion remark (edit by initiator)\r\n */\r\nexport async function updateConclusion(requestId: string, finalRemark: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.put(`/conclusions/${requestId}`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Finalize conclusion and close request\r\n */\r\nexport async function finalizeConclusion(requestId: string, finalRemark: string): Promise<{\r\n conclusionId: string;\r\n requestNumber: string;\r\n status: string;\r\n finalRemark: string;\r\n finalizedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/finalize`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Get conclusion for a request\r\n */\r\nexport async function getConclusion(requestId: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.get(`/conclusions/${requestId}`);\r\n return response.data.data;\r\n}\r\n\r\n"],"names":["generateConclusion","requestId","apiClient","finalizeConclusion","finalRemark","getConclusion"],"mappings":"6RAwBA,eAAsBA,EAAmBC,EAMtC,CAED,OADiB,MAAMC,EAAU,KAAK,gBAAgBD,CAAS,WAAW,GAC1D,KAAK,IACvB,CAaA,eAAsBE,EAAmBF,EAAmBG,EAMzD,CAED,OADiB,MAAMF,EAAU,KAAK,gBAAgBD,CAAS,YAAa,CAAE,YAAAG,EAAa,GAC3E,KAAK,IACvB,CAKA,eAAsBC,EAAcJ,EAA8C,CAEhF,OADiB,MAAMC,EAAU,IAAI,gBAAgBD,CAAS,EAAE,GAChD,KAAK,IACvB"} {"version":3,"file":"conclusionApi-xBwvOJP0.js","sources":["../../src/services/conclusionApi.ts"],"sourcesContent":["import apiClient from './authApi';\r\n\r\nexport interface ConclusionRemark {\r\n conclusionId: string;\r\n requestId: string;\r\n aiGeneratedRemark: string | null;\r\n aiModelUsed: string | null;\r\n aiConfidenceScore: number | null;\r\n finalRemark: string | null;\r\n editedBy: string | null;\r\n isEdited: boolean;\r\n editCount: number;\r\n approvalSummary: any;\r\n documentSummary: any;\r\n keyDiscussionPoints: string[];\r\n generatedAt: string | null;\r\n finalizedAt: string | null;\r\n createdAt: string;\r\n updatedAt: string;\r\n}\r\n\r\n/**\r\n * Generate AI-powered conclusion remark\r\n */\r\nexport async function generateConclusion(requestId: string): Promise<{\r\n conclusionId: string;\r\n aiGeneratedRemark: string;\r\n keyDiscussionPoints: string[];\r\n confidence: number;\r\n generatedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/generate`);\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Update conclusion 
remark (edit by initiator)\r\n */\r\nexport async function updateConclusion(requestId: string, finalRemark: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.put(`/conclusions/${requestId}`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Finalize conclusion and close request\r\n */\r\nexport async function finalizeConclusion(requestId: string, finalRemark: string): Promise<{\r\n conclusionId: string;\r\n requestNumber: string;\r\n status: string;\r\n finalRemark: string;\r\n finalizedAt: string;\r\n}> {\r\n const response = await apiClient.post(`/conclusions/${requestId}/finalize`, { finalRemark });\r\n return response.data.data;\r\n}\r\n\r\n/**\r\n * Get conclusion for a request\r\n */\r\nexport async function getConclusion(requestId: string): Promise<ConclusionRemark> {\r\n const response = await apiClient.get(`/conclusions/${requestId}`);\r\n return response.data.data;\r\n}\r\n\r\n"],"names":["generateConclusion","requestId","apiClient","finalizeConclusion","finalRemark","getConclusion"],"mappings":"6RAwBA,eAAsBA,EAAmBC,EAMtC,CAED,OADiB,MAAMC,EAAU,KAAK,gBAAgBD,CAAS,WAAW,GAC1D,KAAK,IACvB,CAaA,eAAsBE,EAAmBF,EAAmBG,EAMzD,CAED,OADiB,MAAMF,EAAU,KAAK,gBAAgBD,CAAS,YAAa,CAAE,YAAAG,EAAa,GAC3E,KAAK,IACvB,CAKA,eAAsBC,EAAcJ,EAA8C,CAEhF,OADiB,MAAMC,EAAU,IAAI,gBAAgBD,CAAS,EAAE,GAChD,KAAK,IACvB"}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -52,7 +52,7 @@
 transition: transform 0.2s ease;
 }
 </style>
-<script type="module" crossorigin src="/assets/index-XMUlTorM.js"></script>
+<script type="module" crossorigin src="/assets/index-D5U31xpx.js"></script>
 <link rel="modulepreload" crossorigin href="/assets/charts-vendor-Cji9-Yri.js">
 <link rel="modulepreload" crossorigin href="/assets/radix-vendor-C2EbRL2a.js">
 <link rel="modulepreload" crossorigin href="/assets/utils-vendor-DHm03ykU.js">

View File

@@ -10,6 +10,7 @@ import { seedDefaultConfigurations } from './services/configSeed.service';
 import { startPauseResumeJob } from './jobs/pauseResumeJob';
 import './queues/pauseResumeWorker'; // Initialize pause resume worker
 import { initializeQueueMetrics, stopQueueMetrics } from './utils/queueMetrics';
+import { emailService } from './services/email.service';
 const PORT: number = parseInt(process.env.PORT || '5000', 10);
@@ -19,30 +20,39 @@ const startServer = async (): Promise<void> => {
 // Initialize Google Secret Manager before starting server
 // This will merge secrets from GCS into process.env if enabled
 await initializeSecrets();
+// Re-initialize email service after secrets are loaded (in case SMTP credentials were loaded)
+// This ensures the email service uses production SMTP if credentials are available
+try {
+await emailService.initialize();
+console.log('📧 Email service re-initialized after secrets loaded');
+} catch (error) {
+console.warn('⚠️ Email service re-initialization warning (will use test account if SMTP not configured):', error);
+}
 const server = http.createServer(app);
 initSocket(server);
 // Seed default configurations if table is empty
 try {
 await seedDefaultConfigurations();
 } catch (error) {
 console.error('⚠️ Configuration seeding error:', error);
 }
 // Initialize holidays cache for TAT calculations
 try {
 await initializeHolidaysCache();
 } catch (error) {
 // Silently fall back to weekends-only TAT calculation
 }
 // Start scheduled jobs
 startPauseResumeJob();
 // Initialize queue metrics collection for Prometheus
 initializeQueueMetrics();
 server.listen(PORT, () => {
 console.log(`🚀 Server running on port ${PORT} | ${process.env.NODE_ENV || 'development'}`);
 });
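
The change above relies on `emailService.initialize()` being safe to call more than once: it already runs when the module is imported, and now runs again after `initializeSecrets()` has merged SMTP credentials into `process.env`. A minimal sketch of such a re-runnable initializer follows; it is an illustration only (field names are assumed, not the repository's actual EmailService), and each call simply rebuilds the transporter from whatever credentials are currently present.

// Minimal sketch, assuming nodemailer and illustrative field names - not the real EmailService.
import nodemailer, { Transporter } from 'nodemailer';

class EmailServiceSketch {
  private transporter: Transporter | null = null;
  private useTestAccount = false;

  async initialize(): Promise<void> {
    const { SMTP_HOST, SMTP_PORT, SMTP_USER, SMTP_PASSWORD } = process.env;

    if (SMTP_HOST && SMTP_USER && SMTP_PASSWORD) {
      // Production SMTP - these variables may only exist after initializeSecrets() has run.
      this.transporter = nodemailer.createTransport({
        host: SMTP_HOST,
        port: parseInt(SMTP_PORT || '587', 10),
        auth: { user: SMTP_USER, pass: SMTP_PASSWORD },
      });
      this.useTestAccount = false;
      return;
    }

    // Fallback: Ethereal test account, matching the import-time behaviour.
    const testAccount = await nodemailer.createTestAccount();
    this.transporter = nodemailer.createTransport({
      host: testAccount.smtp.host,
      port: testAccount.smtp.port,
      secure: testAccount.smtp.secure,
      auth: { user: testAccount.user, pass: testAccount.pass },
    });
    this.useTestAccount = true;
  }
}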

View File

@@ -28,7 +28,7 @@ class AIService {
 // Check if AI is enabled from config
 const { getConfigBoolean } = require('./configReader.service');
 const enabled = await getConfigBoolean('AI_ENABLED', true);
 if (!enabled) {
 logger.warn('[AI Service] AI features disabled in admin configuration');
 this.isInitialized = true;
@@ -54,7 +54,7 @@ class AIService {
 this.isInitialized = true;
 } catch (error: any) {
 logger.error('[AI Service] Failed to initialize Vertex AI:', error);
 if (error.code === 'MODULE_NOT_FOUND') {
 logger.warn('[AI Service] @google-cloud/vertexai package not installed. Run: npm install @google-cloud/vertexai');
 } else if (error.message?.includes('ENOENT') || error.message?.includes('not found')) {
@@ -65,7 +65,7 @@ class AIService {
 } else {
 logger.error(`[AI Service] Initialization error: ${error.message}`);
 }
 this.isInitialized = true; // Mark as initialized even if failed to prevent infinite loops
 }
 }
@@ -99,10 +99,11 @@
 try {
 // Get the generative model
+// Increase maxOutputTokens to handle longer conclusions (up to ~4000 tokens ≈ 3000 words)
 const generativeModel = this.vertexAI.getGenerativeModel({
 model: this.model,
 generationConfig: {
-maxOutputTokens: 2048,
+maxOutputTokens: 4096, // Increased from 2048 to handle longer conclusions
 temperature: 0.3,
 },
 });
@@ -114,7 +115,7 @@
 const streamingResp = await generativeModel.generateContent(request);
 const response = streamingResp.response;
 // Log full response structure for debugging if empty
 if (!response.candidates || response.candidates.length === 0) {
 logger.error('[AI Service] No candidates in Vertex AI response:', {
@@ -124,12 +125,12 @@
 });
 throw new Error('Vertex AI returned no candidates. The response may have been blocked by safety filters.');
 }
 const candidate = response.candidates[0];
 // Check for safety ratings or blocked reasons
 if (candidate.safetyRatings && candidate.safetyRatings.length > 0) {
 const blockedRatings = candidate.safetyRatings.filter((rating: any) =>
 rating.probability === 'HIGH' || rating.probability === 'MEDIUM'
 );
 if (blockedRatings.length > 0) {
@@ -142,7 +143,7 @@
 });
 }
 }
 // Check finish reason
 if (candidate.finishReason && candidate.finishReason !== 'STOP') {
 logger.warn('[AI Service] Vertex AI finish reason:', {
@@ -150,10 +151,23 @@
 safetyRatings: candidate.safetyRatings
 });
 }
 // Extract text from response
 const text = candidate.content?.parts?.[0]?.text || '';
+// Handle MAX_TOKENS finish reason - accept whatever response we got
+// We trust the AI's response - no truncation on our side
+if (candidate.finishReason === 'MAX_TOKENS' && text) {
+// Accept the response as-is - AI was instructed to stay within limits
+// If it hit the limit, we still use what we got (no truncation on our side)
+logger.info('[AI Service] Vertex AI response hit token limit, but content received is preserved as-is:', {
+textLength: text.length,
+finishReason: candidate.finishReason
+});
+// Return the response without any truncation - trust what AI generated
+return text;
+}
 if (!text) {
 // Log detailed response structure for debugging
 logger.error('[AI Service] Empty text in Vertex AI response:', {
@@ -164,12 +178,12 @@
 promptPreview: prompt.substring(0, 200) + '...',
 model: this.model
 });
 // Provide more helpful error message
 if (candidate.finishReason === 'SAFETY') {
 throw new Error('Vertex AI blocked the response due to safety filters. The prompt may contain content that violates safety policies.');
 } else if (candidate.finishReason === 'MAX_TOKENS') {
-throw new Error('Vertex AI response was truncated due to token limit.');
+throw new Error('Vertex AI response was truncated due to token limit. The prompt may be too long or the response limit was exceeded.');
 } else if (candidate.finishReason === 'RECITATION') {
 throw new Error('Vertex AI blocked the response due to recitation concerns.');
 } else {
@@ -180,7 +194,7 @@
 return text;
 } catch (error: any) {
 logger.error('[AI Service] Vertex AI generation error:', error);
 // Provide more specific error messages
 if (error.message?.includes('Model was not found')) {
 throw new Error(`Model ${this.model} not found or not available in region ${LOCATION}. Please check model name and region.`);
@@ -189,7 +203,7 @@
 } else if (error.message?.includes('API not enabled')) {
 throw new Error('Vertex AI API is not enabled. Please enable it in Google Cloud Console.');
 }
 throw new Error(`Vertex AI generation failed: ${error.message}`);
 }
 }
@@ -254,9 +268,10 @@
 const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
 const maxLength = parseInt(maxLengthStr || '2000', 10);
-// Log length (no trimming - preserve complete AI-generated content)
+// Trust AI's response - do not truncate anything
+// AI is instructed to stay within limit, but we accept whatever it generates
 if (remarkText.length > maxLength) {
-logger.warn(`[AI Service] ⚠️ AI exceeded suggested limit (${remarkText.length} > ${maxLength}). Content preserved to avoid incomplete information.`);
+logger.info(`[AI Service] AI generated ${remarkText.length} characters (suggested limit: ${maxLength}). Full content preserved as-is.`);
 }
 // Extract key points (look for bullet points or numbered items)
@@ -300,7 +315,7 @@
 const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
 const maxLength = parseInt(maxLengthStr || '2000', 10);
 const targetWordCount = Math.floor(maxLength / 6); // Approximate words (avg 6 chars per word)
 logger.info(`[AI Service] Using max remark length: ${maxLength} characters (≈${targetWordCount} words) from admin config`);
 // Check if this is a rejected request
@@ -318,11 +333,11 @@
 const approvalSummary = approvalFlow
 .filter((a: any) => a.status === 'APPROVED' || a.status === 'REJECTED')
 .map((a: any) => {
 const tatPercentage = a.tatPercentageUsed !== undefined && a.tatPercentageUsed !== null
 ? Number(a.tatPercentageUsed)
 : (a.elapsedHours && a.tatHours ? (Number(a.elapsedHours) / Number(a.tatHours)) * 100 : 0);
 const riskStatus = getTATRiskStatus(tatPercentage);
 const tatInfo = a.elapsedHours && a.tatHours
 ? ` (completed in ${a.elapsedHours.toFixed(1)}h of ${a.tatHours}h TAT, ${tatPercentage.toFixed(1)}% used)`
 : '';
 const riskInfo = riskStatus !== 'ON_TRACK' ? ` [${riskStatus}]` : '';
@@ -336,13 +351,14 @@
 .map((wn: any) => `- ${wn.userName}: "${wn.message.substring(0, 150)}${wn.message.length > 150 ? '...' : ''}"`)
 .join('\n');
-// Summarize documents
+// Summarize documents (limit to reduce token usage)
 const documentSummary = documents
+.slice(0, 10) // Limit to first 10 documents
 .map((d: any) => `- ${d.fileName} (by ${d.uploadedBy})`)
 .join('\n');
 // Build rejection context if applicable
 const rejectionContext = isRejected
 ? `\n**Rejection Details:**\n- Rejected by: ${rejectedBy || 'Approver'}\n- Rejection reason: ${rejectionReason || 'Not specified'}`
 : '';
@@ -364,8 +380,8 @@ ${documentSummary || 'No documents'}
 **YOUR TASK:**
 Write a brief, professional conclusion (approximately ${targetWordCount} words, max ${maxLength} characters) that:
 ${isRejected
 ? `- Summarizes what was requested and explains that it was rejected
 - Mentions who rejected it and the rejection reason
 - Notes the outcome and any learnings or next steps
 - Mentions if any approval levels were AT_RISK, CRITICAL, or BREACHED (if applicable)
@@ -373,7 +389,7 @@ ${isRejected
 - Is suitable for permanent archiving and future reference
 - Sounds natural and human-written (not AI-generated)
 - Maintains a professional and constructive tone even for rejections`
 : `- Summarizes what was requested and the final decision
 - Mentions who approved it and any key comments
 - Mentions if any approval levels were AT_RISK, CRITICAL, or BREACHED (if applicable)
 - Notes the outcome and next steps (if applicable)
@@ -382,15 +398,17 @@ ${isRejected
 - Sounds natural and human-written (not AI-generated)`}
 **CRITICAL CHARACTER LIMIT - STRICT REQUIREMENT:**
-- Your response MUST be EXACTLY within ${maxLength} characters (not words, CHARACTERS including spaces)
-- Count your characters carefully before responding
+- Your response MUST stay within ${maxLength} characters (not words, CHARACTERS including spaces including HTML tags)
+- This is a HARD LIMIT - you must count your characters and ensure your complete response fits within ${maxLength} characters
+- Count your characters carefully before responding - include all HTML tags in your count
 - If you have too much content, PRIORITIZE the most important information:
 1. Final decision (approved/rejected)
 2. Key approvers and their decisions
 3. Critical TAT breaches (if any)
 4. Brief summary of the request
 - OMIT less important details to fit within the limit rather than exceeding it
-- Better to be concise than to exceed the limit
+- Better to be concise and complete within the limit than to exceed it
+- IMPORTANT: Generate your complete response within this limit - do not generate partial content that exceeds the limit
 **WRITING GUIDELINES:**
 - Be concise and direct - every word must add value
@@ -432,13 +450,13 @@ Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters m
 */
 private extractKeyPoints(remark: string): string[] {
 const keyPoints: string[] = [];
 // Look for bullet points (-, •, *) or numbered items (1., 2., etc.)
 const lines = remark.split('\n');
 for (const line of lines) {
 const trimmed = line.trim();
 // Match bullet points
 if (trimmed.match(/^[-•*]\s+(.+)$/)) {
 const point = trimmed.replace(/^[-•*]\s+/, '');
@@ -446,7 +464,7 @@ Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters m
 keyPoints.push(point);
 }
 }
 // Match numbered items
 if (trimmed.match(/^\d+\.\s+(.+)$/)) {
 const point = trimmed.replace(/^\d+\.\s+/, '');
@@ -455,13 +473,13 @@ Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters m
 }
 }
 }
 // If no bullet points found, extract first few sentences
 if (keyPoints.length === 0) {
 const sentences = remark.split(/[.!?]+/).filter(s => s.trim().length > 20);
 keyPoints.push(...sentences.slice(0, 3).map(s => s.trim()));
 }
 return keyPoints.slice(0, 5); // Max 5 key points
 }
@@ -470,22 +488,22 @@ Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters m
 */
 private calculateConfidence(remark: string, context: any): number {
 let score = 0.6; // Base score
 // Check if remark has good length (100-400 chars - more realistic)
 if (remark.length >= 100 && remark.length <= 400) {
 score += 0.2;
 }
 // Check if remark mentions key elements
 if (remark.toLowerCase().includes('approv')) {
 score += 0.1;
 }
 // Check if remark is not too generic
 if (remark.length > 80 && !remark.toLowerCase().includes('lorem ipsum')) {
 score += 0.1;
 }
 return Math.min(1.0, score);
 }
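
For context on the new 4096-token budget: AI_MAX_REMARK_LENGTH is a character limit, while maxOutputTokens is counted in tokens. The rough arithmetic below (assuming about 4 characters per token, which is a heuristic rather than a Vertex AI guarantee, alongside the service's own ~6 characters per word) shows why 4096 output tokens comfortably covers the default 2000-character remark even with HTML tags included.

// Heuristic arithmetic only - ~4 chars/token is an assumed rule of thumb, not a Vertex AI guarantee.
const maxLength = 2000;                              // AI_MAX_REMARK_LENGTH default, in characters
const targetWordCount = Math.floor(maxLength / 6);   // ≈ 333 words, as computed in the prompt builder
const approxTokensNeeded = Math.ceil(maxLength / 4); // ≈ 500 tokens for the remark text itself

console.log(`${targetWordCount} words, ~${approxTokensNeeded} tokens vs. maxOutputTokens of 4096`);
// Even doubled to allow for HTML tags and formatting, the response stays well under 4096,
// so MAX_TOKENS should now only occur when the model badly overshoots the character limit.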

View File

@@ -72,7 +72,7 @@ export class EmailService {
 private async initializeTestAccount(): Promise<void> {
 try {
 this.testAccountInfo = await nodemailer.createTestAccount();
 this.transporter = nodemailer.createTransport({
 host: this.testAccountInfo.smtp.host,
 port: this.testAccountInfo.smtp.port,
@@ -100,6 +100,18 @@ export class EmailService {
 await this.initialize();
 }
+// If using test account, check if SMTP credentials are now available and re-initialize
+if (this.useTestAccount) {
+const smtpHost = process.env.SMTP_HOST;
+const smtpUser = process.env.SMTP_USER;
+const smtpPassword = process.env.SMTP_PASSWORD;
+if (smtpHost && smtpUser && smtpPassword) {
+logger.info('📧 SMTP credentials detected - re-initializing email service with production SMTP');
+await this.initialize();
+}
+}
 const recipients = Array.isArray(options.to) ? options.to.join(', ') : options.to;
 const fromAddress = process.env.EMAIL_FROM || 'RE Flow <noreply@royalenfield.com>';
@@ -120,11 +132,11 @@ export class EmailService {
 for (let attempt = 1; attempt <= maxRetries; attempt++) {
 try {
 const info = await this.transporter!.sendMail(mailOptions);
 if (!info || !info.messageId) {
 throw new Error('Email sent but no messageId returned');
 }
 const result: { messageId: string; previewUrl?: string } = {
 messageId: info.messageId
 };
@@ -133,10 +145,10 @@ export class EmailService {
 if (this.useTestAccount) {
 try {
 const previewUrl = nodemailer.getTestMessageUrl(info);
 if (previewUrl) {
 result.previewUrl = previewUrl;
 // Always log to console for visibility
 console.log('\n' + '='.repeat(80));
 console.log(`📧 EMAIL PREVIEW (${options.subject})`);
@@ -144,7 +156,7 @@ export class EmailService {
 console.log(`Preview URL: ${previewUrl}`);
 console.log(`Message ID: ${info.messageId}`);
 console.log('='.repeat(80) + '\n');
 logger.info(`✅ Email sent (TEST MODE) to ${recipients}`);
 logger.info(`📧 Preview URL: ${previewUrl}`);
 } else {
@@ -166,7 +178,7 @@ export class EmailService {
 } catch (error) {
 lastError = error;
 logger.error(`❌ Email send attempt ${attempt}/${maxRetries} failed:`, error);
 if (attempt < maxRetries) {
 const delay = parseInt(process.env.EMAIL_RETRY_DELAY || '5000') * attempt;
 logger.info(`⏳ Retrying in ${delay}ms...`);
@@ -185,22 +197,22 @@ export class EmailService {
 */
 async sendBatch(emails: EmailOptions[]): Promise<void> {
 logger.info(`📧 Sending batch of ${emails.length} emails`);
 const batchSize = parseInt(process.env.EMAIL_BATCH_SIZE || '10');
 for (let i = 0; i < emails.length; i += batchSize) {
 const batch = emails.slice(i, i + batchSize);
 await Promise.allSettled(
 batch.map(email => this.sendEmail(email))
 );
 // Small delay between batches to avoid rate limiting
 if (i + batchSize < emails.length) {
 await new Promise(resolve => setTimeout(resolve, 1000));
 }
 }
 logger.info(`✅ Batch email sending complete`);
 }
@@ -233,6 +245,8 @@ export class EmailService {
 export const emailService = new EmailService();
 // Initialize on import (will use test account if SMTP not configured)
+// Note: If secrets are loaded later, the service will re-initialize automatically
+// when sendEmail is called (if SMTP credentials become available)
 emailService.initialize().catch(error => {
 logger.error('Failed to initialize email service:', error);
 });
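
The send-time check added to sendEmail() upgrades the service lazily: if it is still on the Ethereal test account but SMTP_HOST, SMTP_USER and SMTP_PASSWORD have since appeared in process.env (for example after secrets were loaded), it re-runs initialize() before sending. The same guard, pulled out as a standalone helper purely for illustration (the service shape here is assumed, not the actual class):

// Illustration only - assumed service shape; the check itself mirrors the diff above.
async function ensureProductionTransport(service: {
  useTestAccount: boolean;
  initialize(): Promise<void>;
}): Promise<void> {
  if (!service.useTestAccount) return; // already using production SMTP

  const { SMTP_HOST, SMTP_USER, SMTP_PASSWORD } = process.env;
  if (SMTP_HOST && SMTP_USER && SMTP_PASSWORD) {
    // Credentials appeared after import (e.g. loaded by initializeSecrets()),
    // so rebuild the transporter before the next send.
    await service.initialize();
  }
}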

File diff suppressed because it is too large