Re_Backend/src/services/ai.service.ts

import logger, { logAIEvent } from '@utils/logger';
import { getAIProviderConfig, getConfigValue } from './configReader.service';
// Provider-specific interfaces
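/**
 * Common contract implemented by each AI backend below.
 * AIService depends only on these three methods, so adding or swapping a
 * provider does not require changes to the service itself.
 */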
interface AIProvider {
  generateText(prompt: string): Promise<string>;
  isAvailable(): boolean;
  getProviderName(): string;
}
// Claude Provider
class ClaudeProvider implements AIProvider {
  private client: any = null;
  private model: string;
  constructor(apiKey?: string, model?: string) {
    // Allow model override via parameter, environment variable, or default
    // Current models (November 2025):
    // - claude-sonnet-4-20250514 (default - latest Claude Sonnet 4)
    // Priority: 1. Provided model parameter, 2. Environment variable, 3. Default
    this.model = model || process.env.CLAUDE_MODEL || 'claude-sonnet-4-20250514';
    try {
      // Priority: 1. Provided key, 2. Environment variable
      const key = apiKey || process.env.CLAUDE_API_KEY || process.env.ANTHROPIC_API_KEY;
      if (!key || key.trim() === '') {
        return; // Silently skip if no key available
      }
      // Runtime require to avoid a hard dependency on the SDK
      const Anthropic = require('@anthropic-ai/sdk');
      this.client = new Anthropic({ apiKey: key });
      logger.info(`[AI Service] ✅ Claude provider initialized with model: ${this.model}`);
    } catch (error: any) {
      // Handle missing package gracefully
      if (error.code === 'MODULE_NOT_FOUND') {
        logger.warn('[AI Service] Claude SDK not installed. Run: npm install @anthropic-ai/sdk');
      } else {
        logger.error('[AI Service] Failed to initialize Claude:', error.message);
      }
    }
  }
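  /**
   * Send a single user prompt through the Anthropic Messages API and return
   * the text of the first content block (empty string if it is not text).
   */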
  async generateText(prompt: string): Promise<string> {
    if (!this.client) throw new Error('Claude client not initialized');
    logAIEvent('request', { provider: 'claude', model: this.model });
    const response = await this.client.messages.create({
      model: this.model,
      max_tokens: 2048, // Increased for longer conclusions
      temperature: 0.3,
      messages: [{ role: 'user', content: prompt }]
    });
    const content = response.content[0];
    return content.type === 'text' ? content.text : '';
  }
  isAvailable(): boolean {
    return this.client !== null;
  }
  getProviderName(): string {
    return 'Claude (Anthropic)';
  }
}
// OpenAI Provider
class OpenAIProvider implements AIProvider {
  private client: any = null;
  private model: string;
  constructor(apiKey?: string, model?: string) {
    // Allow model override via parameter, environment variable, or default
    // Current models (November 2025):
    // - gpt-4o (default - GPT-4 Omni)
    // Priority: 1. Provided model parameter, 2. Environment variable, 3. Default
    this.model = model || process.env.OPENAI_MODEL || 'gpt-4o';
    try {
      // Priority: 1. Provided key, 2. Environment variable
      const key = apiKey || process.env.OPENAI_API_KEY;
      if (!key || key.trim() === '') {
        return; // Silently skip if no key available
      }
      const OpenAI = require('openai');
      this.client = new OpenAI({ apiKey: key });
      logger.info(`[AI Service] ✅ OpenAI provider initialized with model: ${this.model}`);
    } catch (error: any) {
      // Handle missing package gracefully
      if (error.code === 'MODULE_NOT_FOUND') {
        logger.warn('[AI Service] OpenAI SDK not installed. Run: npm install openai');
      } else {
        logger.error('[AI Service] Failed to initialize OpenAI:', error.message);
      }
    }
  }
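  /**
   * Send a single user prompt through the Chat Completions API and return
   * the first choice's message content (empty string if missing).
   */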
  async generateText(prompt: string): Promise<string> {
    if (!this.client) throw new Error('OpenAI client not initialized');
    logAIEvent('request', { provider: 'openai', model: this.model });
    const response = await this.client.chat.completions.create({
      model: this.model,
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 1024,
      temperature: 0.3
    });
    return response.choices[0]?.message?.content || '';
  }
  isAvailable(): boolean {
    return this.client !== null;
  }
  getProviderName(): string {
    return 'OpenAI (GPT-4)';
  }
}
// Gemini Provider (Google)
class GeminiProvider implements AIProvider {
  private client: any = null;
  private model: string;
  constructor(apiKey?: string, model?: string) {
    // Allow model override via parameter, environment variable, or default
    // Current models (November 2025):
    // - gemini-2.0-flash-lite (default - latest Gemini Flash Lite)
    // Priority: 1. Provided model parameter, 2. Environment variable, 3. Default
    this.model = model || process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite';
    try {
      // Priority: 1. Provided key, 2. Environment variable
      const key = apiKey || process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY;
      if (!key || key.trim() === '') {
        return; // Silently skip if no key available
      }
      const { GoogleGenerativeAI } = require('@google/generative-ai');
      this.client = new GoogleGenerativeAI(key);
      logger.info(`[AI Service] ✅ Gemini provider initialized with model: ${this.model}`);
    } catch (error: any) {
      // Handle missing package gracefully
      if (error.code === 'MODULE_NOT_FOUND') {
        logger.warn('[AI Service] Gemini SDK not installed. Run: npm install @google/generative-ai');
      } else {
        logger.error('[AI Service] Failed to initialize Gemini:', error.message);
      }
    }
  }
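  /**
   * Resolve a generative model handle for the configured model name and
   * return the plain text of the generated response.
   */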
  async generateText(prompt: string): Promise<string> {
    if (!this.client) throw new Error('Gemini client not initialized');
    logAIEvent('request', { provider: 'gemini', model: this.model });
    const model = this.client.getGenerativeModel({ model: this.model });
    const result = await model.generateContent(prompt);
    const response = await result.response;
    return response.text();
  }
  isAvailable(): boolean {
    return this.client !== null;
  }
  getProviderName(): string {
    return 'Gemini (Google)';
  }
}
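// Environment variables read by the providers above (values are illustrative only):
//   AI_PROVIDER=claude                  # used by the env fallback in AIService
//   CLAUDE_API_KEY=sk-ant-...           # or ANTHROPIC_API_KEY
//   CLAUDE_MODEL=claude-sonnet-4-20250514
//   OPENAI_API_KEY=sk-...
//   OPENAI_MODEL=gpt-4o
//   GEMINI_API_KEY=...                  # or GOOGLE_AI_API_KEY
//   GEMINI_MODEL=gemini-2.0-flash-lite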
class AIService {
  private provider: AIProvider | null = null;
  private providerName: string = 'None';
  private isInitialized: boolean = false;
  constructor() {
    // Initialization happens asynchronously
    this.initialize();
  }
  /**
   * Initialize AI provider from database configuration
   */
  async initialize(): Promise<void> {
    try {
      // Read AI configuration from database (with env fallback)
      const config = await getAIProviderConfig();
      if (!config.enabled) {
        logger.warn('[AI Service] AI features disabled in admin configuration');
        return;
      }
      const preferredProvider = config.provider.toLowerCase();
      logger.info(`[AI Service] Preferred provider from config: ${preferredProvider}`);
      // Try to initialize the preferred provider first
      let initialized = false;
      switch (preferredProvider) {
        case 'openai':
        case 'gpt':
          initialized = this.tryProvider(new OpenAIProvider(config.openaiKey, config.openaiModel));
          break;
        case 'gemini':
        case 'google':
          initialized = this.tryProvider(new GeminiProvider(config.geminiKey, config.geminiModel));
          break;
        case 'claude':
        case 'anthropic':
        default:
          initialized = this.tryProvider(new ClaudeProvider(config.claudeKey, config.claudeModel));
          break;
      }
      // Fallback: Try other providers if preferred one failed
      if (!initialized) {
        logger.warn('[AI Service] Preferred provider unavailable. Trying fallbacks...');
        const fallbackProviders = [
          new ClaudeProvider(config.claudeKey, config.claudeModel),
          new OpenAIProvider(config.openaiKey, config.openaiModel),
          new GeminiProvider(config.geminiKey, config.geminiModel)
        ];
        for (const provider of fallbackProviders) {
          if (this.tryProvider(provider)) {
            logger.info(`[AI Service] ✅ Using fallback provider: ${this.providerName}`);
            break;
          }
        }
      }
      if (!this.provider) {
        logger.warn('[AI Service] ⚠️ No AI provider available. AI features will be disabled.');
        logger.warn('[AI Service] To enable AI: Configure API keys in admin panel or set environment variables.');
        logger.warn('[AI Service] Supported providers: Claude (CLAUDE_API_KEY), OpenAI (OPENAI_API_KEY), Gemini (GEMINI_API_KEY)');
      }
      this.isInitialized = true;
    } catch (error) {
      logger.error('[AI Service] Failed to initialize from config:', error);
      // Fallback to environment variables
      try {
        this.initializeFromEnv();
      } catch (envError) {
        logger.error('[AI Service] Environment fallback also failed:', envError);
        this.isInitialized = true; // Mark as initialized even if failed
      }
    }
  }
  /**
   * Fallback initialization from environment variables
   */
  private initializeFromEnv(): void {
    try {
      const preferredProvider = (process.env.AI_PROVIDER || 'claude').toLowerCase();
      logger.info(`[AI Service] Using environment variable configuration`);
      switch (preferredProvider) {
        case 'openai':
        case 'gpt':
          this.tryProvider(new OpenAIProvider(undefined, process.env.OPENAI_MODEL));
          break;
        case 'gemini':
        case 'google':
          this.tryProvider(new GeminiProvider(undefined, process.env.GEMINI_MODEL));
          break;
        case 'claude':
        case 'anthropic':
        default:
          this.tryProvider(new ClaudeProvider(undefined, process.env.CLAUDE_MODEL));
          break;
      }
      if (!this.provider) {
        logger.warn('[AI Service] ⚠️ No provider available from environment variables either.');
      }
      this.isInitialized = true;
    } catch (error) {
      logger.error('[AI Service] Environment initialization failed:', error);
      this.isInitialized = true; // Still mark as initialized to prevent infinite loops
    }
  }
  /**
   * Reinitialize AI provider (call after admin updates config)
   */
  async reinitialize(): Promise<void> {
    logger.info('[AI Service] Reinitializing AI provider from updated configuration...');
    this.provider = null;
    this.providerName = 'None';
    this.isInitialized = false;
    await this.initialize();
  }
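  // Illustrative call site (assumption, not part of this file): an admin settings
  // controller could hot-swap the provider after saving new keys, e.g.:
  //   await aiService.reinitialize();
  //   logger.info(`AI provider is now: ${aiService.getProviderName()}`);
  /**
   * Adopt the given provider if its SDK client initialized successfully;
   * records its display name and returns true, otherwise returns false.
   */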
  private tryProvider(provider: AIProvider): boolean {
    if (provider.isAvailable()) {
      this.provider = provider;
      this.providerName = provider.getProviderName();
      logger.info(`[AI Service] ✅ Active provider: ${this.providerName}`);
      return true;
    }
    return false;
  }
  /**
   * Get current AI provider name
   */
  getProviderName(): string {
    return this.providerName;
  }
  /**
   * Generate conclusion remark for a workflow request
   * @param context - All relevant data for generating the conclusion
   * @returns AI-generated conclusion remark
   */
  async generateConclusionRemark(context: {
    requestTitle: string;
    requestDescription: string;
    requestNumber: string;
    priority: string;
    approvalFlow: Array<{
      levelNumber: number;
      approverName: string;
      status: string;
      comments?: string;
      actionDate?: string;
      tatHours?: number;
      elapsedHours?: number;
    }>;
    workNotes: Array<{
      userName: string;
      message: string;
      createdAt: string;
    }>;
    documents: Array<{
      fileName: string;
      uploadedBy: string;
      uploadedAt: string;
    }>;
    activities: Array<{
      type: string;
      action: string;
      details: string;
      timestamp: string;
    }>;
  }): Promise<{ remark: string; confidence: number; keyPoints: string[]; provider: string }> {
    // Ensure initialization is complete
    if (!this.isInitialized) {
      logger.warn('[AI Service] Not yet initialized, attempting initialization...');
      await this.initialize();
    }
    if (!this.provider) {
      logger.error('[AI Service] No AI provider available');
      throw new Error('AI features are currently unavailable. Please configure an AI provider (Claude, OpenAI, or Gemini) in the admin panel, or write the conclusion manually.');
    }
    try {
      // Build context prompt with max length from config
      const prompt = await this.buildConclusionPrompt(context);
      logger.info(`[AI Service] Generating conclusion for request ${context.requestNumber} using ${this.providerName}...`);
      // Use provider's generateText method
      let remarkText = await this.provider.generateText(prompt);
      // Get max length from config for validation
      const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
      const maxLength = parseInt(maxLengthStr || '2000', 10);
      // Validate length - AI should already be within limit, but trim as safety net
      if (remarkText.length > maxLength) {
        logger.warn(`[AI Service] ⚠️ AI exceeded character limit (${remarkText.length} > ${maxLength}). This should be rare - AI was instructed to prioritize and condense. Applying safety trim...`);
        // Try to find a natural break point (sentence end) near the limit
        const safeLimit = maxLength - 3;
        let trimPoint = safeLimit;
        // Look for last sentence end (. ! ?) within the safe limit
        const lastPeriod = remarkText.lastIndexOf('.', safeLimit);
        const lastExclaim = remarkText.lastIndexOf('!', safeLimit);
        const lastQuestion = remarkText.lastIndexOf('?', safeLimit);
        const bestBreak = Math.max(lastPeriod, lastExclaim, lastQuestion);
        // Use sentence break if it's reasonably close to the limit (within 80% of max)
        if (bestBreak > maxLength * 0.8) {
          trimPoint = bestBreak + 1; // Include the punctuation
          remarkText = remarkText.substring(0, trimPoint).trim();
        } else {
          // Fall back to hard trim with ellipsis
          remarkText = remarkText.substring(0, safeLimit).trim() + '...';
        }
        logger.info(`[AI Service] Trimmed to ${remarkText.length} characters`);
      }
      // Extract key points (look for bullet points or numbered items)
      const keyPoints = this.extractKeyPoints(remarkText);
      // Calculate confidence based on response quality (simple heuristic)
      const confidence = this.calculateConfidence(remarkText, context);
      logger.info(`[AI Service] ✅ Generated conclusion (${remarkText.length}/${maxLength} chars, ${keyPoints.length} key points) via ${this.providerName}`);
      return {
        remark: remarkText,
        confidence: confidence,
        keyPoints: keyPoints,
        provider: this.providerName
      };
    } catch (error: any) {
      logger.error('[AI Service] Failed to generate conclusion:', error);
      throw new Error(`AI generation failed (${this.providerName}): ${error.message}`);
    }
  }
  /**
   * Build the prompt sent to the active AI provider to generate a professional conclusion remark
   */
  private async buildConclusionPrompt(context: any): Promise<string> {
    const {
      requestTitle,
      requestDescription,
      requestNumber,
      priority,
      approvalFlow,
      workNotes,
      documents,
      activities,
      rejectionReason,
      rejectedBy
    } = context;
    // Get max remark length from admin configuration
    const maxLengthStr = await getConfigValue('AI_MAX_REMARK_LENGTH', '2000');
    const maxLength = parseInt(maxLengthStr || '2000', 10);
    const targetWordCount = Math.floor(maxLength / 6); // Approximate words (avg 6 chars per word)
    logger.info(`[AI Service] Using max remark length: ${maxLength} characters (≈${targetWordCount} words) from admin config`);
    // Check if this is a rejected request
    const isRejected = rejectionReason || rejectedBy || approvalFlow.some((a: any) => a.status === 'REJECTED');
    // Helper function to determine TAT risk status
    const getTATRiskStatus = (tatPercentage: number): string => {
      if (tatPercentage < 50) return 'ON_TRACK';
      if (tatPercentage < 75) return 'AT_RISK';
      if (tatPercentage < 100) return 'CRITICAL';
      return 'BREACHED';
    };
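    // Example: 9.5h elapsed of a 12h TAT → 79.2% used → 'CRITICAL'.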
    // Summarize approvals with TAT risk information
    const approvalSummary = approvalFlow
      .filter((a: any) => a.status === 'APPROVED' || a.status === 'REJECTED')
      .map((a: any) => {
        const tatPercentage = a.tatPercentageUsed !== undefined && a.tatPercentageUsed !== null
          ? Number(a.tatPercentageUsed)
          : (a.elapsedHours && a.tatHours ? (Number(a.elapsedHours) / Number(a.tatHours)) * 100 : 0);
        const riskStatus = getTATRiskStatus(tatPercentage);
        const tatInfo = a.elapsedHours && a.tatHours
          ? ` (completed in ${a.elapsedHours.toFixed(1)}h of ${a.tatHours}h TAT, ${tatPercentage.toFixed(1)}% used)`
          : '';
        const riskInfo = riskStatus !== 'ON_TRACK' ? ` [${riskStatus}]` : '';
        return `- Level ${a.levelNumber}: ${a.approverName} ${a.status}${tatInfo}${riskInfo}${a.comments ? `\n Comment: "${a.comments}"` : ''}`;
      })
      .join('\n');
    // Summarize work notes (limit to important ones)
    const workNoteSummary = workNotes
      .slice(-10) // Last 10 work notes
      .map((wn: any) => `- ${wn.userName}: "${wn.message.substring(0, 150)}${wn.message.length > 150 ? '...' : ''}"`)
      .join('\n');
    // Summarize documents
    const documentSummary = documents
      .map((d: any) => `- ${d.fileName} (by ${d.uploadedBy})`)
      .join('\n');
    // Build rejection context if applicable
    const rejectionContext = isRejected
      ? `\n**Rejection Details:**\n- Rejected by: ${rejectedBy || 'Approver'}\n- Rejection reason: ${rejectionReason || 'Not specified'}`
      : '';
    const prompt = `You are writing a closure summary for a workflow request at Royal Enfield. Write a practical, realistic conclusion that an employee would write when closing a request.
**Request:**
${requestNumber} - ${requestTitle}
Description: ${requestDescription}
Priority: ${priority}
**What Happened:**
${approvalSummary || 'No approvals recorded'}${rejectionContext}
**Discussions (if any):**
${workNoteSummary || 'No work notes'}
**Documents:**
${documentSummary || 'No documents'}
**YOUR TASK:**
Write a brief, professional conclusion (approximately ${targetWordCount} words, max ${maxLength} characters) that:
${isRejected
? `- Summarizes what was requested and explains that it was rejected
- Mentions who rejected it and the rejection reason
- Notes the outcome and any learnings or next steps
- Mentions if any approval levels were AT_RISK, CRITICAL, or BREACHED (if applicable)
- Uses clear, factual language without time-specific references
- Is suitable for permanent archiving and future reference
- Sounds natural and human-written (not AI-generated)
- Maintains a professional and constructive tone even for rejections`
: `- Summarizes what was requested and the final decision
- Mentions who approved it and any key comments
- Mentions if any approval levels were AT_RISK, CRITICAL, or BREACHED (if applicable)
- Notes the outcome and next steps (if applicable)
- Uses clear, factual language without time-specific references
- Is suitable for permanent archiving and future reference
- Sounds natural and human-written (not AI-generated)`}
**CRITICAL CHARACTER LIMIT - STRICT REQUIREMENT:**
- Your response MUST be EXACTLY within ${maxLength} characters (not words, CHARACTERS including spaces)
- Count your characters carefully before responding
- If you have too much content, PRIORITIZE the most important information:
1. Final decision (approved/rejected)
2. Key approvers and their decisions
3. Critical TAT breaches (if any)
4. Brief summary of the request
- OMIT less important details to fit within the limit rather than exceeding it
- Better to be concise than to exceed the limit
**WRITING GUIDELINES:**
- Be concise and direct - every word must add value
- No time-specific words like "today", "now", "currently", "recently"
- No corporate jargon or buzzwords
- No emojis
- Write like a professional documenting a completed process
- Focus on facts: what was requested, who ${isRejected ? 'rejected' : 'approved'}, what was decided
- Use past tense for completed actions
- Use short sentences and avoid filler words
**FORMAT REQUIREMENT - HTML Rich Text:**
- Generate content in HTML format for rich text editor display
- Use proper HTML tags for structure and formatting:
* <p>...</p> for paragraphs
* <strong>...</strong> for important text/headings
* <ul><li>...</li></ul> for bullet points
* <ol><li>...</li></ol> for numbered lists
* <br> for line breaks only when necessary
- Use semantic HTML to make the content readable and well-structured
- Example format:
<p><strong>Request Summary:</strong> [Brief description]</p>
<p><strong>Approval Decision:</strong> [Decision details]</p>
<ul>
<li>Key point 1</li>
<li>Key point 2</li>
</ul>
<p><strong>Outcome:</strong> [Final outcome]</p>
- Keep HTML clean and minimal - no inline styles, no divs, no classes
- The HTML should render nicely in a rich text editor
Write the conclusion now in HTML format. STRICT LIMIT: ${maxLength} characters maximum (including HTML tags). Prioritize and condense if needed:`;
    return prompt;
  }
  /**
   * Extract key points from the AI-generated remark
   */
  private extractKeyPoints(remark: string): string[] {
    const keyPoints: string[] = [];
    // The prompt requests HTML output, so key points usually arrive as <li> items
    const listItems = remark.match(/<li[^>]*>([\s\S]*?)<\/li>/gi) || [];
    for (const item of listItems) {
      const point = item.replace(/<\/?[^>]+>/g, '').trim();
      if (point.length > 10) { // Ignore very short items
        keyPoints.push(point);
      }
    }
    // Also look for plain-text bullet points (-, •, *) or numbered items (1., 2., etc.)
    const lines = remark.split('\n');
    for (const line of lines) {
      const trimmed = line.trim();
      // Match bullet points
      if (trimmed.match(/^[-•*]\s+(.+)$/)) {
        const point = trimmed.replace(/^[-•*]\s+/, '');
        if (point.length > 10) { // Ignore very short lines
          keyPoints.push(point);
        }
      }
      // Match numbered items
      if (trimmed.match(/^\d+\.\s+(.+)$/)) {
        const point = trimmed.replace(/^\d+\.\s+/, '');
        if (point.length > 10) {
          keyPoints.push(point);
        }
      }
    }
    // If no list items found, extract the first few sentences (HTML tags stripped)
    if (keyPoints.length === 0) {
      const plainText = remark.replace(/<\/?[^>]+>/g, ' ');
      const sentences = plainText.split(/[.!?]+/).filter(s => s.trim().length > 20);
      keyPoints.push(...sentences.slice(0, 3).map(s => s.trim()));
    }
    return keyPoints.slice(0, 5); // Max 5 key points
  }
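  // Illustrative example (not from a real response):
  //   extractKeyPoints('<ul><li>Approved at all levels within TAT</li><li>Budget documents attached</li></ul>')
  //   → ['Approved at all levels within TAT', 'Budget documents attached']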
  /**
   * Calculate confidence score based on response quality
   */
  private calculateConfidence(remark: string, context: any): number {
    let score = 0.6; // Base score (slightly higher for new prompt)
    // Check if remark has good length (100-400 chars - more realistic)
    if (remark.length >= 100 && remark.length <= 400) {
      score += 0.2;
    }
    // Check if remark mentions key elements
    if (remark.toLowerCase().includes('approv')) {
      score += 0.1;
    }
    // Check if remark is not too generic
    if (remark.length > 80 && !remark.toLowerCase().includes('lorem ipsum')) {
      score += 0.1;
    }
    return Math.min(1.0, score);
  }
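  // Example: a 350-character remark that mentions "approved" scores
  // 0.6 (base) + 0.2 (length) + 0.1 ("approv") + 0.1 (non-generic) = 1.0.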
  /**
   * Check if AI service is available
   */
  isAvailable(): boolean {
    return this.provider !== null;
  }
}
export const aiService = new AIService();
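// Illustrative usage from a controller (assumption — the route and data shapes
// below are not defined in this file):
//
//   const result = await aiService.generateConclusionRemark({
//     requestTitle: request.title,
//     requestDescription: request.description,
//     requestNumber: request.requestNumber,
//     priority: request.priority,
//     approvalFlow,
//     workNotes,
//     documents,
//     activities
//   });
//   // result.remark is HTML suitable for the rich text editor;
//   // result.confidence, result.keyPoints and result.provider can be logged or shown in the UI.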