// chatbot-ui/src/components/ChatInterface.jsx

import { useState, useEffect, useRef } from 'react'
import { Send, Loader2, Mic } from 'lucide-react'
import { sendQuery } from '../services/api'
import { formatStructuredMessage } from '../utils/formatMessage'
import './ChatInterface.css'
const ChatInterface = () => {
const [messages, setMessages] = useState([])
const [inputValue, setInputValue] = useState('')
const [isLoading, setIsLoading] = useState(false)
const [isListening, setIsListening] = useState(false)
const messagesEndRef = useRef(null)
const recognitionRef = useRef(null)
const baseInputValueRef = useRef('')
const getTimeBasedGreeting = () => {
const hour = new Date().getHours()
if (hour >= 5 && hour < 12) {
return 'Hi, Good Morning!'
} else if (hour >= 12 && hour < 17) {
return 'Hi, Good Afternoon!'
} else if (hour >= 17 && hour < 21) {
return 'Hi, Good Evening!'
} else {
return 'Hi, Good Night!'
}
}
const scrollToBottom = () => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
}
useEffect(() => {
if (messages.length > 0) {
scrollToBottom()
}
}, [messages])
// Initialize Speech Recognition
useEffect(() => {
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
if (!SpeechRecognition) {
console.warn('Speech recognition not supported in this browser')
return
}
const recognition = new SpeechRecognition()
recognition.continuous = false
recognition.interimResults = true
recognition.lang = 'en-US'
recognition.onstart = () => {
setIsListening(true)
console.log('Speech recognition started')
}
recognition.onresult = (event) => {
let interimTranscript = ''
let finalTranscript = ''
for (let i = event.resultIndex; i < event.results.length; i++) {
const transcript = event.results[i][0].transcript
if (event.results[i].isFinal) {
finalTranscript += transcript + ' '
} else {
interimTranscript += transcript
}
}
console.log('Speech recognition result:', { finalTranscript, interimTranscript })
// Combine base value with final and interim transcripts
const baseText = baseInputValueRef.current
let newValue = baseText
if (finalTranscript) {
newValue = (baseText ? baseText + ' ' : '') + finalTranscript.trim()
baseInputValueRef.current = newValue // Update base for next recognition
console.log('Final transcript added, new value:', newValue)
} else if (interimTranscript) {
newValue = (baseText ? baseText + ' ' : '') + interimTranscript
console.log('Interim transcript added, new value:', newValue)
}
setInputValue(newValue)
}
recognition.onerror = (event) => {
console.error('Speech recognition error:', event.error)
setIsListening(false)
if (event.error === 'no-speech') {
console.log('No speech detected')
// Don't show alert for no-speech, just stop
} else if (event.error === 'not-allowed') {
alert('Microphone permission denied. Please enable microphone access in your browser settings.')
} else if (event.error === 'aborted') {
console.log('Speech recognition aborted')
} else {
console.error('Speech recognition error:', event.error)
}
}
recognition.onend = () => {
setIsListening(false)
console.log('Speech recognition ended')
}
recognitionRef.current = recognition
return () => {
if (recognitionRef.current) {
try {
recognitionRef.current.stop()
} catch (e) {
// Ignore errors when stopping
}
}
}
}, [])
const handleSend = async () => {
const query = inputValue.trim()
if (!query || isLoading) return
// Add user message to chat
const userMessage = {
type: 'user',
text: query
}
setMessages(prev => [...prev, userMessage])
setInputValue('')
setIsLoading(true)
// Add loading message
const loadingMessage = {
type: 'ai',
text: '',
isLoading: true
}
setMessages(prev => [...prev, loadingMessage])
try {
// Call API
const response = await sendQuery(query)
// Replace loading message with actual response
const answerText = response.answer || response.message || 'Sorry, I could not process your query.'
const structuredMessage = formatStructuredMessage(answerText)
setMessages(prev => {
const newMessages = [...prev]
const loadingIndex = newMessages.findIndex(msg => msg.isLoading === true)
if (loadingIndex !== -1) {
newMessages[loadingIndex] = {
type: 'ai',
text: answerText,
structuredMessage: structuredMessage,
imageUrl: response.source?.property_image_url || null,
isLoading: false
}
}
return newMessages
})
} catch (error) {
// Replace loading message with error message
setMessages(prev => {
const newMessages = [...prev]
const loadingIndex = newMessages.findIndex(msg => msg.isLoading === true)
if (loadingIndex !== -1) {
newMessages[loadingIndex] = {
type: 'ai',
text: 'Sorry, there was an error processing your query. Please try again.',
isLoading: false
}
}
return newMessages
})
} finally {
setIsLoading(false)
}
}
const handleKeyPress = (e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault()
handleSend()
}
}
const handleVoiceToggle = () => {
if (!recognitionRef.current) {
alert('Speech recognition is not supported in your browser. Please use Chrome, Edge, or Safari.')
return
}
if (isListening) {
try {
recognitionRef.current.stop()
setIsListening(false)
} catch (error) {
console.error('Error stopping speech recognition:', error)
setIsListening(false)
}
} else {
// Store current input value as base before starting
baseInputValueRef.current = inputValue.trim()
console.log('Starting voice recognition, base text:', baseInputValueRef.current)
try {
recognitionRef.current.start()
} catch (error) {
console.error('Error starting speech recognition:', error)
// If recognition is already running, try to stop and restart
if (error.message && error.message.includes('already started')) {
try {
recognitionRef.current.stop()
setTimeout(() => {
recognitionRef.current.start()
}, 100)
} catch (retryError) {
console.error('Error retrying speech recognition:', retryError)
setIsListening(false)
}
} else {
setIsListening(false)
alert('Could not start voice recognition. Please check your microphone permissions.')
}
}
}
}
return (
<div className="chat-container">
<div className="chat-content">
{/* Header Logo */}
<div className="header-logo">
<img
src="/0x0.png"
alt="ONE BROKER GROUP"
className="logo-image"
/>
</div>
{/* Show greeting and heading only when no messages */}
{messages.length === 0 && (
<>
{/* Greeting Message - Above heading */}
<div className="greeting-container">
<p className="greeting-text">{getTimeBasedGreeting()}</p>
</div>
{/* Main Heading - Below greeting */}
<h1 className="main-heading">
Exploring Dubai real estate?<br />
Ask our smart chatbot
</h1>
</>
)}
{/* Messages Container - Show when there are messages */}
{messages.length > 0 && (
<div className="messages-container">
{messages.map((message, index) => (
<div key={index} className={`message-wrapper ${message.type}`}>
<div className="message-label">
{message.type === 'user' ? 'ME' : 'Chatbot'}
</div>
<div className={`message-bubble ${message.type}`}>
{message.isLoading ? (
<div className="loading-indicator">
<Loader2 size={20} className="spinner" />
<span>Thinking...</span>
</div>
) : (
<>
{message.imageUrl && (
<div className="message-image-container">
<img
src={message.imageUrl}
alt="Property"
className="message-image"
onError={(e) => {
e.target.style.display = 'none'
}}
/>
</div>
)}
{message.structuredMessage && message.structuredMessage.sections.length > 0 ? (
<div className="message-content">
{message.structuredMessage.intro && (
<p className="message-text">{message.structuredMessage.intro}</p>
)}
{message.structuredMessage.sections.map((section, idx) => (
<div key={idx} className="message-section">
<h3 className="message-section-title">{section.title}:</h3>
<ul className="message-section-list">
{section.content.map((item, itemIdx) => (
<li key={itemIdx} className="message-section-item">{item}</li>
))}
</ul>
</div>
))}
</div>
) : message.text && (
<p className="message-text">{message.text}</p>
)}
</>
)}
</div>
</div>
))}
<div ref={messagesEndRef} />
</div>
)}
{/* Input Field */}
<div className="input-container">
<input
type="text"
className="chat-input"
placeholder="Ask me anything about Dubai real estate"
value={inputValue}
onChange={(e) => setInputValue(e.target.value)}
onKeyPress={handleKeyPress}
/>
<button
className="voice-button"
onClick={handleVoiceToggle}
type="button"
disabled={isLoading}
aria-label={isListening ? 'Stop listening' : 'Start voice input'}
title={isListening ? 'Stop listening' : 'Start voice input'}
>
<Mic
size={20}
strokeWidth={1.5}
className={isListening ? 'listening' : ''}
/>
</button>
<button
className="send-button"
onClick={handleSend}
type="button"
disabled={isLoading || !inputValue.trim()}
aria-label="Send message"
>
{isLoading ? (
<Loader2 size={20} strokeWidth={1.5} className="spinner" />
) : (
<Send size={20} strokeWidth={1.5} />
)}
</button>
</div>
</div>
</div>
)
}
export default ChatInterface