import { useState, useEffect, useRef } from 'react'
import { Send, Loader2, Mic } from 'lucide-react'
import { sendQuery } from '../services/api'
import { formatStructuredMessage } from '../utils/formatMessage'
import './ChatInterface.css'

const ChatInterface = () => {
  const [messages, setMessages] = useState([])
  const [inputValue, setInputValue] = useState('')
  const [isLoading, setIsLoading] = useState(false)
  const [isListening, setIsListening] = useState(false)
  const messagesEndRef = useRef(null)
  const recognitionRef = useRef(null)
  const baseInputValueRef = useRef('')

  const getTimeBasedGreeting = () => {
    const hour = new Date().getHours()
    if (hour >= 5 && hour < 12) {
      return 'Hi, Good Morning!'
    } else if (hour >= 12 && hour < 17) {
      return 'Hi, Good Afternoon!'
    } else if (hour >= 17 && hour < 21) {
      return 'Hi, Good Evening!'
    } else {
      return 'Hi, Good Night!'
    }
  }

  const scrollToBottom = () => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }

  useEffect(() => {
    if (messages.length > 0) {
      scrollToBottom()
    }
  }, [messages])

  // Initialize Speech Recognition (Web Speech API; prefixed in Chromium-based browsers)
  useEffect(() => {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition

    if (!SpeechRecognition) {
      console.warn('Speech recognition not supported in this browser')
      return
    }

    const recognition = new SpeechRecognition()
    recognition.continuous = false
    recognition.interimResults = true
    recognition.lang = 'en-US'

    recognition.onstart = () => {
      setIsListening(true)
      console.log('Speech recognition started')
    }

    recognition.onresult = (event) => {
      let interimTranscript = ''
      let finalTranscript = ''

      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript
        if (event.results[i].isFinal) {
          finalTranscript += transcript + ' '
        } else {
          interimTranscript += transcript
        }
      }

      console.log('Speech recognition result:', { finalTranscript, interimTranscript })

      // Combine base value with final and interim transcripts
      const baseText = baseInputValueRef.current
      let newValue = baseText

      if (finalTranscript) {
        newValue = (baseText ? baseText + ' ' : '') + finalTranscript.trim()
        baseInputValueRef.current = newValue // Update base for next recognition
        console.log('Final transcript added, new value:', newValue)
      } else if (interimTranscript) {
        newValue = (baseText ? baseText + ' ' : '') + interimTranscript
        console.log('Interim transcript added, new value:', newValue)
      }

      setInputValue(newValue)
    }

    recognition.onerror = (event) => {
      console.error('Speech recognition error:', event.error)
      setIsListening(false)

      if (event.error === 'no-speech') {
        // Don't show an alert for no-speech; just stop listening
        console.log('No speech detected')
      } else if (event.error === 'not-allowed') {
        alert('Microphone permission denied. Please enable microphone access in your browser settings.')
      } else if (event.error === 'aborted') {
        console.log('Speech recognition aborted')
      } else {
        console.error('Speech recognition error:', event.error)
      }
    }

    recognition.onend = () => {
      setIsListening(false)
      console.log('Speech recognition ended')
    }

    recognitionRef.current = recognition

    return () => {
      if (recognitionRef.current) {
        try {
          recognitionRef.current.stop()
        } catch (e) {
          // Ignore errors when stopping
        }
      }
    }
  }, [])

  const handleSend = async () => {
    const query = inputValue.trim()
    if (!query || isLoading) return

    // Add user message to chat
    const userMessage = { type: 'user', text: query }
    setMessages(prev => [...prev, userMessage])
    setInputValue('')
    setIsLoading(true)

    // Add loading message
    const loadingMessage = { type: 'ai', text: '', isLoading: true }
    setMessages(prev => [...prev, loadingMessage])

    try {
      // Call API
      const response = await sendQuery(query)

      // Replace loading message with actual response
      const answerText = response.answer || response.message || 'Sorry, I could not process your query.'
      const structuredMessage = formatStructuredMessage(answerText)

      setMessages(prev => {
        const newMessages = [...prev]
        const loadingIndex = newMessages.findIndex(msg => msg.isLoading === true)
        if (loadingIndex !== -1) {
          newMessages[loadingIndex] = {
            type: 'ai',
            text: answerText,
            structuredMessage: structuredMessage,
            imageUrl: response.source?.property_image_url || null,
            isLoading: false
          }
        }
        return newMessages
      })
    } catch (error) {
      // Replace loading message with error message
      setMessages(prev => {
        const newMessages = [...prev]
        const loadingIndex = newMessages.findIndex(msg => msg.isLoading === true)
        if (loadingIndex !== -1) {
          newMessages[loadingIndex] = {
            type: 'ai',
            text: 'Sorry, there was an error processing your query. Please try again.',
            isLoading: false
          }
        }
        return newMessages
      })
    } finally {
      setIsLoading(false)
    }
  }

  const handleKeyPress = (e) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault()
      handleSend()
    }
  }

  const handleVoiceToggle = () => {
    if (!recognitionRef.current) {
      alert('Speech recognition is not supported in your browser. Please use Chrome, Edge, or Safari.')
      return
    }

    if (isListening) {
      try {
        recognitionRef.current.stop()
        setIsListening(false)
      } catch (error) {
        console.error('Error stopping speech recognition:', error)
        setIsListening(false)
      }
    } else {
      // Store current input value as base before starting
      baseInputValueRef.current = inputValue.trim()
      console.log('Starting voice recognition, base text:', baseInputValueRef.current)

      try {
        recognitionRef.current.start()
      } catch (error) {
        console.error('Error starting speech recognition:', error)
        // If recognition is already running, try to stop and restart
        if (error.message && error.message.includes('already started')) {
          try {
            recognitionRef.current.stop()
            setTimeout(() => {
              recognitionRef.current.start()
            }, 100)
          } catch (retryError) {
            console.error('Error retrying speech recognition:', retryError)
            setIsListening(false)
          }
        } else {
          setIsListening(false)
          alert('Could not start voice recognition. Please check your microphone permissions.')
        }
      }
    }
  }
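  // Assumed data shapes, inferred from usage in handleSend and the render below
  // (not confirmed against ../services/api or ../utils/formatMessage):
  //   sendQuery(query)             -> { answer?, message?, source?: { property_image_url? } }
  //   formatStructuredMessage(txt) -> { intro?, sections: [{ title, content: [] }] }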
  return (
    // NOTE: the original JSX markup was lost; the element structure and class
    // names below are reconstructed from the surviving text nodes, handlers,
    // and imports. Adjust tags and classes to match ChatInterface.css.
    <div className="chat-interface">
      {/* Header Logo */}
      <header className="chat-header">
        <div className="logo">ONE BROKER GROUP</div>
      </header>

      {/* Show greeting and heading only when no messages */}
      {messages.length === 0 && (
        <>
          {/* Greeting Message - Above heading */}
          <div className="greeting">{getTimeBasedGreeting()}</div>

          {/* Main Heading - Below greeting */}
          <h1 className="main-heading">
            Exploring Dubai real estate?
            <br />
            Ask our smart chatbot
          </h1>
        </>
      )}

      {/* Messages Container - Show when there are messages */}
      {messages.length > 0 && (
        <div className="messages-container">
          {messages.map((message, index) => (
            <div key={index} className={`message ${message.type}`}>
              <div className="message-sender">
                {message.type === 'user' ? 'ME' : 'Chatbot'}
              </div>

              {message.isLoading ? (
                <div className="message-loading">
                  <Loader2 className="spinner" />
                  Thinking...
                </div>
              ) : (
                <>
                  {message.imageUrl && (
                    <img
                      src={message.imageUrl}
                      alt="Property"
                      className="property-image"
                      onError={(e) => { e.target.style.display = 'none' }}
                    />
                  )}

                  {message.structuredMessage && message.structuredMessage.sections.length > 0 ? (
                    <div className="structured-message">
                      {message.structuredMessage.intro && (
                        <p className="message-intro">{message.structuredMessage.intro}</p>
                      )}

                      {message.structuredMessage.sections.map((section, idx) => (
                        <div key={idx} className="message-section">
                          <p className="section-title">{section.title}:</p>
                          <ul className="section-list">
                            {section.content.map((item, itemIdx) => (
                              <li key={itemIdx}>• {item}</li>
                            ))}
                          </ul>
                        </div>
                      ))}
                    </div>
                  ) : message.text && (
                    <p className="message-text">{message.text}</p>
                  )}
                </>
              )}
            </div>
          ))}

          {/* Anchor element that scrollToBottom scrolls into view */}
          <div ref={messagesEndRef} />
        </div>
      )}

      {/* Input Field */}
      <div className="input-container">
        <input
          type="text"
          value={inputValue}
          onChange={(e) => setInputValue(e.target.value)}
          onKeyPress={handleKeyPress}
        />
        <button
          type="button"
          className={`mic-button ${isListening ? 'listening' : ''}`}
          onClick={handleVoiceToggle}
          aria-label="Toggle voice input"
        >
          <Mic />
        </button>
        <button
          type="button"
          className="send-button"
          onClick={handleSend}
          disabled={isLoading}
          aria-label="Send message"
        >
          <Send />
        </button>
      </div>
    </div>
  )
}

export default ChatInterface
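// Usage sketch (assumed wiring, not part of the original file):
//   import ChatInterface from './components/ChatInterface'
//   ...
//   <ChatInterface />
// The component is self-contained; it only requires ChatInterface.css and the
// ../services/api and ../utils/formatMessage modules at the imported paths.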