import { useState, useEffect, useCallback, useRef } from 'react';

export interface VoiceInputResult {
  transcript: string;
  confidence: number;
  isFinal: boolean;
}

export interface VoiceInputState {
  isListening: boolean;
  isSupported: boolean;
  transcript: string;
  error: string | null;
}

/**
 * Hook for voice input using the browser Web Speech API.
 *
 * Provides voice recording functionality with real-time transcription.
 * Falls back gracefully if the browser doesn't support Speech Recognition.
 */
export function useVoiceInput() {
  const [state, setState] = useState<VoiceInputState>({
    isListening: false,
    isSupported: false,
    transcript: '',
    error: null,
  });

  // SpeechRecognition instance and auto-stop timer
  const recognitionRef = useRef<any>(null);
  const timeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  // Check if the browser supports Speech Recognition
  useEffect(() => {
    const SpeechRecognition =
      (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;

    if (SpeechRecognition) {
      setState(prev => ({ ...prev, isSupported: true }));

      // Initialize recognition
      const recognition = new SpeechRecognition();
      recognition.continuous = false;     // Single recognition
      recognition.interimResults = true;  // Get interim results
      recognition.maxAlternatives = 1;
      recognition.lang = 'en-US';         // Default language

      recognitionRef.current = recognition;
    } else {
      setState(prev => ({ ...prev, isSupported: false }));
    }

    return () => {
      if (recognitionRef.current) {
        recognitionRef.current.stop();
      }
      if (timeoutRef.current) {
        clearTimeout(timeoutRef.current);
      }
    };
  }, []);

  // Start listening
  const startListening = useCallback(() => {
    if (!recognitionRef.current) {
      setState(prev => ({
        ...prev,
        error: 'Speech recognition not supported in this browser',
      }));
      return;
    }

    const recognition = recognitionRef.current;

    // Clear previous state
    setState(prev => ({
      ...prev,
      isListening: true,
      transcript: '',
      error: null,
    }));

    // Set up event handlers
    recognition.onstart = () => {
      console.log('[Voice] Started listening');
    };

    recognition.onresult = (event: any) => {
      let interimTranscript = '';
      let finalTranscript = '';

      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript;
        if (event.results[i].isFinal) {
          finalTranscript += transcript;
        } else {
          interimTranscript += transcript;
        }
      }

      setState(prev => ({
        ...prev,
        transcript: finalTranscript || interimTranscript,
      }));
    };

    recognition.onerror = (event: any) => {
      console.error('[Voice] Error:', event.error);

      let errorMessage = 'Failed to recognize speech';
      if (event.error === 'no-speech') {
        errorMessage = 'No speech detected. Please try again.';
      } else if (event.error === 'audio-capture') {
        errorMessage = 'No microphone found. Please check your settings.';
      } else if (event.error === 'not-allowed') {
        errorMessage = 'Microphone access denied. Please grant permission.';
      } else if (event.error === 'network') {
        errorMessage = 'Network error. Please check your connection.';
      }

      setState(prev => ({
        ...prev,
        isListening: false,
        error: errorMessage,
      }));
    };

    recognition.onend = () => {
      console.log('[Voice] Stopped listening');
      setState(prev => ({
        ...prev,
        isListening: false,
      }));
    };

    // Auto-stop after 10 seconds
    timeoutRef.current = setTimeout(() => {
      if (recognitionRef.current) {
        recognitionRef.current.stop();
      }
    }, 10000);

    // Start recognition
    try {
      recognition.start();
    } catch (error) {
      console.error('[Voice] Failed to start:', error);
      setState(prev => ({
        ...prev,
        isListening: false,
        error: 'Failed to start voice recognition',
      }));
    }
  }, []);

  // Stop listening
  const stopListening = useCallback(() => {
    if (recognitionRef.current) {
      recognitionRef.current.stop();
    }
    if (timeoutRef.current) {
      clearTimeout(timeoutRef.current);
      timeoutRef.current = null;
    }
  }, []);

  // Reset transcript and error state
  const reset = useCallback(() => {
    setState(prev => ({
      ...prev,
      transcript: '',
      error: null,
    }));
  }, []);

  return {
    ...state,
    startListening,
    stopListening,
    reset,
  };
}
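/*
 * Example usage (illustrative sketch only — `VoiceSearch` below is a
 * hypothetical consumer component, not part of this module; it relies solely
 * on the hook's returned state and callbacks shown above):
 *
 *   function VoiceSearch() {
 *     const {
 *       isListening,
 *       isSupported,
 *       transcript,
 *       error,
 *       startListening,
 *       stopListening,
 *     } = useVoiceInput();
 *
 *     if (!isSupported) {
 *       return <p>Voice input is not supported in this browser.</p>;
 *     }
 *
 *     return (
 *       <div>
 *         <button onClick={isListening ? stopListening : startListening}>
 *           {isListening ? 'Stop' : 'Speak'}
 *         </button>
 *         <p>{transcript}</p>
 *         {error && <p role="alert">{error}</p>}
 *       </div>
 *     );
 *   }
 */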