Fix Web Speech API desktop voice recognition
- Set continuous=true to keep listening through pauses
- Only process final results, ignore interim transcripts
- Add usesFallback check to route Web Speech API transcripts through classification
- Desktop now captures complete phrases before classification
- Add detailed logging for debugging recognition flow

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -44,10 +44,10 @@ export function VoiceFloatingButton() {
|
||||
severity: 'info',
|
||||
});
|
||||
|
||||
const { isListening, isSupported, transcript, classification, error, startListening, stopListening, reset } =
|
||||
const { isListening, isSupported, transcript, classification, error, usesFallback, startListening, stopListening, reset } =
|
||||
useVoiceInput();
|
||||
|
||||
// Auto-use classification from backend when transcription completes
|
||||
// Auto-use classification from backend when transcription completes (MediaRecorder fallback)
|
||||
React.useEffect(() => {
|
||||
if (classification && !isListening && !isProcessing && open) {
|
||||
setClassificationResult(classification);
|
||||
@@ -55,6 +55,13 @@ export function VoiceFloatingButton() {
|
||||
}
|
||||
}, [classification, isListening, isProcessing, open]);
|
||||
|
||||
// For Web Speech API (desktop), classify the transcript client-side
|
||||
React.useEffect(() => {
|
||||
if (!usesFallback && transcript && !isListening && !isProcessing && open && transcript !== lastClassifiedTranscript) {
|
||||
classifyTranscript(transcript);
|
||||
}
|
||||
}, [usesFallback, transcript, isListening, isProcessing, open, lastClassifiedTranscript]);
|
||||
|
||||
const handleOpen = () => {
|
||||
if (!isSupported) {
|
||||
setSnackbar({
|
||||
|
||||
Reference in New Issue
Block a user