Fix login data structure and improve voice input UX
- Fix login endpoint to return families as an array of objects instead of strings (shape sketched below)
- Update auth interface to match the /auth/me endpoint structure
- Add silence detection to voice input (auto-stop after 1.5s; see the sketch below)
- Add comprehensive status messages to the voice modal (Listening, Understanding, Saving)
- Unify the voice input flow to use MediaRecorder + backend for all platforms (see the sketch after the diff)
- Add null checks to prevent tracking-page crashes from invalid data
- Wait for auth completion before loading family data in HomePage

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
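The first two bullets change the payload shape that auth consumers receive. A minimal sketch of what the updated interface could look like; the field names besides `families` are assumptions, since the diff on this page only covers the voice component:

// Sketch only: the commit states that `families` is now an array of objects
// matching /auth/me. The exact fields are assumed, not shown in this diff.
interface FamilySummary {
  id: string;
  name: string;
}

interface AuthUser {
  id: string;
  email: string;
  families: FamilySummary[]; // previously: string[]
}

// Callers now read an id off the object instead of treating the entry as the id:
declare const user: AuthUser;
const familyId = user.families[0]?.id;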
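The 1.5 s auto-stop lives inside the voice-input hook and is not part of the diff below. A plausible sketch of such silence detection over a MediaRecorder stream, assuming a Web Audio AnalyserNode; the RMS threshold and poll interval are illustrative guesses, not values from this commit:

const SILENCE_MS = 1500;

// Sketch only: auto-stop a MediaRecorder after 1.5s of sustained silence.
function monitorSilence(stream: MediaStream, recorder: MediaRecorder): () => void {
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  ctx.createMediaStreamSource(stream).connect(analyser);

  const samples = new Uint8Array(analyser.fftSize);
  let silentSince = performance.now();

  const timer = setInterval(() => {
    analyser.getByteTimeDomainData(samples);
    // RMS deviation from the 128 midpoint (the silence baseline).
    let sum = 0;
    for (const s of samples) sum += (s - 128) ** 2;
    const rms = Math.sqrt(sum / samples.length) / 128;

    if (rms > 0.01) {
      silentSince = performance.now(); // heard speech: reset the clock
    } else if (performance.now() - silentSince > SILENCE_MS && recorder.state === 'recording') {
      recorder.stop(); // 1.5s of silence: end the recording
    }
  }, 100);

  // Caller invokes this to clean up when recording ends.
  return () => {
    clearInterval(timer);
    ctx.close();
  };
}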
@@ -36,8 +36,9 @@ export function VoiceFloatingButton() {
   const { user } = useAuth();
   const [open, setOpen] = useState(false);
   const [isProcessing, setIsProcessing] = useState(false);
+  const [processingStatus, setProcessingStatus] = useState<'listening' | 'understanding' | 'saving' | null>(null);
+  const [identifiedActivity, setIdentifiedActivity] = useState<string>('');
   const [classificationResult, setClassificationResult] = useState<any>(null);
   const [lastClassifiedTranscript, setLastClassifiedTranscript] = useState<string>('');
   const [snackbar, setSnackbar] = useState<{
     open: boolean;
     message: string;
@@ -53,7 +54,18 @@ export function VoiceFloatingButton() {
   const { isListening, isSupported, transcript, classification, error, usesFallback, startListening, stopListening, reset } =
     useVoiceInput();
 
-  // Auto-use classification from backend when transcription completes (MediaRecorder fallback)
+  // Set status when listening starts/stops
+  React.useEffect(() => {
+    if (isListening) {
+      setProcessingStatus('listening');
+    } else if (processingStatus === 'listening' && transcript) {
+      // Transition from listening to understanding when we have a transcript
+      setProcessingStatus('understanding');
+    }
+  }, [isListening, transcript]);
+
+  // Auto-use classification from backend when transcription completes
+  // MediaRecorder sends audio to backend, which transcribes + classifies in one call
   React.useEffect(() => {
     if (classification && !isListening && !isProcessing && open) {
       setClassificationResult(classification);
@@ -61,13 +73,6 @@ export function VoiceFloatingButton() {
     }
   }, [classification, isListening, isProcessing, open]);
 
-  // For Web Speech API (desktop), classify the transcript client-side
-  React.useEffect(() => {
-    if (!usesFallback && transcript && !isListening && !isProcessing && open && transcript !== lastClassifiedTranscript) {
-      classifyTranscript(transcript);
-    }
-  }, [usesFallback, transcript, isListening, isProcessing, open, lastClassifiedTranscript]);
-
   const handleOpen = () => {
     if (!isSupported) {
       setSnackbar({
@@ -80,7 +85,8 @@ export function VoiceFloatingButton() {
     setOpen(true);
     reset();
     setClassificationResult(null);
     setLastClassifiedTranscript('');
+    setProcessingStatus(null);
+    setIdentifiedActivity('');
   };
 
   const handleClose = () => {
@@ -90,13 +96,13 @@ export function VoiceFloatingButton() {
     setOpen(false);
     reset();
     setClassificationResult(null);
     setLastClassifiedTranscript('');
+    setProcessingStatus(null);
+    setIdentifiedActivity('');
   };
 
   const handleStartListening = () => {
     reset();
     setClassificationResult(null);
     setLastClassifiedTranscript('');
     startListening();
   };
 
@@ -104,43 +110,12 @@ export function VoiceFloatingButton() {
     stopListening();
   };
 
-  const classifyTranscript = async (text: string) => {
-    // Mark this transcript as being classified to prevent duplicate calls
-    setLastClassifiedTranscript(text);
-    setIsProcessing(true);
-    try {
-      const response = await fetch('/api/voice/transcribe', {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({ text }),
-      });
-
-      const data = await response.json();
-
-      if (response.ok && data.success) {
-        setClassificationResult(data.classification);
-        handleClassifiedIntent(data.classification);
-      } else {
-        setClassificationResult({
-          error: true,
-          message: data.message || 'Could not understand command',
-        });
-      }
-    } catch (error) {
-      console.error('[Voice] Classification error:', error);
-      setClassificationResult({
-        error: true,
-        message: 'Failed to process command',
-      });
-    } finally {
-      setIsProcessing(false);
-    }
-  };
   const handleClassifiedIntent = async (result: any) => {
+    console.log('[Voice] handleClassifiedIntent called with result:', result);
 
     if (result.error) {
+      console.log('[Voice] Result has error:', result.message);
+      setProcessingStatus(null);
       setSnackbar({
         open: true,
         message: result.message,
@@ -149,8 +124,17 @@ export function VoiceFloatingButton() {
       return;
    }
 
+    // Support both formats: backend returns 'type', frontend local classifier returns 'intent'
+    const activityType = result.type || result.intent;
+    console.log('[Voice] Activity type:', activityType);
+
+    // Set identified activity for status display
+    setIdentifiedActivity(activityType);
+
    // Handle unknown or low confidence
-    if (result.type === 'unknown' || (result.confidence && result.confidence < 0.3)) {
+    if (activityType === 'unknown' || (result.confidence && result.confidence < 0.3)) {
+      console.log('[Voice] Unknown or low confidence:', activityType, result.confidence);
+      setProcessingStatus(null);
      setSnackbar({
        open: true,
        message: 'Could not understand the command. Please try again or use manual entry.',
@@ -161,6 +145,8 @@ export function VoiceFloatingButton() {
 
    // Get the first child from the family
    if (!familyId) {
+      console.log('[Voice] No familyId found');
+      setProcessingStatus(null);
      setSnackbar({
        open: true,
        message: 'No family found. Please set up your profile first.',
@@ -169,11 +155,17 @@ export function VoiceFloatingButton() {
      return;
    }
 
+    console.log('[Voice] Family ID:', familyId);
+
    try {
      setIsProcessing(true);
+      setProcessingStatus('saving');
 
+      // Fetch children
+      console.log('[Voice] Fetching children for family:', familyId);
      const children = await childrenApi.getChildren(familyId);
+      console.log('[Voice] Children found:', children.length, children);
 
      if (children.length === 0) {
        setSnackbar({
          open: true,
@@ -186,21 +178,23 @@ export function VoiceFloatingButton() {
 
      // Use the first child (or you could enhance this to support child name matching)
      const childId = children[0].id;
+      console.log('[Voice] Using child ID:', childId);
 
      // Create the activity
      const activityData = {
-        type: result.type,
+        type: activityType,
        timestamp: result.timestamp || new Date().toISOString(),
-        data: result.details || {},
-        notes: result.details?.notes || undefined,
+        data: result.details || result.structuredData || {},
+        notes: result.details?.notes || result.structuredData?.notes || undefined,
      };
 
-      console.log('[Voice] Creating activity:', activityData);
+      console.log('[Voice] Creating activity with data:', JSON.stringify(activityData, null, 2));
 
-      await trackingApi.createActivity(childId, activityData);
+      const createdActivity = await trackingApi.createActivity(childId, activityData);
+      console.log('[Voice] Activity created successfully:', createdActivity);
 
      // Show success message
-      const activityLabel = result.type.charAt(0).toUpperCase() + result.type.slice(1);
+      const activityLabel = activityType.charAt(0).toUpperCase() + activityType.slice(1);
      setSnackbar({
        open: true,
        message: `${activityLabel} activity saved successfully!`,
@@ -212,7 +206,9 @@ export function VoiceFloatingButton() {
        handleClose();
      }, 1500);
    } catch (error: any) {
-      console.error('[Voice] Failed to create activity:', error);
+      console.error('[Voice] Failed to create activity - Full error:', error);
+      console.error('[Voice] Error response:', error.response);
+      console.error('[Voice] Error data:', error.response?.data);
      setSnackbar({
        open: true,
        message: error.response?.data?.message || 'Failed to save activity. Please try again.',
@@ -253,7 +249,7 @@ export function VoiceFloatingButton() {
          Voice Command
          {classificationResult && !classificationResult.error && (
            <Chip
-              label={`${classificationResult.intent} (${classificationResult.confidenceLevel})`}
+              label={`${classificationResult.type || classificationResult.intent} (${classificationResult.confidenceLevel || Math.round((classificationResult.confidence || 0) * 100) + '%'})`}
              color="success"
              size="small"
              sx={{ ml: 2 }}
@@ -287,9 +283,12 @@ export function VoiceFloatingButton() {
            </IconButton>
          </Box>
 
-          {/* Status text */}
+          {/* Status text with detailed processing stages */}
          <Typography variant="body1" color="text.secondary" gutterBottom>
-            {isListening ? 'Listening... Speak now' : 'Click the microphone to start'}
+            {processingStatus === 'listening' && 'Listening... Speak now'}
+            {processingStatus === 'understanding' && 'Understanding your request...'}
+            {processingStatus === 'saving' && identifiedActivity && `Adding to ${identifiedActivity.charAt(0).toUpperCase() + identifiedActivity.slice(1)} tracker...`}
+            {!processingStatus && !isListening && 'Click the microphone to start'}
          </Typography>
 
          {/* Transcript */}
@@ -302,12 +301,14 @@ export function VoiceFloatingButton() {
            </Box>
          )}
 
-          {/* Processing indicator */}
-          {isProcessing && (
+          {/* Processing indicator with status */}
+          {processingStatus && (
            <Box sx={{ mt: 2, display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
              <CircularProgress size={20} sx={{ mr: 1 }} />
              <Typography variant="body2" color="text.secondary">
-                Processing command...
+                {processingStatus === 'listening' && 'Listening...'}
+                {processingStatus === 'understanding' && 'Understanding...'}
+                {processingStatus === 'saving' && 'Saving...'}
              </Typography>
            </Box>
          )}
@@ -316,7 +317,7 @@ export function VoiceFloatingButton() {
          {classificationResult && !classificationResult.error && (
            <Alert severity="success" sx={{ mt: 2 }}>
              <Typography variant="body2" gutterBottom>
-                <strong>Understood:</strong> {classificationResult.intent}
+                <strong>Understood:</strong> {classificationResult.type || classificationResult.intent}
              </Typography>
            </Alert>
          )}
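For context on the unified flow referenced in the comments above (MediaRecorder audio is sent to the backend, which transcribes and classifies in one call), a minimal sketch. The multipart field name, the response shape, and the reuse of /api/voice/transcribe for audio uploads are assumptions; the hook's internals are not shown in this diff:

// Sketch of the unified MediaRecorder path: upload the recorded audio and let
// the backend transcribe + classify in one round trip. The 'audio' field name
// and the response fields are assumptions, not confirmed by the diff.
async function transcribeAndClassify(chunks: Blob[]): Promise<{ transcript: string; classification: unknown }> {
  const body = new FormData();
  body.append('audio', new Blob(chunks, { type: 'audio/webm' }), 'command.webm');

  const response = await fetch('/api/voice/transcribe', { method: 'POST', body });
  if (!response.ok) throw new Error(`Transcription failed: ${response.status}`);
  return response.json(); // expected: { transcript, classification }
}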