Fix login data structure and improve voice input UX
Some checks failed
CI/CD Pipeline / Lint and Test (push) Has been cancelled
CI/CD Pipeline / E2E Tests (push) Has been cancelled
CI/CD Pipeline / Build Application (push) Has been cancelled

- Fix login endpoint to return families as array of objects instead of strings
- Update auth interface to match /auth/me endpoint structure
- Add silence detection to voice input (recording auto-stops after 1.5s of silence)
- Add comprehensive status messages to voice modal (Listening, Understanding, Saving)
- Unify voice input flow to use MediaRecorder + backend for all platforms
- Add null checks so the tracking page no longer crashes on invalid data
- Wait for auth completion before loading family data in HomePage

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-10-02 10:25:13 +00:00
parent 4b8828fdad
commit c60467b6f9
9 changed files with 231 additions and 120 deletions

View File

@@ -16,7 +16,7 @@ export async function POST(request: NextRequest) {
let transcribedText: string;
if (contentType.includes('application/json')) {
// Text input (already transcribed)
// Text input (already transcribed) - forward to backend for LLM classification
const body = await request.json();
transcribedText = body.text;
@@ -29,6 +29,41 @@ export async function POST(request: NextRequest) {
{ status: 400 }
);
}
// Forward text to backend for LLM-based classification
const backendUrl = process.env.NEXT_PUBLIC_API_URL || 'http://localhost:3020';
const backendResponse = await fetch(`${backendUrl}/api/v1/voice/transcribe`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
// Forward auth token if present
...(request.headers.get('authorization') && {
authorization: request.headers.get('authorization')!,
}),
},
body: JSON.stringify({
text: transcribedText,
language: body.language || 'en',
childName: body.childName,
}),
});
if (!backendResponse.ok) {
const errorData = await backendResponse.json();
return NextResponse.json(errorData, { status: backendResponse.status });
}
const result = await backendResponse.json();
// Backend returns { success, transcript, classification }
return NextResponse.json(
{
success: true,
transcript: result.transcript,
classification: result.classification,
},
{ status: 200 }
);
} else if (contentType.includes('multipart/form-data')) {
// Audio file upload - forward to backend for Whisper transcription
const formData = await request.formData();