Fix voice command data structure and prevent duplicate activities

Backend changes:
- Update LLM prompt to use correct field names matching frontend interfaces (see the sketch after this list)
  - Use 'diaperType' instead of 'type' for diaper activities
  - Use 'feedingType' instead of 'method' for feeding activities
  - Simplify sleep structure (duration, quality, location only)
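
To make the field alignment concrete, a minimal sketch of the detail shapes the frontend appears to expect (interface names and optionality are assumptions; only the field names and value sets mirror the prompt changes below):

// Hypothetical TypeScript shapes; interface names and optional markers are
// assumptions, only the field names and value sets follow the updated prompt.
interface FeedingDetails {
  feedingType: 'bottle' | 'breast' | 'solids';
  amount?: number | null;
  unit?: 'ml' | 'oz' | null;
  side?: 'left' | 'right' | 'both' | null; // breastfeeding only
  duration?: number | null; // minutes
  notes?: string | null;
}

interface SleepDetails {
  duration?: number | null; // minutes
  quality?: 'peaceful' | 'restless' | 'fussy' | null;
  location?: string | null;
  notes?: string | null;
}

interface DiaperDetails {
  diaperType: 'wet' | 'dirty' | 'both';
  color?: string | null;
  consistency?: string | null;
  rash?: boolean | null;
}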

Frontend changes:
- Add processedClassificationId tracking to prevent infinite loop
- Create unique ID for each classification to avoid duplicate processing
- Reset processed ID when dialog opens/closes or new recording starts

This fixes the issue where a single voice command created duplicate
activities and produced mismatched data structures that triggered
tracker warnings.
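
For example, the dedup key is a plain template string over one classification run; a sketch with made-up values (only the key construction matches the change below):

// Illustrative values; the real transcript and classification come from the voice hook.
const transcript = 'she had a wet diaper about ten minutes ago';
const classification = { type: 'diaper', timestamp: '2025-10-02T10:34:00Z' };

// Same composite ID construction as in the useEffect below
const classificationId = `${transcript}-${classification.type}-${classification.timestamp}`;
// => 'she had a wet diaper about ten minutes ago-diaper-2025-10-02T10:34:00Z'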

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-02 10:44:52 +00:00
parent c60467b6f9
commit 77f2c1d767
2 changed files with 19 additions and 9 deletions


@@ -172,23 +172,21 @@ Extract activity details from the user's text and respond ONLY with valid JSON (
"timestamp": "ISO 8601 datetime if mentioned (e.g., '3pm', '30 minutes ago'), otherwise use current time",
"details": {
// For feeding:
"feedingType": "bottle|breast|solids",
"amount": number or null,
"unit": "ml|oz" or null,
"method": "bottle|breast|solids" or null,
"side": "left|right|both" or null,
"side": "left|right|both" or null (for breastfeeding only),
"duration": number (minutes) or null,
"notes": string or null
// For sleep:
"start_time": "ISO 8601" or null,
"end_time": "ISO 8601" or null,
"quality": "peaceful|restless|fussy",
"location": "crib|bassinet|arms|bed|stroller|car seat",
"duration": number (minutes) or null,
"quality": "peaceful|restless|fussy" or null,
"location": string or null,
"notes": string or null
// For diaper:
"type": "wet|dirty|both",
"diaperType": "wet|dirty|both",
"color": string or null,
"consistency": string or null,
"rash": boolean or null,


@@ -39,6 +39,7 @@ export function VoiceFloatingButton() {
   const [processingStatus, setProcessingStatus] = useState<'listening' | 'understanding' | 'saving' | null>(null);
   const [identifiedActivity, setIdentifiedActivity] = useState<string>('');
   const [classificationResult, setClassificationResult] = useState<any>(null);
+  const [processedClassificationId, setProcessedClassificationId] = useState<string | null>(null);
   const [snackbar, setSnackbar] = useState<{
     open: boolean;
     message: string;
@@ -67,11 +68,19 @@ export function VoiceFloatingButton() {
   // Auto-use classification from backend when transcription completes
   // MediaRecorder sends audio to backend, which transcribes + classifies in one call
   React.useEffect(() => {
-    if (classification && !isListening && !isProcessing && open) {
+    // Create a unique ID for this classification based on transcript + type + timestamp
+    const classificationId = classification
+      ? `${transcript}-${classification.type}-${classification.timestamp}`
+      : null;
+
+    // Only process if we haven't already processed this exact classification
+    if (classification && !isListening && !isProcessing && open && classificationId !== processedClassificationId) {
+      console.log('[Voice] New classification detected, processing...', classificationId);
+      setProcessedClassificationId(classificationId);
       setClassificationResult(classification);
       handleClassifiedIntent(classification);
     }
-  }, [classification, isListening, isProcessing, open]);
+  }, [classification, isListening, isProcessing, open, transcript, processedClassificationId]);
 
   const handleOpen = () => {
     if (!isSupported) {
@@ -87,6 +96,7 @@ export function VoiceFloatingButton() {
     setClassificationResult(null);
     setProcessingStatus(null);
     setIdentifiedActivity('');
+    setProcessedClassificationId(null);
   };
 
   const handleClose = () => {
@@ -98,11 +108,13 @@ export function VoiceFloatingButton() {
     setClassificationResult(null);
     setProcessingStatus(null);
     setIdentifiedActivity('');
+    setProcessedClassificationId(null);
   };
 
   const handleStartListening = () => {
     reset();
     setClassificationResult(null);
+    setProcessedClassificationId(null);
     startListening();
   };