import { NextRequest, NextResponse } from 'next/server';
import { aiLimiter } from '@/lib/middleware/rateLimiter';

/**
 * AI chat endpoint with rate limiting.
 * Limited to 10 queries per hour for free-tier users.
 */
export async function POST(request: NextRequest) {
  // Apply rate limiting; a truthy result is the limiter's rejection response,
  // which is returned to the client as-is.
  const rateLimitResult = await aiLimiter(request);
  if (rateLimitResult) return rateLimitResult;

  try {
    const body = await request.json();
    const { message, childId, conversationId } = body;

    // TODO: Implement actual AI chat logic.
    // This is a placeholder - actual AI integration will be handled by the backend.
    // For now, forward the request to the backend API.
    const backendUrl = process.env.NEXT_PUBLIC_API_URL || 'http://localhost:3020';

    const response = await fetch(`${backendUrl}/api/v1/ai/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        // Forward the auth token from the client
        Authorization: request.headers.get('Authorization') || '',
      },
      body: JSON.stringify({ message, childId, conversationId }),
    });

    const data = await response.json();

    if (!response.ok) {
      return NextResponse.json(data, { status: response.status });
    }

    return NextResponse.json(data, { status: 200 });
  } catch (error) {
    console.error('[AI] Chat error:', error);
    return NextResponse.json(
      {
        error: 'AI_CHAT_FAILED',
        message: 'AI assistant is currently unavailable. Please try again later.',
      },
      { status: 500 }
    );
  }
}
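
/*
 * For reference, a minimal sketch of what `aiLimiter` in
 * '@/lib/middleware/rateLimiter' could look like, given the 10-requests-per-hour
 * limit described above. This assumes a simple in-memory fixed-window counter
 * keyed by client IP; the real middleware may use Redis or a per-user key
 * instead, and the names below (WINDOW_MS, MAX_REQUESTS, hits) are illustrative
 * only. Kept in a comment so this route file stays valid.
 *
 *   import { NextRequest, NextResponse } from 'next/server';
 *
 *   const WINDOW_MS = 60 * 60 * 1000; // 1-hour window
 *   const MAX_REQUESTS = 10;          // free-tier limit
 *   const hits = new Map<string, { count: number; resetAt: number }>();
 *
 *   export async function aiLimiter(request: NextRequest): Promise<NextResponse | null> {
 *     const key = request.headers.get('x-forwarded-for') ?? 'anonymous';
 *     const now = Date.now();
 *     const entry = hits.get(key);
 *
 *     // Start a new window for first-time or expired entries.
 *     if (!entry || now > entry.resetAt) {
 *       hits.set(key, { count: 1, resetAt: now + WINDOW_MS });
 *       return null; // null means "allow the request"
 *     }
 *
 *     // Over the limit: return a 429 that the route handler passes through.
 *     if (entry.count >= MAX_REQUESTS) {
 *       return NextResponse.json(
 *         { error: 'RATE_LIMITED', message: 'Too many AI requests. Please try again later.' },
 *         { status: 429, headers: { 'Retry-After': String(Math.ceil((entry.resetAt - now) / 1000)) } }
 *       );
 *     }
 *
 *     entry.count += 1;
 *     return null;
 *   }
 */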