Files
maternal-app/maternal-web/app/api/ai/chat/route.ts
Andrei f640e091ce
Some checks failed
CI/CD Pipeline / Build Application (push) Has been cancelled
CI/CD Pipeline / Lint and Test (push) Has been cancelled
CI/CD Pipeline / E2E Tests (push) Has been cancelled
Add prompt injection protection for AI endpoints
Implemented comprehensive security against prompt injection attacks:

**Detection Patterns:**
- System prompt manipulation (ignore/disregard/forget instructions)
- Role manipulation (pretend to be, act as)
- Data exfiltration (show system prompt, list users)
- Command injection (execute code, run command)
- Jailbreak attempts (DAN mode, developer mode, admin mode)

**Input Validation:**
- Maximum length: 2,000 characters
- Maximum line length: 500 characters
- Maximum repeated characters: 20 consecutive
- Special character ratio limit: 30%
- HTML/JavaScript injection blocking

**Sanitization:**
- HTML tag removal
- Zero-width character stripping
- Control character removal
- Whitespace normalization

**Rate Limiting:**
- 5 suspicious attempts per minute per user
- Automatic clearing on successful validation
- Per-user tracking with session storage

**Context Awareness:**
- Parenting keyword validation
- Domain-appropriate scope checking
- Lenient validation for short prompts

**Implementation:**
- lib/security/promptSecurity.ts - Core validation logic
- app/api/ai/chat/route.ts - Integrated validation
- scripts/test-prompt-injection.mjs - 19 test cases (all passing)
- lib/security/README.md - Documentation

**Test Coverage:**
- Valid parenting questions (2 tests)
- System manipulation attempts (4 tests)
- Role manipulation (1 test)
- Data exfiltration (3 tests)
- Command injection (2 tests)
- Jailbreak techniques (2 tests)
- Length attacks (2 tests)
- Character encoding attacks (2 tests)
- Edge cases (1 test)

All suspicious attempts are logged with user ID, reason, risk level,
and timestamp for security monitoring.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-01 20:15:11 +00:00

91 lines
2.7 KiB
TypeScript

import { NextRequest, NextResponse } from 'next/server';
import { aiLimiter } from '@/lib/middleware/rateLimiter';
import { validateAIPrompt, logSuspiciousPrompt } from '@/lib/security/promptSecurity';
/**
 * AI chat endpoint with rate limiting and prompt injection protection.
 *
 * Flow: rate limit -> parse/validate body -> prompt-injection screening ->
 * forward the sanitized message to the backend AI service.
 * Limited to 10 queries per hour for free tier users (enforced by aiLimiter).
 *
 * @param request - Incoming Next.js request; expects a JSON body with
 *   `message` (required string) and optional `childId` / `conversationId`.
 * @returns 400 on malformed/invalid/rejected input, the backend's status on
 *   forwarded errors, 200 with the backend payload on success, 500 on
 *   unexpected failure.
 */
export async function POST(request: NextRequest) {
  // Apply rate limiting first. aiLimiter resolves to a response only when
  // the caller has exceeded their quota; otherwise it is falsy.
  const rateLimitResult = await aiLimiter(request);
  if (rateLimitResult) return rateLimitResult;

  // Parse the body with its own narrow try/catch so malformed JSON is
  // reported as a client error (400) instead of falling through to the
  // generic 500 handler below, which would mislabel it AI_CHAT_FAILED.
  let body: { message?: unknown; childId?: unknown; conversationId?: unknown };
  try {
    body = await request.json();
  } catch {
    return NextResponse.json(
      {
        error: 'AI_INVALID_INPUT',
        message: 'Request body must be valid JSON',
      },
      { status: 400 }
    );
  }

  const { message, childId, conversationId } = body;

  // Validate message input: required and string-typed. The typeof check
  // also narrows `message` to string for the calls below.
  if (!message || typeof message !== 'string') {
    return NextResponse.json(
      {
        error: 'AI_INVALID_INPUT',
        message: 'Message must be a non-empty string',
      },
      { status: 400 }
    );
  }

  try {
    // Screen the prompt for injection attempts (system-prompt manipulation,
    // role manipulation, data exfiltration, jailbreaks) and get a sanitized copy.
    const validationResult = validateAIPrompt(message);
    if (!validationResult.isValid) {
      // Log the security event for monitoring; the user id comes from a
      // header presumably set by upstream auth middleware — may be absent.
      logSuspiciousPrompt(
        message,
        request.headers.get('x-user-id') || undefined,
        validationResult.reason || 'Unknown',
        validationResult.riskLevel
      );

      return NextResponse.json(
        {
          error: 'AI_PROMPT_REJECTED',
          message: validationResult.reason || 'Your message could not be processed',
          riskLevel: validationResult.riskLevel,
        },
        { status: 400 }
      );
    }

    // Prefer the sanitized prompt; fall back to the original if the
    // validator did not produce one.
    const sanitizedMessage = validationResult.sanitizedPrompt || message;

    // TODO: Implement actual AI chat logic.
    // This is a placeholder - actual AI integration will be handled by backend.
    // For now, forward to backend API with the sanitized message.
    const backendUrl = process.env.NEXT_PUBLIC_API_URL || 'http://localhost:3020';
    const response = await fetch(`${backendUrl}/api/v1/ai/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        // Forward auth token from client so the backend can authorize.
        Authorization: request.headers.get('Authorization') || '',
      },
      body: JSON.stringify({
        message: sanitizedMessage, // never forward the raw, unsanitized input
        childId,
        conversationId,
      }),
    });

    const data = await response.json();

    // Propagate backend errors with their original status code and payload.
    if (!response.ok) {
      return NextResponse.json(data, { status: response.status });
    }

    return NextResponse.json(data, { status: 200 });
  } catch (error) {
    // Unexpected failure (validator threw, backend unreachable, non-JSON
    // backend response, etc.) — log with context and return a generic 500.
    console.error('[AI] Chat error:', error);
    return NextResponse.json(
      {
        error: 'AI_CHAT_FAILED',
        message: 'AI assistant is currently unavailable. Please try again later.',
      },
      { status: 500 }
    );
  }
}