Files
biblical-guide.com/app/api/chat/route.ts
Andrei a01377b21a feat: implement AI chat with vector search and random loading messages
Major Features:
-  AI chat with Azure OpenAI GPT-4o integration
-  Vector search across Bible versions (ASV English, RVA 1909 Spanish)
-  Multi-language support with automatic English fallback
-  Bible version citations in responses [ASV] [RVA 1909]
-  Random Bible-themed loading messages (5 variants)
-  Safe build script with memory guardrails
-  8GB swap memory for build safety
-  Stripe donation integration (multiple payment methods)

AI Chat Improvements:
- Implement vector search with 1536-dim embeddings (Azure text-embedding-ada-002)
- Search all Bible versions in user's language, fallback to English
- Cite Bible versions properly in AI responses
- Add 5 random loading messages: "Searching the Scriptures...", etc.
- Fix Ollama conflict (disabled to use Azure OpenAI exclusively)
- Optimize hybrid search queries for actual table schema

Build & Infrastructure:
- Create safe-build.sh script with memory monitoring (prevents server crashes)
- Add 8GB swap memory for emergency relief
- Document build process in BUILD_GUIDE.md
- Set Node.js memory limits (4GB max during builds)

Database:
- Clean up 115 old vector tables with wrong dimensions
- Keep only 2 tables with correct 1536-dim embeddings
- Add Stripe schema for donations and subscriptions

Documentation:
- AI_CHAT_FINAL_STATUS.md - Complete implementation status
- AI_CHAT_IMPLEMENTATION_COMPLETE.md - Technical details
- BUILD_GUIDE.md - Safe building guide with guardrails
- CHAT_LOADING_MESSAGES.md - Loading messages implementation
- STRIPE_IMPLEMENTATION_COMPLETE.md - Stripe integration docs
- STRIPE_SETUP_GUIDE.md - Stripe configuration guide

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-12 19:37:24 +00:00

509 lines
18 KiB
TypeScript

import { NextResponse } from 'next/server'
import { z } from 'zod'
import { PrismaClient, ChatMessageRole } from '@prisma/client'
import { searchBibleHybrid, BibleVerse } from '@/lib/vector-search'
import { verifyToken } from '@/lib/auth'
// Single Prisma client for this route module.
// NOTE(review): in Next.js dev mode each hot reload re-evaluates this module and
// creates a fresh PrismaClient; consider the global-singleton pattern — confirm.
const prisma = new PrismaClient()
// Pin this route to the Node.js runtime (presumably because Prisma and the
// token verification need Node APIs unavailable on the Edge runtime — confirm).
export const runtime = 'nodejs'
// Shape of the incoming chat request body.
const chatRequestSchema = z.object({
// The user's chat message; must be non-empty.
message: z.string().min(1),
// Existing conversation to append to; omitted when starting a new one.
conversationId: z.string().optional(),
// UI language code; defaults to Romanian.
locale: z.string().optional().default('ro'),
// Keep history for backward compatibility with frontend
history: z.array(z.object({
id: z.string(),
role: z.enum(['user', 'assistant']),
content: z.string(),
timestamp: z.string()
})).optional().default([])
})
/**
 * POST /api/chat
 *
 * Authenticated chat endpoint: validates the request body, verifies the
 * Bearer token, loads (or creates) the conversation, generates an AI answer
 * grounded in Bible-verse search, and persists both sides of the exchange.
 *
 * Responses:
 *  - 200 { success: true, response, conversationId }
 *  - 400 when the body fails schema validation
 *  - 401 when the Bearer token is missing or invalid
 *  - 500 on any other failure
 */
export async function POST(request: Request) {
  try {
    const body = await request.json()
    // `history` stays in the schema for frontend backward compatibility, but
    // authenticated requests rebuild context from the database, so it is not
    // destructured here.
    const { message, conversationId, locale } = chatRequestSchema.parse(body)

    // Chat is available to authenticated users only.
    const authHeader = request.headers.get('authorization')
    if (!authHeader?.startsWith('Bearer ')) {
      return NextResponse.json(
        {
          success: false,
          error: 'Authentication required to use chat functionality',
          code: 'AUTH_REQUIRED'
        },
        { status: 401 }
      )
    }

    // NOTE: after this point userId is always set, so the old anonymous-user
    // branch (which copied the client-provided history) was unreachable and
    // has been removed.
    let userId: string
    try {
      const token = authHeader.substring(7) // strip the "Bearer " prefix
      const payload = await verifyToken(token)
      userId = payload.userId
      console.log('Chat API - authenticated user:', userId)
    } catch {
      return NextResponse.json(
        {
          success: false,
          error: 'Invalid or expired authentication token',
          code: 'AUTH_INVALID'
        },
        { status: 401 }
      )
    }

    let finalConversationId = conversationId
    let conversationHistory: any[] = []
    if (conversationId) {
      // Load the conversation and its most recent messages for context.
      // BUGFIX: findFirst, not findUnique — the filter combines the id with
      // non-unique columns (userId, isActive), which findUnique rejects.
      const conversation = await prisma.chatConversation.findFirst({
        where: {
          id: conversationId,
          userId,
          isActive: true
        },
        include: {
          messages: {
            orderBy: { timestamp: 'desc' },
            take: 15, // Last 15 messages for context
            select: {
              role: true,
              content: true,
              timestamp: true
            }
          }
        }
      })
      // Unknown/foreign conversation ids fall through with empty context;
      // the save transaction below will then fail on the FK — same behavior
      // as before. TODO(review): consider a 404 here instead.
      if (conversation) {
        conversationHistory = conversation.messages
          .reverse() // Oldest first for context
          .map(msg => ({
            role: msg.role.toLowerCase(),
            content: msg.content,
            timestamp: msg.timestamp.toISOString()
          }))
      }
    } else {
      // First message: start a new conversation titled after it.
      const conversation = await prisma.chatConversation.create({
        data: {
          userId,
          title: generateConversationTitle(message),
          language: locale,
          lastMessageAt: new Date()
        }
      })
      finalConversationId = conversation.id
    }

    // Generate the AI response grounded in verses + conversation context.
    const aiResponse = await generateBiblicalResponse(message, locale, conversationHistory)

    console.log('Chat API - conversation saving check:', {
      userId: userId ? 'present' : 'null',
      finalConversationId: finalConversationId ? 'present' : 'null',
      willSave: !!(userId && finalConversationId)
    })

    if (finalConversationId) {
      // Persist both messages and bump the conversation timestamp atomically.
      await prisma.$transaction([
        // Save user message
        prisma.chatMessage.create({
          data: {
            conversationId: finalConversationId,
            userId,
            role: ChatMessageRole.USER,
            content: message,
            timestamp: new Date()
          }
        }),
        // Save AI response
        prisma.chatMessage.create({
          data: {
            conversationId: finalConversationId,
            userId,
            role: ChatMessageRole.ASSISTANT,
            content: aiResponse,
            timestamp: new Date()
          }
        }),
        // Update conversation last message time
        prisma.chatConversation.update({
          where: { id: finalConversationId },
          data: { lastMessageAt: new Date() }
        })
      ])
    }

    return NextResponse.json({
      success: true,
      response: aiResponse,
      conversationId: finalConversationId
    })
  } catch (error) {
    console.error('Error in chat API:', error)
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        {
          success: false,
          error: 'Invalid request format',
          details: error.errors
        },
        { status: 400 }
      )
    }
    return NextResponse.json(
      {
        success: false,
        error: 'Failed to process chat message'
      },
      { status: 500 }
    )
  }
}
/**
 * Generate the AI answer for a chat message.
 *
 * Pipeline: hybrid vector search for relevant Bible verses (best effort —
 * a search failure falls back to answering without verses), build a
 * locale-specific system prompt embedding the verses and conversation
 * context, then call Azure OpenAI chat completions. Any failure returns a
 * canned fallback response in the user's language.
 */
async function generateBiblicalResponse(message: string, locale: string, history: any[]): Promise<string> {
  try {
    console.log('Chat API - Starting biblical response generation for:', message.substring(0, 50))

    // Verse lookup is best-effort: a vector-search failure must not sink
    // the whole chat request.
    let relevantVerses: any[] = []
    try {
      // Search for relevant Bible verses using vector search with language filtering
      relevantVerses = await searchBibleHybrid(message, locale, 5)
      console.log('Chat API - Vector search successful, found', relevantVerses.length, 'verses')
    } catch (vectorError) {
      console.warn('Chat API - Vector search failed:', vectorError instanceof Error ? vectorError.message : String(vectorError))
      // Continue without verses - test if Azure OpenAI works alone
    }

    // Map a raw source table reference (e.g. ai_bible."bv_en_eng_asv") to a
    // human-readable Bible version name for citations.
    const getVersionName = (sourceTable: string): string => {
      if (!sourceTable) return 'Unknown'
      // Extract table name: ai_bible."bv_en_eng_asv" -> bv_en_eng_asv
      const tableName = sourceTable.split('.').pop()?.replace(/"/g, '') || ''
      // Map table names to friendly version names
      const versionMap: Record<string, string> = {
        'bv_en_eng_asv': 'ASV (American Standard Version)',
        'bv_es_sparv1909': 'RVA 1909 (Reina-Valera Antigua)',
        // Add more as needed
      }
      return versionMap[tableName] || tableName
    }

    // Render verses as `[Version] Ref: "text"` lines for the prompt.
    const versesContext = relevantVerses
      .map(verse => {
        const version = getVersionName(verse.source_table)
        return `[${version}] ${verse.ref}: "${verse.text_raw}"`
      })
      .join('\n\n')

    // Token-bounded, relevance-filtered conversation history.
    const conversationHistory = buildSmartContext(history, message, locale)

    // Language-specific system prompts; unknown locales fall back to English.
    const systemPrompts = {
      ro: `Ești un asistent AI biblic expert în limba română. Răspunde pe baza Scripturii, fiind precis și empatic.
INSTRUCȚIUNI IMPORTANTE:
- CITEAZĂ ÎNTOTDEAUNA versiunea biblică folosind formatul [Versiune] Referință
Exemplu: "[ASV] Ioan 3:16" sau "[RVA 1909] Juan 3:16"
- Folosește versurile biblice furnizate mai jos pentru a răspunde
- Răspunde ÎNTOTDEAUNA în română, chiar dacă versetele sunt în alte limbi
- Dacă folosești versuri în engleză sau alte limbi, explică-le în română
- Fii respectuos, înțelept și încurajator
- Dacă întrebarea nu are răspuns clar în Scriptură, menționează-l cu onestitate
Versuri biblice relevante găsite:
${versesContext || 'Nu s-au găsit versete specifice. Răspunde pe baza cunoștințelor biblice generale.'}
Conversația anterioară:
${conversationHistory}
Întrebarea curentă: ${message}`,
      en: `You are an expert Biblical AI assistant in English. Answer based on Scripture, being precise and empathetic.
IMPORTANT INSTRUCTIONS:
- ALWAYS cite the Bible version using the format [Version] Reference
Example: "[ASV] John 3:16" or "[RVA 1909] Juan 3:16"
- Use the Bible verses provided below to answer the question
- ALWAYS respond in English
- Be respectful, wise, and encouraging
- If the question doesn't have a clear answer in Scripture, state that honestly
- When multiple versions are available, cite the most relevant ones
Relevant Bible verses found:
${versesContext || 'No specific verses found. Answer based on general biblical knowledge.'}
Previous conversation:
${conversationHistory}
Current question: ${message}`,
      // BUGFIX: "mencio nalo" -> "menciónalo" (typo in the Spanish prompt).
      es: `Eres un asistente bíblico experto en español. Responde basándote en las Escrituras, siendo preciso y empático.
INSTRUCCIONES IMPORTANTES:
- SIEMPRE cita la versión bíblica usando el formato [Versión] Referencia
Ejemplo: "[RVA 1909] Juan 3:16" o "[ASV] John 3:16"
- Usa los versículos bíblicos proporcionados abajo para responder
- SIEMPRE responde en español, incluso si los versículos están en otros idiomas
- Si usas versículos en inglés u otros idiomas, explícalos en español
- Sé respetuoso, sabio y alentador
- Si la pregunta no tiene respuesta clara en las Escrituras, menciónalo honestamente
Versículos bíblicos relevantes encontrados:
${versesContext || 'No se encontraron versículos específicos. Responde basándote en conocimiento bíblico general.'}
Conversación anterior:
${conversationHistory}
Pregunta actual: ${message}`
    }
    const systemPrompt = systemPrompts[locale as keyof typeof systemPrompts] || systemPrompts.en

    // Call Azure OpenAI chat completions via the REST API.
    console.log('Chat API - Calling Azure OpenAI with endpoint:', process.env.AZURE_OPENAI_ENDPOINT)
    console.log('Chat API - Using deployment:', process.env.AZURE_OPENAI_DEPLOYMENT)
    const response = await fetch(
      `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_OPENAI_DEPLOYMENT}/chat/completions?api-version=${process.env.AZURE_OPENAI_API_VERSION}`,
      {
        method: 'POST',
        headers: {
          'api-key': process.env.AZURE_OPENAI_KEY!,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          messages: [
            {
              role: 'system',
              content: systemPrompt
            },
            {
              role: 'user',
              content: message
            }
          ],
          max_tokens: 2000,
          temperature: 0.7,
          top_p: 0.9
        }),
      }
    )
    console.log('Chat API - Azure OpenAI response status:', response.status)
    if (!response.ok) {
      throw new Error(`Azure OpenAI API error: ${response.status}`)
    }

    const data = await response.json()
    // Handle content filtering or empty responses
    if (!data.choices || data.choices.length === 0) {
      throw new Error('No response choices returned from Azure OpenAI')
    }
    const choice = data.choices[0]
    // Check for content filtering
    if (choice.finish_reason === 'content_filter') {
      console.warn('Content was filtered by Azure OpenAI:', choice.content_filter_results)
      throw new Error('Content was filtered by Azure OpenAI content policy')
    }
    // Check if message content exists
    if (!choice.message || !choice.message.content) {
      throw new Error('Empty response content from Azure OpenAI')
    }
    return choice.message.content
  } catch (error) {
    console.error('Error calling Azure OpenAI:', error)
    // Language-specific fallback responses.
    // BUGFIX: a Spanish fallback was missing — es users previously received
    // the English fallback even though the es prompt path is fully supported.
    const fallbackResponses = {
      ro: `Îmi pare rău, dar întâmpin o problemă tehnică în acest moment. Te încurajez să cercetezi acest subiect în Scripturi și să te rogi pentru înțelegere.
"Cercetați Scripturile, pentru că socotiți că în ele aveți viața veșnică, și tocmai ele mărturisesc despre Mine" (Ioan 5:39).
"Dacă vreunul dintre voi duce lipsă de înțelepciune, să ceară de la Dumnezeu, care dă tuturor cu dărnicie și fără mustrare, și i se va da" (Iacov 1:5).`,
      en: `Sorry, I'm experiencing a technical issue at the moment. I encourage you to research this topic in Scripture and pray for understanding.
"You study the Scriptures diligently because you think that in them you have eternal life. These are the very Scriptures that testify about me" (John 5:39).
"If any of you lacks wisdom, you should ask God, who gives generously to all without finding fault, and it will be given to you" (James 1:5).`,
      es: `Lo siento, estoy experimentando un problema técnico en este momento. Te animo a investigar este tema en las Escrituras y orar por entendimiento.
"Escudriñad las Escrituras; porque a vosotros os parece que en ellas tenéis la vida eterna; y ellas son las que dan testimonio de mí" (Juan 5:39).
"Y si alguno de vosotros tiene falta de sabiduría, demándela a Dios, el cual da a todos abundantemente, y no zahiere; y le será dada" (Santiago 1:5).`
    }
    return fallbackResponses[locale as keyof typeof fallbackResponses] || fallbackResponses.en
  }
}
/**
 * Derive a conversation title from its opening message.
 * Titles are capped at 50 characters: messages longer than 47 characters
 * are truncated to 47 and suffixed with an ellipsis.
 */
function generateConversationTitle(message: string): string {
  if (message.length <= 47) {
    return message
  }
  return `${message.substring(0, 47)}...`
}
function buildSmartContext(history: any[], currentMessage: string, locale: string): string {
if (history.length === 0) return ''
const MAX_CONTEXT_TOKENS = 1500 // Reserve tokens for context
const RECENT_MESSAGES_COUNT = 6 // Always include last 6 messages
// Step 1: Always include the most recent messages for immediate context
const recentMessages = history.slice(-RECENT_MESSAGES_COUNT)
// Step 2: Calculate relevance scores for older messages
const olderMessages = history.slice(0, -RECENT_MESSAGES_COUNT)
const relevantOlderMessages = findRelevantMessages(olderMessages, currentMessage, locale)
// Step 3: Combine recent + relevant older messages
const selectedMessages = [...relevantOlderMessages, ...recentMessages]
// Step 4: Apply token-based truncation if needed
const optimizedContext = optimizeContextForTokens(selectedMessages, MAX_CONTEXT_TOKENS)
// Step 5: Format for AI consumption
return formatContextForAI(optimizedContext)
}
/**
 * Rank older messages by relevance to the current question and keep the
 * top scorers: at most five messages, each scoring above 0.3, ordered
 * from most to least relevant.
 */
function findRelevantMessages(messages: any[], currentMessage: string, locale: string): any[] {
  if (messages.length === 0) return []

  // Attach a relevance score to every candidate message.
  const scored = messages.map(candidate => ({
    ...candidate,
    relevanceScore: calculateMessageRelevance(candidate, currentMessage, locale)
  }))

  // Keep only sufficiently relevant candidates, best first, capped at five.
  return scored
    .filter(candidate => candidate.relevanceScore > 0.3)
    .sort((a, b) => b.relevanceScore - a.relevanceScore)
    .slice(0, 5)
}
/**
 * Score how relevant a past message is to the current question (0..1).
 *
 * Components: keyword/substring overlap (+0.2 per matched question word),
 * presence of a biblical reference such as "John 3" (+0.4), user-authored
 * messages (+0.1), all multiplied by a time decay that halves relevance
 * over roughly a week (floored at 0.5).
 */
function calculateMessageRelevance(message: any, currentMessage: string, locale: string): number {
  const msgContent = message.content.toLowerCase()
  const currentContent = currentMessage.toLowerCase()
  let score = 0

  // Keyword overlap: each question word (>3 chars) that appears as a
  // substring match against any message word adds 0.2.
  const currentWords = currentContent.split(/\s+/).filter(word => word.length > 3)
  const messageWords = msgContent.split(/\s+/)
  for (const word of currentWords) {
    if (messageWords.some((mWord: string) => mWord.includes(word) || word.includes(mWord))) {
      score += 0.2
    }
  }

  // Biblical reference detection (book name followed by a chapter number).
  // BUGFIX: dropped the /g flag — a global regex carries lastIndex state
  // across .test() calls, making repeated tests unreliable; 'i' alone is
  // all this check needs.
  const biblicalPatterns = locale === 'ro'
    ? [/\b(geneza|exod|levitic|numeri|deuteronom|iosua|judecători|rut|samuel|regi|cronici|ezra|neemia|estera|iov|psalmi|proverbe|ecclesiast|cântarea|isaia|ieremia|plângeri|ezechiel|daniel|osea|ioel|amos|obadia|iona|mica|naum|habacuc|țefania|hagai|zaharia|maleahi|matei|marcu|luca|ioan|faptele|romani|corinteni|galateni|efeseni|filipeni|coloseni|tesaloniceni|timotei|tit|filimon|evrei|iacov|petru|ioan|iuda|apocalipsa)\s*\d+/i]
    : [/\b(genesis|exodus|leviticus|numbers|deuteronomy|joshua|judges|ruth|samuel|kings|chronicles|ezra|nehemiah|esther|job|psalm|proverbs|ecclesiastes|song|isaiah|jeremiah|lamentations|ezekiel|daniel|hosea|joel|amos|obadiah|jonah|micah|nahum|habakkuk|zephaniah|haggai|zechariah|malachi|matthew|mark|luke|john|acts|romans|corinthians|galatians|ephesians|philippians|colossians|thessalonians|timothy|titus|philemon|hebrews|james|peter|jude|revelation)\s*\d+/i]
  if (biblicalPatterns.some(pattern => pattern.test(msgContent))) {
    score += 0.4
  }

  // User questions are weighted slightly above assistant replies.
  if (message.role === 'user') {
    score += 0.1
  }

  // Time decay: linear over a week, never below half weight.
  const messageAge = Date.now() - new Date(message.timestamp).getTime()
  const hoursAge = messageAge / (1000 * 60 * 60)
  const timeDecay = Math.max(0.5, 1 - (hoursAge / 168)) // decay over a week
  return Math.min(1.0, score * timeDecay)
}
/**
 * Trim a candidate message list to fit a rough token budget.
 *
 * Walks newest-to-oldest, keeping whole messages while they fit. The first
 * message that would overflow may still be included as a condensed summary
 * (only while fewer than three messages are kept and it is not the oldest
 * candidate); everything older is dropped. Token cost is estimated at
 * roughly 3.5 characters per token.
 */
function optimizeContextForTokens(messages: any[], maxTokens: number): any[] {
  const kept: any[] = []
  let usedTokens = 0

  for (let i = messages.length - 1; i >= 0; i--) {
    const candidate = messages[i]
    const cost = Math.ceil(candidate.content.length / 3.5)
    if (usedTokens + cost > maxTokens) {
      // Overflow: optionally squeeze in a summarized version, then stop.
      if (i > 0 && kept.length < 3) {
        const condensed = summarizeMessage(candidate)
        const condensedCost = Math.ceil(condensed.length / 3.5)
        if (usedTokens + condensedCost <= maxTokens) {
          kept.unshift({ ...candidate, content: condensed, isSummary: true })
          usedTokens += condensedCost
        }
      }
      break
    }
    kept.unshift(candidate) // prepend to preserve chronological order
    usedTokens += cost
  }
  return kept
}
/**
 * Condense a long chat message for token-limited context windows.
 * Content of 100 characters or fewer — or with at most two substantial
 * sentences — passes through untouched. Longer content is reduced to its
 * first and last sentences (or its first two, when there are exactly
 * three) behind a "[Summary]" tag.
 */
function summarizeMessage(message: any): string {
  const text = message.content
  if (text.length <= 100) return text

  // Split on sentence terminators, ignoring trivially short fragments.
  const parts = text
    .split(/[.!?]+/)
    .filter((part: string) => part.trim().length > 10)
  if (parts.length <= 2) return text

  let condensed: string
  if (parts.length > 3) {
    // Keep the opening and closing sentences.
    condensed = `${parts[0].trim()}... ${parts[parts.length - 1].trim()}`
  } else {
    condensed = parts.slice(0, 2).join('. ').trim()
  }
  return `[Summary] ${condensed}`
}
/**
 * Render selected context messages as "role: content" lines, one per line,
 * for inclusion in the system prompt. Summarized messages carry a
 * "[Summary]" tag.
 */
function formatContextForAI(messages: any[]): string {
  if (messages.length === 0) return ''
  return messages
    .map(msg => {
      // BUGFIX: summaries produced by summarizeMessage() already start with
      // "[Summary] ", so the original code emitted the tag twice; only add
      // it when the content is not already tagged.
      const needsTag = msg.isSummary && !msg.content.startsWith('[Summary]')
      const prefix = needsTag ? '[Summary] ' : ''
      return `${prefix}${msg.role}: ${msg.content}`
    })
    .join('\n')
}