biblical-guide.com/app/api/chat/route.ts
andupetcu 24c0577f44 Fix chat history authentication and conversation saving
- Fix critical async/await bug in chat API token verification
- Add comprehensive authentication debugging logs
- Fix conversations API Zod schema validation for query parameters
- Remove problematic CircularProgress import causing build warnings
- Improve error handling and user feedback in chat component

The main issue was that verifyToken() was called without await, causing
the chat API to receive a Promise object instead of the user payload,
resulting in undefined userId and failed conversation persistence.
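
In short, the token verification has to be awaited before its payload is
read; a minimal sketch with a hypothetical resolveUserId helper (assuming
verifyToken resolves to a payload carrying userId, as the route below
expects):

    import { verifyToken } from '@/lib/auth'

    async function resolveUserId(token: string): Promise<string | null> {
      try {
        // Without `await`, payload would be a pending Promise and
        // payload.userId would read back as undefined.
        const payload = await verifyToken(token)
        return payload.userId
      } catch {
        return null // fall back to anonymous access
      }
    }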

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-22 12:47:59 +03:00

import { NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { PrismaClient, ChatMessageRole } from '@prisma/client'
import { searchBibleHybrid, BibleVerse } from '@/lib/vector-search'
import { verifyToken } from '@/lib/auth'

const prisma = new PrismaClient()

export const runtime = 'nodejs'

const chatRequestSchema = z.object({
  message: z.string().min(1),
  conversationId: z.string().optional(),
  locale: z.string().optional().default('ro'),
  // Keep history for backward compatibility with frontend
  history: z.array(z.object({
    id: z.string(),
    role: z.enum(['user', 'assistant']),
    content: z.string(),
    timestamp: z.string()
  })).optional().default([])
})
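
/**
 * POST /api/chat
 * Accepts a chat message (plus optional conversationId, locale and history),
 * optionally authenticates the caller via a Bearer token, generates a
 * Bible-grounded answer and, for authenticated users, persists the exchange.
 */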
export async function POST(request: NextRequest) {
  try {
    const body = await request.json()
    const { message, conversationId, locale, history } = chatRequestSchema.parse(body)

    // Try to get user from authentication (optional for backward compatibility)
    let userId: string | null = null
    const authHeader = request.headers.get('authorization')
    console.log('Chat API - authHeader present:', !!authHeader)

    if (authHeader?.startsWith('Bearer ')) {
      try {
        const token = authHeader.substring(7)
        console.log('Chat API - token extracted, length:', token.length)
        const payload = await verifyToken(token)
        console.log('Chat API - token payload:', payload)
        userId = payload.userId
        console.log('Chat API - userId extracted from token:', userId)
      } catch (error) {
        // Continue without authentication for backward compatibility
        console.log('Chat API - authentication failed:', error instanceof Error ? error.message : error)
      }
    } else {
      console.log('Chat API - no valid auth header')
    }

    // Handle conversation logic
    let finalConversationId = conversationId
    let conversationHistory: any[] = []

    if (userId) {
      // User is authenticated - use conversation system
      if (conversationId) {
        // Load existing conversation
        const conversation = await prisma.chatConversation.findUnique({
          where: {
            id: conversationId,
            userId,
            isActive: true
          },
          include: {
            messages: {
              orderBy: { timestamp: 'desc' },
              take: 15, // Last 15 messages for context
              select: {
                role: true,
                content: true,
                timestamp: true
              }
            }
          }
        })

        if (conversation) {
          conversationHistory = conversation.messages
            .reverse() // Oldest first for context
            .map(msg => ({
              role: msg.role.toLowerCase(),
              content: msg.content,
              timestamp: msg.timestamp.toISOString()
            }))
        }
      } else {
        // Create new conversation
        const conversation = await prisma.chatConversation.create({
          data: {
            userId,
            title: generateConversationTitle(message),
            language: locale,
            lastMessageAt: new Date()
          }
        })
        finalConversationId = conversation.id
      }
    } else {
      // Anonymous user - use provided history for backward compatibility
      conversationHistory = history
    }

    // Generate AI response
    const aiResponse = await generateBiblicalResponse(message, locale, conversationHistory)

    // Save messages to database if user is authenticated
    console.log('Chat API - conversation saving check:', {
      userId: userId ? 'present' : 'null',
      finalConversationId: finalConversationId ? 'present' : 'null',
      willSave: !!(userId && finalConversationId)
    })

    if (userId && finalConversationId) {
      await prisma.$transaction([
        // Save user message
        prisma.chatMessage.create({
          data: {
            conversationId: finalConversationId,
            userId,
            role: ChatMessageRole.USER,
            content: message,
            timestamp: new Date()
          }
        }),
        // Save AI response
        prisma.chatMessage.create({
          data: {
            conversationId: finalConversationId,
            userId,
            role: ChatMessageRole.ASSISTANT,
            content: aiResponse,
            timestamp: new Date()
          }
        }),
        // Update conversation last message time
        prisma.chatConversation.update({
          where: { id: finalConversationId },
          data: { lastMessageAt: new Date() }
        })
      ])
    }

    return NextResponse.json({
      success: true,
      response: aiResponse,
      conversationId: finalConversationId
    })
  } catch (error) {
    console.error('Error in chat API:', error)

    if (error instanceof z.ZodError) {
      return NextResponse.json(
        {
          success: false,
          error: 'Invalid request format',
          details: error.errors
        },
        { status: 400 }
      )
    }

    return NextResponse.json(
      {
        success: false,
        error: 'Failed to process chat message'
      },
      { status: 500 }
    )
  }
}
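
/**
 * Builds a language-specific system prompt from relevant verses (hybrid
 * vector search) and prior conversation context, then calls Azure OpenAI.
 * Falls back to a static Scripture-based reply if the call fails.
 */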
async function generateBiblicalResponse(message: string, locale: string, history: any[]): Promise<string> {
  try {
    // Search for relevant Bible verses using vector search with language filtering
    const relevantVerses = await searchBibleHybrid(message, locale, 5)

    // Create context from relevant verses
    const versesContext = relevantVerses
      .map(verse => `${verse.ref}: "${verse.text_raw}"`)
      .join('\n\n')

    // Intelligent context selection for conversation history
    const conversationHistory = buildSmartContext(history, message, locale)

    // Create language-specific system prompts
    const systemPrompts = {
      ro: `Ești un asistent AI pentru întrebări biblice în limba română. Răspunde pe baza Scripturii, fiind respectuos și înțelept.
Instrucțiuni:
- Folosește versurile biblice relevante pentru a răspunde la întrebare
- Citează întotdeauna referințele biblice (ex: Ioan 3:16)
- Răspunde în română
- Fii empatic și încurajator
- Dacă nu ești sigur, încurajează studiul personal și rugăciunea
Versuri relevante pentru această întrebare:
${versesContext}
Conversația anterioară:
${conversationHistory}
Întrebarea curentă: ${message}`,
      en: `You are an AI assistant for biblical questions in English. Answer based on Scripture, being respectful and wise.
Instructions:
- Use the relevant Bible verses to answer the question
- Always cite biblical references (e.g., John 3:16)
- Respond in English
- Be empathetic and encouraging
- If unsure, encourage personal study and prayer
Relevant verses for this question:
${versesContext}
Previous conversation:
${conversationHistory}
Current question: ${message}`
    }

    const systemPrompt = systemPrompts[locale as keyof typeof systemPrompts] || systemPrompts.en

    // Call Azure OpenAI
    const response = await fetch(
      `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_OPENAI_DEPLOYMENT}/chat/completions?api-version=${process.env.AZURE_OPENAI_API_VERSION}`,
      {
        method: 'POST',
        headers: {
          'api-key': process.env.AZURE_OPENAI_KEY!,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          messages: [
            {
              role: 'system',
              content: systemPrompt
            },
            {
              role: 'user',
              content: message
            }
          ],
          max_tokens: 2000,
          temperature: 0.7,
          top_p: 0.9
        }),
      }
    )

    if (!response.ok) {
      throw new Error(`Azure OpenAI API error: ${response.status}`)
    }

    const data = await response.json()
    return data.choices[0].message.content
  } catch (error) {
    console.error('Error calling Azure OpenAI:', error)

    // Language-specific fallback responses
    const fallbackResponses = {
      ro: `Îmi pare rău, dar întâmpin o problemă tehnică în acest moment. Te încurajez să cercetezi acest subiect în Scripturi și să te rogi pentru înțelegere.
"Cercetați Scripturile, pentru că socotiți că în ele aveți viața veșnică, și tocmai ele mărturisesc despre Mine" (Ioan 5:39).
"Dacă vreunul dintre voi duce lipsă de înțelepciune, să ceară de la Dumnezeu, care dă tuturor cu dărnicie și fără mustrare, și i se va da" (Iacov 1:5).`,
      en: `Sorry, I'm experiencing a technical issue at the moment. I encourage you to research this topic in Scripture and pray for understanding.
"You study the Scriptures diligently because you think that in them you have eternal life. These are the very Scriptures that testify about me" (John 5:39).
"If any of you lacks wisdom, you should ask God, who gives generously to all without finding fault, and it will be given to you" (James 1:5).`
    }

    return fallbackResponses[locale as keyof typeof fallbackResponses] || fallbackResponses.en
  }
}

function generateConversationTitle(message: string): string {
  // Generate a title from the first message (max 50 characters)
  const title = message.length > 47
    ? message.substring(0, 47) + '...'
    : message

  return title
}
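
/**
 * Selects the conversation context sent to the model: always keeps the most
 * recent messages, adds older messages scored as relevant to the current
 * question, and trims the result to a rough token budget.
 */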
function buildSmartContext(history: any[], currentMessage: string, locale: string): string {
  if (history.length === 0) return ''

  const MAX_CONTEXT_TOKENS = 1500 // Reserve tokens for context
  const RECENT_MESSAGES_COUNT = 6 // Always include last 6 messages

  // Step 1: Always include the most recent messages for immediate context
  const recentMessages = history.slice(-RECENT_MESSAGES_COUNT)

  // Step 2: Calculate relevance scores for older messages
  const olderMessages = history.slice(0, -RECENT_MESSAGES_COUNT)
  const relevantOlderMessages = findRelevantMessages(olderMessages, currentMessage, locale)

  // Step 3: Combine recent + relevant older messages
  const selectedMessages = [...relevantOlderMessages, ...recentMessages]

  // Step 4: Apply token-based truncation if needed
  const optimizedContext = optimizeContextForTokens(selectedMessages, MAX_CONTEXT_TOKENS)

  // Step 5: Format for AI consumption
  return formatContextForAI(optimizedContext)
}
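
/**
 * Scores older messages against the current question and returns up to five
 * of the most relevant ones (only those scoring above 0.3).
 */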
function findRelevantMessages(messages: any[], currentMessage: string, locale: string): any[] {
  if (messages.length === 0) return []

  // Score messages based on relevance to current question
  const scoredMessages = messages.map(msg => ({
    ...msg,
    relevanceScore: calculateMessageRelevance(msg, currentMessage, locale)
  }))

  // Sort by relevance and take up to 5 of the most relevant older messages
  return scoredMessages
    .filter(msg => msg.relevanceScore > 0.3) // Only include somewhat relevant messages
    .sort((a, b) => b.relevanceScore - a.relevanceScore)
    .slice(0, 5) // Max 5 relevant older messages
}
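
/**
 * Heuristic relevance score in [0, 1]: keyword overlap with the current
 * question, a bonus for biblical book references, a small bonus for user
 * messages, all attenuated by a time decay over roughly one week.
 */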
function calculateMessageRelevance(message: any, currentMessage: string, locale: string): number {
  const msgContent = message.content.toLowerCase()
  const currentContent = currentMessage.toLowerCase()
  let score = 0

  // Keyword overlap scoring
  const currentWords = currentContent.split(/\s+/).filter(word => word.length > 3)
  const messageWords = msgContent.split(/\s+/)

  for (const word of currentWords) {
    if (messageWords.some(mWord => mWord.includes(word) || word.includes(mWord))) {
      score += 0.2
    }
  }

  // Biblical reference detection (higher relevance)
  const biblicalPatterns = locale === 'ro'
    ? [/\b(geneza|exod|levitic|numeri|deuteronom|iosua|judecători|rut|samuel|regi|cronici|ezra|neemia|estera|iov|psalmi|proverbe|ecclesiast|cântarea|isaia|ieremia|plângeri|ezechiel|daniel|osea|ioel|amos|obadia|iona|mica|naum|habacuc|țefania|hagai|zaharia|maleahi|matei|marcu|luca|ioan|faptele|romani|corinteni|galateni|efeseni|filipeni|coloseni|tesaloniceni|timotei|tit|filimon|evrei|iacov|petru|ioan|iuda|apocalipsa)\s*\d+/gi]
    : [/\b(genesis|exodus|leviticus|numbers|deuteronomy|joshua|judges|ruth|samuel|kings|chronicles|ezra|nehemiah|esther|job|psalm|proverbs|ecclesiastes|song|isaiah|jeremiah|lamentations|ezekiel|daniel|hosea|joel|amos|obadiah|jonah|micah|nahum|habakkuk|zephaniah|haggai|zechariah|malachi|matthew|mark|luke|john|acts|romans|corinthians|galatians|ephesians|philippians|colossians|thessalonians|timothy|titus|philemon|hebrews|james|peter|jude|revelation)\s*\d+/gi]

  if (biblicalPatterns.some(pattern => pattern.test(msgContent))) {
    score += 0.4
  }

  // Recent user questions get higher relevance
  if (message.role === 'user') {
    score += 0.1
  }

  // Time decay (older messages get slightly lower scores)
  const messageAge = Date.now() - new Date(message.timestamp).getTime()
  const hoursAge = messageAge / (1000 * 60 * 60)
  const timeDecay = Math.max(0.5, 1 - (hoursAge / 168)) // Decay over a week

  return Math.min(1.0, score * timeDecay)
}
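
/**
 * Keeps as many of the selected messages as fit in the token budget, walking
 * from newest to oldest with a rough estimate of about 3.5 characters per
 * token; the first message that does not fit may be included as a summary
 * before stopping.
 */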
function optimizeContextForTokens(messages: any[], maxTokens: number): any[] {
  // Rough token estimation (1 token ≈ 4 characters in English, 3 in Romanian)
  let currentTokens = 0
  const optimizedMessages = []

  // Start from most recent and work backwards
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i]
    const estimatedTokens = Math.ceil(message.content.length / 3.5)

    if (currentTokens + estimatedTokens <= maxTokens) {
      optimizedMessages.unshift(message) // Add to beginning to maintain order
      currentTokens += estimatedTokens
    } else {
      // If this message would exceed the limit, try to include a summary instead
      if (i > 0 && optimizedMessages.length < 3) {
        const summary = summarizeMessage(message)
        const summaryTokens = Math.ceil(summary.length / 3.5)
        if (currentTokens + summaryTokens <= maxTokens) {
          optimizedMessages.unshift({
            ...message,
            content: summary,
            isSummary: true
          })
          currentTokens += summaryTokens
        }
      }
      break
    }
  }

  return optimizedMessages
}
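
/**
 * Shortens a long message by keeping its first and last sentences; short
 * messages are returned unchanged.
 */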
function summarizeMessage(message: any): string {
  const content = message.content
  if (content.length <= 100) return content

  // Extract key points and questions
  const sentences = content.split(/[.!?]+/).filter(s => s.trim().length > 10)
  if (sentences.length <= 2) return content

  // Keep first and last sentence, or most important parts
  const summary = sentences.length > 3
    ? `${sentences[0].trim()}... ${sentences[sentences.length - 1].trim()}`
    : sentences.slice(0, 2).join('. ').trim()

  // The "[Summary]" marker is added by formatContextForAI via isSummary,
  // so it is not duplicated here.
  return summary
}
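
/**
 * Flattens the selected messages into "role: content" lines, prefixing
 * summarized entries with a "[Summary]" marker.
 */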
function formatContextForAI(messages: any[]): string {
  if (messages.length === 0) return ''

  return messages.map(msg => {
    const prefix = msg.isSummary ? '[Summary] ' : ''
    return `${prefix}${msg.role}: ${msg.content}`
  }).join('\n')
}