- Update chat API to require valid authentication tokens for all requests
- Add authentication requirement screens to both chat components
- Show "Create Account / Sign In" prompts for unauthenticated users
- Hide chat input and functionality until user is logged in
- Return 401 errors with clear messages when authentication is missing
- Maintain bilingual support (Romanian/English) for auth prompts

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
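A minimal sketch of the client side of this contract, assuming the route below is served at `/api/chat` (the path is not shown in this file) and that the chat components hold a JWT after sign-in. The helper name `sendChatMessage` is hypothetical; the request body and the `{ success, error, code }` / `{ success, response, conversationId }` shapes mirror the handler in this file.

```typescript
// Hypothetical client-side helper (not part of this file).
export async function sendChatMessage(
  token: string | null,
  message: string,
  conversationId?: string,
  locale: 'ro' | 'en' = 'ro'
) {
  if (!token) {
    // No session yet: show the "Create Account / Sign In" prompt and keep the input hidden.
    return { requiresAuth: true as const }
  }

  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({ message, conversationId, locale }),
  })

  const data = await res.json()

  if (res.status === 401) {
    // AUTH_REQUIRED or AUTH_INVALID: drop the stale token and re-prompt for sign-in.
    return { requiresAuth: true as const, code: data.code as string }
  }

  return {
    requiresAuth: false as const,
    response: data.response as string,
    conversationId: data.conversationId as string | undefined,
  }
}
```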
468 lines · 16 KiB · TypeScript
import { NextResponse } from 'next/server'
import { z } from 'zod'
import { PrismaClient, ChatMessageRole } from '@prisma/client'
import { searchBibleHybrid, BibleVerse } from '@/lib/vector-search'
import { verifyToken } from '@/lib/auth'

const prisma = new PrismaClient()

export const runtime = 'nodejs'

const chatRequestSchema = z.object({
  message: z.string().min(1),
  conversationId: z.string().optional(),
  locale: z.string().optional().default('ro'),
  // Keep history for backward compatibility with frontend
  history: z.array(z.object({
    id: z.string(),
    role: z.enum(['user', 'assistant']),
    content: z.string(),
    timestamp: z.string()
  })).optional().default([])
})
export async function POST(request: Request) {
  try {
    const body = await request.json()
    const { message, conversationId, locale, history } = chatRequestSchema.parse(body)

    // Require authentication for chat functionality
    let userId: string | null = null
    const authHeader = request.headers.get('authorization')

    if (!authHeader?.startsWith('Bearer ')) {
      return NextResponse.json(
        {
          success: false,
          error: 'Authentication required to use chat functionality',
          code: 'AUTH_REQUIRED'
        },
        { status: 401 }
      )
    }

    try {
      const token = authHeader.substring(7)
      const payload = await verifyToken(token)
      userId = payload.userId
      console.log('Chat API - authenticated user:', userId)
    } catch (error) {
      return NextResponse.json(
        {
          success: false,
          error: 'Invalid or expired authentication token',
          code: 'AUTH_INVALID'
        },
        { status: 401 }
      )
    }
    // Handle conversation logic
    let finalConversationId = conversationId
    let conversationHistory: any[] = []

    if (userId) {
      // User is authenticated - use conversation system
      if (conversationId) {
        // Load existing conversation
        const conversation = await prisma.chatConversation.findUnique({
          where: {
            id: conversationId,
            userId,
            isActive: true
          },
          include: {
            messages: {
              orderBy: { timestamp: 'desc' },
              take: 15, // Last 15 messages for context
              select: {
                role: true,
                content: true,
                timestamp: true
              }
            }
          }
        })

        if (conversation) {
          conversationHistory = conversation.messages
            .reverse() // Oldest first for context
            .map(msg => ({
              role: msg.role.toLowerCase(),
              content: msg.content,
              timestamp: msg.timestamp.toISOString()
            }))
        }
      } else {
        // Create new conversation
        const conversation = await prisma.chatConversation.create({
          data: {
            userId,
            title: generateConversationTitle(message),
            language: locale,
            lastMessageAt: new Date()
          }
        })
        finalConversationId = conversation.id
      }
    } else {
      // Anonymous user - use provided history for backward compatibility
      conversationHistory = history
    }
    // Generate AI response
    const aiResponse = await generateBiblicalResponse(message, locale, conversationHistory)

    // Save messages to database if user is authenticated
    console.log('Chat API - conversation saving check:', {
      userId: userId ? 'present' : 'null',
      finalConversationId: finalConversationId ? 'present' : 'null',
      willSave: !!(userId && finalConversationId)
    })
    if (userId && finalConversationId) {
      await prisma.$transaction([
        // Save user message
        prisma.chatMessage.create({
          data: {
            conversationId: finalConversationId,
            userId,
            role: ChatMessageRole.USER,
            content: message,
            timestamp: new Date()
          }
        }),
        // Save AI response
        prisma.chatMessage.create({
          data: {
            conversationId: finalConversationId,
            userId,
            role: ChatMessageRole.ASSISTANT,
            content: aiResponse,
            timestamp: new Date()
          }
        }),
        // Update conversation last message time
        prisma.chatConversation.update({
          where: { id: finalConversationId },
          data: { lastMessageAt: new Date() }
        })
      ])
    }

    return NextResponse.json({
      success: true,
      response: aiResponse,
      conversationId: finalConversationId
    })
  } catch (error) {
    console.error('Error in chat API:', error)

    if (error instanceof z.ZodError) {
      return NextResponse.json(
        {
          success: false,
          error: 'Invalid request format',
          details: error.errors
        },
        { status: 400 }
      )
    }

    return NextResponse.json(
      {
        success: false,
        error: 'Failed to process chat message'
      },
      { status: 500 }
    )
  }
}
async function generateBiblicalResponse(message: string, locale: string, history: any[]): Promise<string> {
  try {
    console.log('Chat API - Starting biblical response generation for:', message.substring(0, 50))
    let relevantVerses: any[] = []

    try {
      // Search for relevant Bible verses using vector search with language filtering
      relevantVerses = await searchBibleHybrid(message, locale, 5)
      console.log('Chat API - Vector search successful, found', relevantVerses.length, 'verses')
    } catch (vectorError) {
      console.warn('Chat API - Vector search failed:', vectorError instanceof Error ? vectorError.message : String(vectorError))
      // Continue without verses so the Azure OpenAI call can still answer
    }

    // Create context from relevant verses
    const versesContext = relevantVerses
      .map(verse => `${verse.ref}: "${verse.text_raw}"`)
      .join('\n\n')

    // Intelligent context selection for conversation history
    const conversationHistory = buildSmartContext(history, message, locale)
    // Create language-specific system prompts
    const systemPrompts = {
      ro: `Ești un asistent AI pentru întrebări biblice în limba română. Răspunde pe baza Scripturii, fiind respectuos și înțelept.

Instrucțiuni:
- Folosește versurile biblice relevante pentru a răspunde la întrebare
- Citează întotdeauna referințele biblice (ex: Ioan 3:16)
- Răspunde în română
- Fii empatic și încurajator
- Dacă nu ești sigur, încurajează studiul personal și rugăciunea

Versuri relevante pentru această întrebare:
${versesContext}

Conversația anterioară:
${conversationHistory}

Întrebarea curentă: ${message}`,

      en: `You are an AI assistant for biblical questions in English. Answer based on Scripture, being respectful and wise.

Instructions:
- Use the relevant Bible verses to answer the question
- Always cite biblical references (e.g., John 3:16)
- Respond in English
- Be empathetic and encouraging
- If unsure, encourage personal study and prayer

Relevant verses for this question:
${versesContext}

Previous conversation:
${conversationHistory}

Current question: ${message}`
    }

    const systemPrompt = systemPrompts[locale as keyof typeof systemPrompts] || systemPrompts.en
    // Call Azure OpenAI
    console.log('Chat API - Calling Azure OpenAI with endpoint:', process.env.AZURE_OPENAI_ENDPOINT)
    console.log('Chat API - Using deployment:', process.env.AZURE_OPENAI_DEPLOYMENT)

    const response = await fetch(
      `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_OPENAI_DEPLOYMENT}/chat/completions?api-version=${process.env.AZURE_OPENAI_API_VERSION}`,
      {
        method: 'POST',
        headers: {
          'api-key': process.env.AZURE_OPENAI_KEY!,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          messages: [
            {
              role: 'system',
              content: systemPrompt
            },
            {
              role: 'user',
              content: message
            }
          ],
          max_tokens: 2000,
          temperature: 0.7,
          top_p: 0.9
        }),
      }
    )

    console.log('Chat API - Azure OpenAI response status:', response.status)

    if (!response.ok) {
      throw new Error(`Azure OpenAI API error: ${response.status}`)
    }

    const data = await response.json()

    // Handle content filtering or empty responses
    if (!data.choices || data.choices.length === 0) {
      throw new Error('No response choices returned from Azure OpenAI')
    }

    const choice = data.choices[0]

    // Check for content filtering
    if (choice.finish_reason === 'content_filter') {
      console.warn('Content was filtered by Azure OpenAI:', choice.content_filter_results)
      throw new Error('Content was filtered by Azure OpenAI content policy')
    }

    // Check if message content exists
    if (!choice.message || !choice.message.content) {
      throw new Error('Empty response content from Azure OpenAI')
    }

    return choice.message.content
  } catch (error) {
    console.error('Error calling Azure OpenAI:', error)

    // Language-specific fallback responses
    const fallbackResponses = {
      ro: `Îmi pare rău, dar întâmpin o problemă tehnică în acest moment. Te încurajez să cercetezi acest subiect în Scripturi și să te rogi pentru înțelegere.

"Cercetați Scripturile, pentru că socotiți că în ele aveți viața veșnică, și tocmai ele mărturisesc despre Mine" (Ioan 5:39).

"Dacă vreunul dintre voi duce lipsă de înțelepciune, să ceară de la Dumnezeu, care dă tuturor cu dărnicie și fără mustrare, și i se va da" (Iacov 1:5).`,

      en: `Sorry, I'm experiencing a technical issue at the moment. I encourage you to research this topic in Scripture and pray for understanding.

"You study the Scriptures diligently because you think that in them you have eternal life. These are the very Scriptures that testify about me" (John 5:39).

"If any of you lacks wisdom, you should ask God, who gives generously to all without finding fault, and it will be given to you" (James 1:5).`
    }

    return fallbackResponses[locale as keyof typeof fallbackResponses] || fallbackResponses.en
  }
}
function generateConversationTitle(message: string): string {
  // Generate a title from the first message (max 50 characters)
  const title = message.length > 47
    ? message.substring(0, 47) + '...'
    : message

  return title
}
function buildSmartContext(history: any[], currentMessage: string, locale: string): string {
  if (history.length === 0) return ''

  const MAX_CONTEXT_TOKENS = 1500 // Reserve tokens for context
  const RECENT_MESSAGES_COUNT = 6 // Always include last 6 messages

  // Step 1: Always include the most recent messages for immediate context
  const recentMessages = history.slice(-RECENT_MESSAGES_COUNT)

  // Step 2: Calculate relevance scores for older messages
  const olderMessages = history.slice(0, -RECENT_MESSAGES_COUNT)
  const relevantOlderMessages = findRelevantMessages(olderMessages, currentMessage, locale)

  // Step 3: Combine recent + relevant older messages
  const selectedMessages = [...relevantOlderMessages, ...recentMessages]

  // Step 4: Apply token-based truncation if needed
  const optimizedContext = optimizeContextForTokens(selectedMessages, MAX_CONTEXT_TOKENS)

  // Step 5: Format for AI consumption
  return formatContextForAI(optimizedContext)
}
function findRelevantMessages(messages: any[], currentMessage: string, locale: string): any[] {
  if (messages.length === 0) return []

  // Score messages based on relevance to current question
  const scoredMessages = messages.map(msg => ({
    ...msg,
    relevanceScore: calculateMessageRelevance(msg, currentMessage, locale)
  }))

  // Sort by relevance and keep up to the 5 most relevant older messages
  return scoredMessages
    .filter(msg => msg.relevanceScore > 0.3) // Only include somewhat relevant messages
    .sort((a, b) => b.relevanceScore - a.relevanceScore)
    .slice(0, 5) // Max 5 relevant older messages
}
function calculateMessageRelevance(message: any, currentMessage: string, locale: string): number {
  const msgContent = message.content.toLowerCase()
  const currentContent = currentMessage.toLowerCase()

  let score = 0

  // Keyword overlap scoring
  const currentWords = currentContent.split(/\s+/).filter(word => word.length > 3)
  const messageWords = msgContent.split(/\s+/)

  for (const word of currentWords) {
    if (messageWords.some((mWord: string) => mWord.includes(word) || word.includes(mWord))) {
      score += 0.2
    }
  }

  // Biblical reference detection (higher relevance)
  const biblicalPatterns = locale === 'ro'
    ? [/\b(geneza|exod|levitic|numeri|deuteronom|iosua|judecători|rut|samuel|regi|cronici|ezra|neemia|estera|iov|psalmi|proverbe|ecclesiast|cântarea|isaia|ieremia|plângeri|ezechiel|daniel|osea|ioel|amos|obadia|iona|mica|naum|habacuc|țefania|hagai|zaharia|maleahi|matei|marcu|luca|ioan|faptele|romani|corinteni|galateni|efeseni|filipeni|coloseni|tesaloniceni|timotei|tit|filimon|evrei|iacov|petru|ioan|iuda|apocalipsa)\s*\d+/gi]
    : [/\b(genesis|exodus|leviticus|numbers|deuteronomy|joshua|judges|ruth|samuel|kings|chronicles|ezra|nehemiah|esther|job|psalm|proverbs|ecclesiastes|song|isaiah|jeremiah|lamentations|ezekiel|daniel|hosea|joel|amos|obadiah|jonah|micah|nahum|habakkuk|zephaniah|haggai|zechariah|malachi|matthew|mark|luke|john|acts|romans|corinthians|galatians|ephesians|philippians|colossians|thessalonians|timothy|titus|philemon|hebrews|james|peter|jude|revelation)\s*\d+/gi]

  if (biblicalPatterns.some(pattern => pattern.test(msgContent))) {
    score += 0.4
  }

  // User questions get a small extra boost
  if (message.role === 'user') {
    score += 0.1
  }

  // Time decay (older messages get slightly lower scores)
  const messageAge = Date.now() - new Date(message.timestamp).getTime()
  const hoursAge = messageAge / (1000 * 60 * 60)
  const timeDecay = Math.max(0.5, 1 - (hoursAge / 168)) // Decay over a week

  return Math.min(1.0, score * timeDecay)
}
function optimizeContextForTokens(messages: any[], maxTokens: number): any[] {
  // Rough token estimation (1 token ≈ 4 characters in English, 3 in Romanian)
  let currentTokens = 0
  const optimizedMessages: any[] = []

  // Start from most recent and work backwards
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i]
    const estimatedTokens = Math.ceil(message.content.length / 3.5)

    if (currentTokens + estimatedTokens <= maxTokens) {
      optimizedMessages.unshift(message) // Add to beginning to maintain order
      currentTokens += estimatedTokens
    } else {
      // If this message would exceed the limit, try to include a summary instead
      if (i > 0 && optimizedMessages.length < 3) {
        const summary = summarizeMessage(message)
        const summaryTokens = Math.ceil(summary.length / 3.5)
        if (currentTokens + summaryTokens <= maxTokens) {
          optimizedMessages.unshift({
            ...message,
            content: summary,
            isSummary: true
          })
          currentTokens += summaryTokens
        }
      }
      break
    }
  }

  return optimizedMessages
}
function summarizeMessage(message: any): string {
  const content = message.content
  if (content.length <= 100) return content

  // Extract key points and questions
  const sentences = content.split(/[.!?]+/).filter((s: string) => s.trim().length > 10)
  if (sentences.length <= 2) return content

  // Keep first and last sentence, or most important parts
  const summary = sentences.length > 3
    ? `${sentences[0].trim()}... ${sentences[sentences.length - 1].trim()}`
    : sentences.slice(0, 2).join('. ').trim()

  // The '[Summary]' marker is added by formatContextForAI via the isSummary flag,
  // so it is not prepended here to avoid duplicating it.
  return summary
}
function formatContextForAI(messages: any[]): string {
  if (messages.length === 0) return ''

  return messages.map(msg => {
    const prefix = msg.isSummary ? '[Summary] ' : ''
    return `${prefix}${msg.role}: ${msg.content}`
  }).join('\n')
}