import os
import shutil
import datetime
import time
import random
import json
from pdfminer.high_level import extract_text
from typing import List
from fastapi import APIRouter, UploadFile, File, HTTPException, Form, Depends, status
from fastapi.responses import FileResponse
from sqlalchemy.orm import Session
from sqlalchemy import or_, text
from google import genai
from google.genai import types

from database import SessionLocal, get_db
import models
from models import User, Folder, File as FileModel, ChatSession, ChatMessage, DictionaryItem, BibleQA, DailyBread, SundayMessage
from auth import get_current_user
import schemas
import dictionary_service

# Router Setup
router = APIRouter(prefix="/biblejyuku/api")

# Constants
UPLOAD_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), "uploads")
# GEMINI Setup
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

# System Prompt (Japanese; gloss: "You, the Word-of-God AI, are a Bible teacher.
# Answer questions in detail, centered on the words of Scripture. You may also
# research and explain historical/cultural background. Always answer in Japanese.")
SYSTEM_INSTRUCTION = """
あなた(御言葉AI)は、聖書を教える教師です。
質問に対して、詳細かつ聖書の御言葉を中心に教えてください。
背景(歴史的・文化的文脈)を調べて説明することも可能です。
必ず日本語で回答してください。
"""

# Gemini Config
def get_gemini_client():
    # An unset env var yields None, so check for that as well as the placeholder
    if not GEMINI_API_KEY or GEMINI_API_KEY == "INSERT_YOUR_API_KEY_HERE":
        print("WARNING: Gemini API Key not set. Chat will fail.")
        return None
    
    return genai.Client(api_key=GEMINI_API_KEY)

gemini_client = get_gemini_client()
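
# gemini_client is None when no API key is configured; every caller below
# guards on this before issuing requests.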

# --- Retry Helper ---

def generate_content_with_retry(model_name, contents, config=None, retries=3):
    """
    Wraps gemini_client.models.generate_content with retry logic for 429 errors.
    """
    if not gemini_client:
        raise Exception("Gemini Client not initialized")
        
    for attempt in range(retries):
        try:
            # config=None is the SDK default, so a single call covers both cases
            return gemini_client.models.generate_content(
                model=model_name,
                contents=contents,
                config=config
            )
        except Exception as e:
            error_str = str(e)
            if "429" in error_str or "RESOURCE_EXHAUSTED" in error_str:
                if attempt < retries - 1:
                    sleep_time = (2 ** attempt) + random.uniform(0, 1) # Exponential backoff + jitter
                    print(f"WARNING: Gemini 429 Error. Retrying in {sleep_time:.2f}s... (Attempt {attempt+1}/{retries})")
                    time.sleep(sleep_time)
                    continue
            # If not 429 or max retries reached, raise
            raise e
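
# Example usage (illustrative; mirrors the call sites below):
#   resp = generate_content_with_retry(
#       model_name='gemini-2.0-flash',
#       contents="マタイによる福音書について教えてください。",
#   )
#   print(resp.text)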

# --- Helper: Query Analysis ---
def analyze_query(query: str):
    """
    Gemini를 통해 사용자 질문에서 성경 책 이름, 장, 키워드를 추출합니다.
    """
    if not gemini_client: return None
    try:
        # Pydantic 없이 JSON 직접 파싱 (안전성 확보)
        prompt = f"""
        Analyze the user query for Bible reference.
        Query: "{query}"
        
        Return ONLY a valid JSON object (no markdown, no ```json wrapper).
        Keys:
        - "book": (string or null) The filtered Bible book name (e.g. "マタイ", "創世記"). Normalize to simple name (remove "による福音書", "の福音書").
        - "chapter": (int or null) Chapter number if specified.
        - "keywords": (list of strings) Important keywords from the query.
        
        Example JSON: {{"book": "マタイ", "chapter": 2, "keywords": ["東方", "博士"]}}
        """
        
        resp = generate_content_with_retry(
            model_name='gemini-2.0-flash', # or whichever fast model is in use
            contents=prompt
        )
        
        text = resp.text.strip()
        # Strip a markdown code fence if the model added one anyway
        if text.startswith("```"):
            text = text.split("\n", 1)[1].rsplit("\n", 1)[0]
        return json.loads(text)
    except Exception as e:
        print(f"Intent Analysis Failed: {e}")
        return None
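
# Example (illustrative): analyze_query("マタイ2章の東方の博士について") is expected
# to return {"book": "マタイ", "chapter": 2, "keywords": ["東方", "博士"]}.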

# --- Dictionary Endpoints ---
@router.get("/dictionary", response_model=List[schemas.DictionaryItemResponse])
def get_dictionary(db: Session = Depends(get_db)):
    return db.query(DictionaryItem).all()

@router.post("/dictionary/item", response_model=schemas.DictionaryItemResponse)
def add_dictionary_item(item: schemas.DictionaryItemCreate, db: Session = Depends(get_db)):
    # Check if exists
    db_item = db.query(DictionaryItem).filter(DictionaryItem.term == item.term).first()
    if db_item:
        # Update
        db_item.definition = item.definition
        db_item.verses = item.verses
    else:
        # Create
        db_item = DictionaryItem(term=item.term, definition=item.definition, verses=item.verses)
        db.add(db_item)
    
    db.commit()
    db.refresh(db_item)
    
    # Update Excel File (Source of Truth)
    save_db_to_excel(db)
    
    return db_item

@router.delete("/dictionary/item/{term}")
def delete_dictionary_item(term: str, db: Session = Depends(get_db)):
    db_item = db.query(DictionaryItem).filter(DictionaryItem.term == term).first()
    if not db_item:
        raise HTTPException(status_code=404, detail="Item not found")
    
    db.delete(db_item)
    db.commit()
    
    # Update Excel File
    save_db_to_excel(db)
    
    return {"message": "Deleted"}

@router.post("/dictionary/sync")
def sync_dictionary_to_ai(db: Session = Depends(get_db)):
    """
    Manually triggers ChromaDB rebuild from MySQL.
    """
    try:
        dictionary_service.rebuild_index(db)
        return {"message": "AI Synchronization Complete"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

def save_db_to_excel(db: Session):
    import pandas as pd
    items = db.query(DictionaryItem).all()
    data = [{
        "Term": i.term,
        "Definition": i.definition,
        "Verses": i.verses
    } for i in items]
    
    df = pd.DataFrame(data)
    excel_path = os.path.join(UPLOAD_DIRECTORY, "dictionary.xlsx")
    df.to_excel(excel_path, index=False)
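
# The Excel file doubles as a human-editable source of truth: a file edited
# offline can be re-imported wholesale via /dictionary/upload below.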

@router.post("/dictionary/upload")
async def upload_dictionary(file: UploadFile = File(...), db: Session = Depends(get_db)):
    if not file.filename.endswith('.xlsx'):
        raise HTTPException(status_code=400, detail="Only .xlsx files are allowed")
    
    import pandas as pd
    
    # Save temp
    temp_path = os.path.join(UPLOAD_DIRECTORY, "dictionary_temp.xlsx")
    with open(temp_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
        
    try:
        df = pd.read_excel(temp_path)
        # Expected columns: Term, Definition, Verses (or similar)
        # normalize header
        df.columns = [c.lower() for c in df.columns]
        
        # Check required
        if 'term' not in df.columns or 'definition' not in df.columns:
             raise HTTPException(status_code=400, detail="Excel must have 'Term' and 'Definition' columns")
             
        # Clear existing DB
        db.query(DictionaryItem).delete()
        
        # Insert New
        for _, row in df.iterrows():
            item = DictionaryItem(
                term=str(row['term']).strip(),
                definition=str(row['definition']).strip(),
                verses=str(row.get('verses', '')).strip() if pd.notna(row.get('verses')) else None
            )
            db.add(item)
            
        db.commit()
        
        # Rebuild Chroma
        dictionary_service.rebuild_index(db)
        
        return {"message": f"Imported {len(df)} items successfully"}
        
    except HTTPException:
        # Re-raise validation errors (e.g. missing columns) without wrapping them in a 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing file: {str(e)}")

# --- Helper: Keyword Extraction (shared by Daily Bread and BibleQA) ---

def extract_keywords(content: str) -> str:
    """Uses Gemini to extract keywords from text."""
    if not gemini_client:
        return ""
    
    try:
        response = generate_content_with_retry(
            model_name='gemini-2.0-flash',
            contents=f"Extract 5 important keywords (strictly Nouns/名詞 ONLY) from this text in Japanese (日本語), separated by commas. Output ONLY the keywords. Do not include verbs or adjectives.\n\nText: {content}",
        )
        return response.text.replace('\n', ',').strip()
    except Exception as e:
        print(f"Keyword Extraction Error: {e}")
        return ""



# --- File/Folder Endpoints ---

@router.post("/create_folder")
async def create_folder(request: schemas.FolderRequest, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    folder_name = request.folder_name.strip()
    if not folder_name or ".." in folder_name or "/" in folder_name:
        raise HTTPException(status_code=400, detail="Invalid folder name")

    if db.query(Folder).filter(Folder.name == folder_name).first():
        raise HTTPException(status_code=400, detail="Folder already exists")

    try:
        os.makedirs(os.path.join(UPLOAD_DIRECTORY, folder_name), exist_ok=True)
        db.add(Folder(name=folder_name))
        db.commit()
        return {"message": f"Folder '{folder_name}' created"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.post("/upload")
async def upload_files(folder: str = Form(...), files: List[UploadFile] = File(...), db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Validate folder against the DB; unknown folders are currently allowed through
    db_folder = db.query(Folder).filter(Folder.name == folder).first()
    if not db_folder:
        pass # Unrestricted upload is allowed for now

    target_dir = UPLOAD_DIRECTORY if folder == "Uncategorized" else os.path.join(UPLOAD_DIRECTORY, folder)
    os.makedirs(target_dir, exist_ok=True)

    uploaded_files = []
    for file in files:
        # basename() guards against path traversal via the client-supplied filename
        filename = os.path.basename(file.filename)
        file_location = os.path.join(target_dir, filename)
        with open(file_location, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # Add to DB
        uploaded_files.append(filename)
        new_file = FileModel(filename=filename, folder_name=folder)
        db.add(new_file)
    
    db.commit()
    return {"message": "Files uploaded", "filenames": uploaded_files}

@router.get("/folders")
async def list_folders(db: Session = Depends(get_db)):
    folders = db.query(Folder).all()
    res = [f.name for f in folders]
    if "Uncategorized" not in res: res.append("Uncategorized")
    return {"folders": sorted(res)}

@router.get("/videos")
async def list_videos(folder: str = "Uncategorized", db: Session = Depends(get_db)):
    files = db.query(FileModel).filter(FileModel.folder_name == folder).all()
    video_list = []
    for f in files:
        if f.filename.lower().endswith(('.mp4', '.mov', '.avi', '.mkv', '.webm', '.jpg', '.jpeg', '.png', '.gif', '.webp')):
             video_list.append({
                 "filename": f.filename,
                 "date": f.uploaded_at.strftime('%Y-%m-%d'),
                 "folder": f.folder_name
             })
    video_list.sort(key=lambda x: x['date'], reverse=True)
    return {"videos": video_list}

@router.delete("/delete_file/{folder}/{filename}")
async def delete_file(folder: str, filename: str, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    db_file = db.query(FileModel).filter(FileModel.filename == filename, FileModel.folder_name == folder).first()
    target_dir = UPLOAD_DIRECTORY if folder == "Uncategorized" else os.path.join(UPLOAD_DIRECTORY, folder)
    file_path = os.path.join(target_dir, filename)
    if os.path.exists(file_path):
        os.remove(file_path)
    if db_file:
        db.delete(db_file)
        db.commit()
    return {"message": "Deleted"}

@router.delete("/delete_folder/{folder_name}")
async def delete_folder(folder_name: str, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    if folder_name == "Uncategorized":
         raise HTTPException(status_code=400, detail="Cannot delete default category")
    db_folder = db.query(Folder).filter(Folder.name == folder_name).first()
    folder_path = os.path.join(UPLOAD_DIRECTORY, folder_name)
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)
    db.query(FileModel).filter(FileModel.folder_name == folder_name).delete()
    if db_folder:
        db.delete(db_folder)
    db.commit()
    return {"message": "Deleted"}

@router.get("/download/{folder}/{filename}")
async def download_file(folder: str, filename: str):
    target_dir = UPLOAD_DIRECTORY
    if folder != "Uncategorized":
        target_dir = os.path.join(UPLOAD_DIRECTORY, folder)
    file_path = os.path.join(target_dir, filename)
    if os.path.exists(file_path):
        return FileResponse(file_path, media_type='application/octet-stream', filename=filename)
    raise HTTPException(status_code=404, detail="File not found")

# --- BibleQA (formerly Training Data) Endpoints ---

def save_bible_qa_to_excel(db: Session):
    import pandas as pd
    items = db.query(models.BibleQA).all()
    data = [{
        "Question": i.question,
        "Answer": i.answer,
        "Keywords": i.keywords,
        "Date": i.created_at
    } for i in items]
    
    df = pd.DataFrame(data)
    excel_path = os.path.join(UPLOAD_DIRECTORY, "bible_qa.xlsx")
    df.to_excel(excel_path, index=False)

@router.post("/bible_qa/sync")
def sync_bible_qa(db: Session = Depends(get_db)):
    try:
        dictionary_service.rebuild_qa_index(db)
        return {"message": "BibleQA Sync Complete"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.post("/bible_qa/upload")
async def upload_bible_qa(file: UploadFile = File(...), db: Session = Depends(get_db)):
    if not file.filename.endswith('.xlsx'):
        raise HTTPException(status_code=400, detail="Only .xlsx files are allowed")
    
    import pandas as pd
    
    temp_path = os.path.join(UPLOAD_DIRECTORY, "bible_qa_temp.xlsx")
    with open(temp_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
        
    try:
        df = pd.read_excel(temp_path)
        df.columns = [c.lower() for c in df.columns]
        
        if 'question' not in df.columns or 'answer' not in df.columns:
             raise HTTPException(status_code=400, detail="Excel must have 'Question' and 'Answer' columns")
             
        db.query(models.BibleQA).delete()
        
        for _, row in df.iterrows():
            question = str(row['question']).strip()
            # Auto-extract keywords if missing
            keywords = str(row.get('keywords', '')).strip() if pd.notna(row.get('keywords')) else ""
            if not keywords and gemini_client:
                try:
                    # Reuse the simple Gemini keyword extractor
                    keywords = extract_keywords(question)
                except Exception:
                    pass

            item = models.BibleQA(
                question=question,
                answer=str(row['answer']).strip(),
                keywords=keywords
            )
            db.add(item)
            
        db.commit()
        
        # Sync to Chroma
        dictionary_service.rebuild_qa_index(db)
        
        return {"message": f"Imported {len(df)} items"}
        
    except HTTPException:
        # Re-raise validation errors (e.g. missing columns) without wrapping them in a 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/bible_qa/export")
def export_bible_qa(db: Session = Depends(get_db)):
    # Generate the file
    save_bible_qa_to_excel(db)
    
    # Return the file
    file_path = os.path.join(UPLOAD_DIRECTORY, "bible_qa.xlsx")
    if os.path.exists(file_path):
        return FileResponse(file_path, media_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', filename="bible_qa.xlsx")
    raise HTTPException(status_code=404, detail="Export failed")

@router.post("/bible_qa/check_similarity", response_model=schemas.SimilarityCheckResponse)
def check_similarity(request: schemas.SimilarityCheckRequest, db: Session = Depends(get_db)):
    """
    Check vector similarity for a given message.
    """
    try:
        # Format query to match stored format
        search_query = request.message
        
        # Search top 5 matches (Hybrid)
        vector_matches = dictionary_service.search_bible_qa_hybrid(search_query, k=5)
        
        matches = []
        if vector_matches:
            for m in vector_matches:
                matches.append({
                    "id": m['id'],
                    "score": m['score'],
                    "coverage": m.get('coverage'),
                    # dictionary_service stores only {"answer", "id"} in the
                    # Chroma metadata, so 'question' is backfilled from MySQL below
                    "question": m['metadata'].get('question', 'Unknown'),
                    "answer": m['metadata'].get('answer', 'Unknown')
                })

        # Backfill question text with a single IN query instead of one query per match
        if matches:
            ids = [int(m['id']) for m in matches]
            db_items = db.query(models.BibleQA).filter(models.BibleQA.id.in_(ids)).all()
            id_map = {str(item.id): item.question for item in db_items}

            for m in matches:
                m['question'] = id_map.get(m['id'], "Question not found in DB")

        return {"matches": matches}

    except Exception as e:
        print(f"Similarity Check Error: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/bible_qa", response_model=List[schemas.BibleQAResponse])
def get_bible_qa(db: Session = Depends(get_db)):
    return db.query(models.BibleQA).order_by(models.BibleQA.created_at.desc()).all()

@router.delete("/bible_qa/{id}")
def delete_bible_qa(id: int, db: Session = Depends(get_db)):
    item = db.query(models.BibleQA).filter(models.BibleQA.id == id).first()
    if item:
        db.delete(item)
        db.commit()
        # Chroma Sync
        dictionary_service.delete_qa_item(id)
        # BM25 Sync (Rebuild in-memory index)
        dictionary_service.init_bm25(db)
    return {"message": "Deleted"}


# --- Daily Bread Endpoints ---

@router.post("/daily_bread", response_model=schemas.DailyBreadResponse)
def create_daily_bread(item: schemas.DailyBreadCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Migration Check (Lazy)
    ensure_daily_bread_user_column(db)
    
    # Keyword Extraction
    keywords = ""
    try:
        if gemini_client:
            keywords = ",".join(dictionary_service.tokenize_japanese(item.content))
        if not keywords:
            keywords = extract_keywords(item.content)
    except Exception:
        keywords = ""

    db_item = models.DailyBread(
        book=item.book,
        chapter=item.chapter,
        verse=item.verse,
        end_chapter=item.end_chapter,
        end_verse=item.end_verse,
        content=item.content,
        keywords=keywords,
        user_username=current_user.username # Save Owner
    )
    db.add(db_item)
    db.commit()
    db.refresh(db_item)
    return db_item

@router.get("/daily_bread", response_model=List[schemas.DailyBreadResponse])
def get_daily_bread(db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Migration Check (Lazy) - Ensure column exists before querying
    ensure_daily_bread_user_column(db)
    
    # Filter by Current User
    return db.query(models.DailyBread)\
        .filter(models.DailyBread.user_username == current_user.username)\
        .order_by(models.DailyBread.created_at.desc()).all()

@router.put("/daily_bread/{id}", response_model=schemas.DailyBreadResponse)
def update_daily_bread(id: int, item: schemas.DailyBreadCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Ensure Owner
    db_item = db.query(models.DailyBread).filter(
        models.DailyBread.id == id,
        models.DailyBread.user_username == current_user.username
    ).first()
    
    if not db_item:
        raise HTTPException(status_code=404, detail="Item not found or permission denied")
    
    db_item.book = item.book
    db_item.chapter = item.chapter
    db_item.verse = item.verse
    db_item.end_chapter = item.end_chapter
    db_item.end_verse = item.end_verse
    db_item.content = item.content
    # Re-extract keywords
    db_item.keywords = ",".join(dictionary_service.tokenize_japanese(item.content))
    
    db.commit()
    db.refresh(db_item)
    return db_item

@router.delete("/daily_bread/{id}")
def delete_daily_bread(id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Ensure Owner
    db_item = db.query(models.DailyBread).filter(
        models.DailyBread.id == id,
        models.DailyBread.user_username == current_user.username
    ).first()
    
    if not db_item:
        raise HTTPException(status_code=404, detail="Item not found or permission denied")
    
    db.delete(db_item)
    db.commit()
    return {"message": "Deleted"}


# --- Chat Endpoints ---

@router.get("/chat/sessions")
def get_sessions(db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Global: Return ALL sessions
    sessions = db.query(ChatSession).order_by(ChatSession.created_at.desc()).all()
    return {"sessions": sessions}

@router.get("/chat/search", response_model=List[schemas.ChatSessionModel])
def search_sessions(q: str, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Global: Search ALL sessions
    sessions = db.query(ChatSession).outerjoin(ChatMessage, ChatSession.id == ChatMessage.session_id).filter(
        or_(
            ChatSession.title.ilike(f"%{q}%"),
            ChatMessage.content.ilike(f"%{q}%")
        )
    ).distinct().all()
    return sessions

@router.post("/chat/sessions", response_model=schemas.ChatSessionModel)
def create_session(request: schemas.CreateSessionRequest, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    new_session = ChatSession(user_username=current_user.username, title=request.title)
    db.add(new_session)
    db.commit()
    db.refresh(new_session)
    return new_session

@router.get("/chat/sessions/{session_id}")
def get_session_messages(session_id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Global: View ANY session
    session = db.query(ChatSession).filter(ChatSession.id == session_id).first()
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    messages = db.query(ChatMessage).filter(ChatMessage.session_id == session_id).order_by(ChatMessage.timestamp).all()
    return {"id": session.id, "title": session.title, "messages": messages}

@router.post("/chat/sessions/{session_id}/message")
def send_message(session_id: int, request: schemas.MessageRequest, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    # Global: Anyone can reply to any session (Wiki/Forum style)
    session = db.query(ChatSession).filter(ChatSession.id == session_id).first()
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
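
    # Flow: (1) persist the user message, (2) rebuild the chat history for
    # Gemini, (3) try a stored BibleQA answer first (hybrid search) and, only
    # if none matches, build RAG context and call Gemini, (4) persist the reply.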
    
    # 1. Save User Message
    user_msg = ChatMessage(session_id=session_id, sender="user", content=request.message)
    db.add(user_msg)
    db.commit()
    
    # 2. Get Context (History)
    history_msgs = db.query(ChatMessage).filter(
        ChatMessage.session_id == session_id,
        ChatMessage.id != user_msg.id
    ).order_by(ChatMessage.timestamp).all()
    
    gemini_history = []
    for msg in history_msgs:
        role = "model" if msg.sender == "bot" else "user"
        gemini_history.append(types.Content(role=role, parts=[types.Part(text=msg.content)]))
    
    # 3. Call Gemini
    try:
        if not gemini_client:
            raise Exception("Gemini API not configured")
        
        # A. Analyze Intent (query analysis)
        intent = analyze_query(request.message)
        print(f"DEBUG: Analyzed Intent: {intent}")
        
        search_book = intent.get("book") if intent else None
        search_chapter = intent.get("chapter") if intent else None
        search_keywords = intent.get("keywords", []) if intent else []

        # ---------------------------------------------------------
        # [NEW] BibleQA Check (High Priority)
        # ---------------------------------------------------------
        try:
            print(f"DEBUG: Vector Search Logic Start for query: {request.message}")
            
            # Use Hybrid Search (Vector + BM25) on the raw message; no prefix needed
            search_query = request.message
            print(f"DEBUG: Hybrid Search Query: '{search_query}'")
            
            # Get top 1 hybrid match
            hybrid_matches = dictionary_service.search_bible_qa_hybrid(search_query, k=1)
            
            match_found = None
            if hybrid_matches:
                match = hybrid_matches[0] # Best match
                score = match['score']
                print(f"DEBUG: [Hybrid] Closest ID: {match['id']}, Score: {score:.5f} (Threshold: > 0.01)")
                
                # Threshold check: Score > 0.01 means it appeared in effective top ranks
                if score > 0.01:
                     match_found = match
            else:
                print("DEBUG: [Hybrid] No matches returned")
            
            if match_found:
                # The full answer is stored directly in the Chroma metadata
                bot_content = match_found['metadata']['answer']

                # Coverage check: warn when the match covers too few of the query
                # keywords. (Korean warning string; gloss: "Caution: some of your
                # question's keywords were not found in the search results.
                # (Match rate: N%)")
                coverage = match_found.get('coverage', 1.0)
                if coverage < 0.5:
                    bot_content += "\n\n⚠️ **주의: 질문하신 키워드 중 일부가 검색 결과에 없습니다.** (일치율: {:.0%})".format(coverage)

                # Save Bot Message
                bot_msg = ChatMessage(session_id=session_id, sender="bot", content=bot_content)
                db.add(bot_msg)
                db.commit()
                db.refresh(bot_msg)

                return {"sender": "bot", "content": bot_content, "message_id": bot_msg.id}
                 
        except Exception as e:
            print(f"BibleQA Check Error: {e}")

        context_text = ""
        
        try:
            # ---------------------------------------------------------
            # 1. Dictionary Lookup (keyword-based search of dictionary data)
            # ---------------------------------------------------------
            all_dict_items = db.query(models.DictionaryItem).all()
            relevant_dict_items = []
            
            for item in all_dict_items:
                 # Match when the term appears in the query or overlaps an analyzed keyword
                 if item.term in request.message:
                     relevant_dict_items.append(item)
                     continue
                 for k in search_keywords:
                     if k and k in item.term:
                         relevant_dict_items.append(item)
                         break
            
            if relevant_dict_items:
                 dict_lines = [f"{item.term} | {item.definition} | {item.verses}" for item in relevant_dict_items]
                 context_text += (
                    "\n\n[System Note]\n"
                    "以下の[bible塾辞書データ]を積極的に活用して回答してください。\n"
                    "**回答は必ず以下のフォーマットに従って作成してください：**\n\n"
                    "【説明】\n"
                    "(単語の意味や質問への回答を記述)\n\n"
                    "【背景】\n"
                    "(聖書的・歴史的背景や文脈を記述)\n\n"
                    "【関連聖句】\n"
                    "(関連する聖書の言葉を引用。章と節を明記すること)\n\n"
                    "【参考データ】\n"
                    "(回答に使用した[bible塾辞書データ]の内容をそのまま表示)\n"
                    "--------------------------------------------------\n"
                    f"[bible塾辞書データ]\n" + "\n".join(dict_lines) + "\n"
                    "--------------------------------------------------\n"
                )

            # ---------------------------------------------------------
            # 2. Daily Bread Lookup (structured search of meditation records)
            # ---------------------------------------------------------
            daily_breads = db.query(models.DailyBread).all()
            relevant_breads = []
            
            for db_item in daily_breads:
                is_relevant = False

                # Normalize the book name (e.g. マタイによる福音書 -> マタイ)
                db_book_norm = db_item.book.replace("による福音書", "").replace("の福音書", "").replace("福音書", "")

                # Strategy 1: book name AND chapter number both match (most precise)
                if search_book and search_chapter:
                    # Chapter numbers are compared numerically
                    if search_book == db_book_norm and db_item.chapter == search_chapter:
                        is_relevant = True

                # Strategy 2: book name only (allowed only when no chapter was specified)
                elif search_book and not search_chapter:
                    if search_book == db_book_norm:
                        is_relevant = True

                # Strategy 3: keyword matching (fallback)
                if not is_relevant and db_item.keywords:
                    # Simple substring match of stored keywords against the query text
                    if any(k in request.message for k in [k.strip() for k in db_item.keywords.split(',') if k]):
                        is_relevant = True

                if is_relevant:
                    relevant_breads.append(db_item)
            
            print(f"DEBUG: Found {len(relevant_breads)} relevant Breads")

            if relevant_breads:
                bread_lines = []
                for b in relevant_breads:
                    range_str = f"{b.chapter}:{b.verse}"
                    if b.end_chapter and b.end_verse:
                        range_str += f"~{b.end_chapter}:{b.end_verse}"
                    elif b.end_verse:
                        range_str += f"~{b.end_verse}"
                    bread_lines.append(f"[{b.book} {range_str}] {b.content}")
                
                context_text += (
                    "\n\n[System Note - Additional Context]\n"
                    "以下の[日ごとの糧]（ユーザーの過去の黙想記録）も参考にして、共感的に回答してください。\n"
                    "回答の最後には、以下のフォーマットで参照した内容を記載してください。\n"
                    "【参考データ（日ごとの糧）】\n"
                    "(回答に使用した[日ごとの糧]の内容を要約して表示。使用していない場合は「なし」と記載)\n"
                    "--------------------------------------------------\n"
                    f"[日ごとの糧]\n" + "\n".join(bread_lines) + "\n"
                    "--------------------------------------------------\n"
                )

            # ---------------------------------------------------------
            # 3. Sunday Message Lookup (Sunday Message - Keyword Match)
            # ---------------------------------------------------------
            sunday_messages = db.query(SundayMessage).all()
            relevant_messages = []
            
            for msg in sunday_messages:
                if not msg.keywords:
                    continue
                    
                msg_keywords = [k.strip() for k in msg.keywords.split(',') if k]
                is_relevant = False
                
                # Check 1: User query contains Sunday Message keyword
                if any(k in request.message for k in msg_keywords):
                    is_relevant = True
                    
                # Check 2: Analyzed keywords match Sunday Message keyword
                if not is_relevant and search_keywords:
                    # Check if any msg_keyword is contained in any search_keyword (or vice versa)
                    # Use explicit loops for safety vs complex list comprehension
                    for sk in search_keywords:
                        if not sk: continue
                        if any(k in sk or sk in k for k in msg_keywords):
                            is_relevant = True
                            break
                        
                if is_relevant:
                    relevant_messages.append(msg)
                    
            print(f"DEBUG: Found {len(relevant_messages)} relevant Sunday Messages")
            
            if relevant_messages:
                msg_lines = []
                for m in relevant_messages:
                    # Provide a much larger excerpt so the answer can be meaningful.
                    # Gemini 2.0 Flash's context window is large, so 8000 chars is safe.
                    limit = 8000
                    summary = m.content[:limit].replace('\n', ' ') + "..." if len(m.content) > limit else m.content.replace('\n', ' ')
                    msg_lines.append(f"[{m.title}] {summary}")
                    print(f"DEBUG: Using Sunday Message context: {m.title}")
                    
                context_text += (
                    "\n\n[System Note - Sunday Message Context]\n"
                    "以下の[主日メッセージ]（過去の説教等の記録）も参考にして、回答を補強してください。\n"
                    "メッセージの内容と関連する質問であれば、積極的に引用してください。\n"
                    "回答の最後には、以下のフォーマットで参照した内容を記載してください。\n"
                    "【参考データ（主日メッセージ）】\n"
                    "(回答に使用した[主日メッセージ]のタイトルを表示。使用していない場合は「なし」と記載)\n"
                    "--------------------------------------------------\n"
                    f"[主日メッセージ]\n" + "\n".join(msg_lines) + "\n"
                    "--------------------------------------------------\n"
                )

        except Exception as e:
            print(f"RAG Error: {e}")
            
        final_message = request.message + context_text

        # Prepare Content
        contents = gemini_history + [types.Content(role="user", parts=[types.Part(text=final_message)])]
        
        response = generate_content_with_retry(
            model_name='gemini-2.0-flash',
            contents=contents,
            config=types.GenerateContentConfig(
                system_instruction=SYSTEM_INSTRUCTION
            )
        )
        bot_content = response.text
    except Exception as e:
        bot_content = f"Error: {str(e)}"
        print(f"Gemini Error: {e}")

    # 4. Save Bot Message
    bot_msg = ChatMessage(session_id=session_id, sender="bot", content=bot_content)
    db.add(bot_msg)
    db.commit()
    db.refresh(bot_msg)
    
    return {"sender": "bot", "content": bot_content, "message_id": bot_msg.id}

@router.post("/chat/messages/{message_id}/like")
def like_message(message_id: int, db: Session = Depends(get_db)):
    # 1. Get Bot Message
    bot_msg = db.query(ChatMessage).filter(ChatMessage.id == message_id).first()
    if not bot_msg or bot_msg.sender != "bot":
        raise HTTPException(status_code=400, detail="Invalid message")
        
    # 2. Find Previous User Message
    user_msg = db.query(ChatMessage).filter(
        ChatMessage.session_id == bot_msg.session_id,
        ChatMessage.id < message_id,
        ChatMessage.sender == "user"
    ).order_by(ChatMessage.id.desc()).first()
    
    if not user_msg:
        raise HTTPException(status_code=400, detail="User question not found")
        
    # 3. Save to BibleQA
    try:
        # Check duplicate
        exists = db.query(models.BibleQA).filter(models.BibleQA.question == user_msg.content).first()
        if exists:
            return {"message": "Already saved"}
            
        # Extract keywords
        keywords = extract_keywords(user_msg.content)
        
        new_qa = models.BibleQA(
            question=user_msg.content,
            answer=bot_msg.content,
            keywords=keywords
        )
        db.add(new_qa)
        
        # [NEW] Update ChatMessage status
        bot_msg.is_liked = 1
        
        db.commit()
        db.refresh(new_qa)
        
        # Sync to Chroma
        dictionary_service.sync_qa_item(new_qa)
        # Sync to BM25
        dictionary_service.init_bm25(db)
        
        return {"message": "Saved to BibleQA"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.delete("/chat/sessions/{session_id}")
def delete_session(session_id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    session = db.query(ChatSession).filter(ChatSession.id == session_id, ChatSession.user_username == current_user.username).first()
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    
    db.query(ChatMessage).filter(ChatMessage.session_id == session_id).delete()
    db.delete(session)
    db.commit()
    return {"message": "Deleted"}

# --- Sunday Message Endpoints ---

def extract_text_with_gemini(file_path: str) -> str:
    """
    Uses Gemini AI to extract text from a PDF file (OCR).
    """
    if not gemini_client:
        return ""
        
    try:
        # Read file as bytes
        with open(file_path, "rb") as f:
            file_content = f.read()

        prompt = "このPDFファイルに含まれるテキストを全て抽出して出力してください。要約ではなく、全文をそのままテキスト化してください。"
        
        response = generate_content_with_retry(
            model_name="gemini-2.5-flash-lite",
            contents=[
                types.Content(
                    role="user",
                    parts=[
                        types.Part.from_bytes(data=file_content, mime_type="application/pdf"),
                        types.Part.from_text(text=prompt)
                    ]
                )
            ]
        )
        return response.text if response.text else ""
    except Exception as e:
        print(f"Gemini OCR Error: {e}")
        return ""

# --- Helper: Keyword Extraction ---
def extract_keywords_with_gemini(text: str) -> str:
    """
    Extracts 10 important keywords from the text using Gemini.
    """
    if not gemini_client or not text:
        return ""
        
    try:
        # Cap the text size to stay inside token limits, though flash models handle large contexts
        prompt = f"""
        Extract the 10 most important keywords or topics from the following text.
        Return ONLY a comma-separated string of keywords.
        Example: Keyword1, Keyword2, Keyword3
        
        Text:
        {text[:50000]}
        """
        
        response = generate_content_with_retry(
            model_name="gemini-2.5-flash-lite",
            contents=prompt
        )
        return response.text.strip() if response.text else ""
    except Exception as e:
        print(f"Keyword Extraction Failed: {e}")
        return ""

def ensure_keywords_column_exists(db: Session):
    try:
        # Check if column exists
        result = db.execute(text("SHOW COLUMNS FROM sunday_messages LIKE 'keywords'"))
        if not result.fetchone():
            print("Adding 'keywords' column to sunday_messages table...")
            db.execute(text("ALTER TABLE sunday_messages ADD COLUMN keywords TEXT"))
            db.commit()
            print("Column 'keywords' added successfully.")
    except Exception as e:
        print(f"Migration Check Failed: {e}")

def ensure_daily_bread_user_column(db: Session):
    try:
        # Check if column exists
        result = db.execute(text("SHOW COLUMNS FROM daily_bread LIKE 'user_username'"))
        if not result.fetchone():
            print("Adding 'user_username' column to daily_bread table...")
            db.execute(text("ALTER TABLE daily_bread ADD COLUMN user_username VARCHAR(100)"))
            db.execute(text("CREATE INDEX ix_daily_bread_user_username ON daily_bread (user_username)"))
            db.commit()
            print("Column 'user_username' added successfully.")
    except Exception as e:
        print(f"DailyBread Migration Check Failed: {e}")

@router.post("/sunday_messages/upload", response_model=schemas.SundayMessageResponse)
async def upload_sunday_message(file: UploadFile = File(...), db: Session = Depends(get_db)):
    if not file.filename.lower().endswith('.pdf'):
        raise HTTPException(status_code=400, detail="Only PDF files are allowed")
        
    # Ensure DB Schema is up to date (Auto-migration)
    ensure_keywords_column_exists(db)
    
    # 1. Save File (Unique Name)
    timestamp = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
    unique_filename = f"{timestamp}_{file.filename}"
    
    target_dir = os.path.join(UPLOAD_DIRECTORY, "sunday_messages")
    os.makedirs(target_dir, exist_ok=True)
    file_location = os.path.join(target_dir, unique_filename)
    
    with open(file_location, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
        
    # 2. Extract Text
    content = ""
    try:
        content = extract_text(file_location)
        print(f"DEBUG: Extracted Text Length: {len(content)}")
        
        # Fallback to Gemini if text is too short (e.g., scanned PDF)
        if len(content.strip()) < 50:
            print("DEBUG: Text too short, trying Gemini OCR...")
            content = extract_text_with_gemini(file_location)
            print(f"DEBUG: Gemini Extracted Text Length: {len(content)}")
            
    except Exception as e:
        print(f"PDF Extraction Failed: {e}")
        # Try Gemini even if pdfminer failed completely
        try:
            print("DEBUG: PDFMiner failed, trying Gemini OCR...")
            content = extract_text_with_gemini(file_location)
        except Exception as gemini_e:
             print(f"Gemini Extraction Failed: {gemini_e}")

    # 3. Extract Keywords
    keywords = ""
    if content:
        print("DEBUG: Extracting keywords...")
        keywords = extract_keywords_with_gemini(content)
        print(f"DEBUG: Keywords: {keywords}")

    # 4. Save to DB (Always create new)
    new_message = SundayMessage(
        title=file.filename[:-4], # strip the ".pdf" suffix; keep the original name as the title
        filename=unique_filename,
        content=content,
        keywords=keywords
    )
    db.add(new_message)
    db.commit()
    db.refresh(new_message)
    
    return new_message

@router.get("/sunday_messages", response_model=List[schemas.SundayMessageResponse])
def get_sunday_messages(db: Session = Depends(get_db)):
    return db.query(SundayMessage).order_by(SundayMessage.uploaded_at.desc()).all()

@router.get("/sunday_messages/download/{id}")
def download_sunday_message(id: int, db: Session = Depends(get_db)):
    msg = db.query(SundayMessage).filter(SundayMessage.id == id).first()
    if not msg:
        raise HTTPException(status_code=404, detail="Message not found")
        
    target_dir = os.path.join(UPLOAD_DIRECTORY, "sunday_messages")
    file_path = os.path.join(target_dir, msg.filename)
    
    if os.path.exists(file_path):
        return FileResponse(file_path, media_type='application/pdf', filename=msg.filename)
    raise HTTPException(status_code=404, detail="File not found on server")

@router.delete("/sunday_messages/{id}")
def delete_sunday_message(id: int, db: Session = Depends(get_db)):
    msg = db.query(SundayMessage).filter(SundayMessage.id == id).first()
    if not msg:
        raise HTTPException(status_code=404, detail="Message not found")
        
    # Delete File
    target_dir = os.path.join(UPLOAD_DIRECTORY, "sunday_messages")
    file_path = os.path.join(target_dir, msg.filename)
    if os.path.exists(file_path):
        os.remove(file_path)
        
    db.delete(msg)
    db.commit()
    return {"message": "Deleted"}
