import asyncio
import json
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from agentic_rag_4 import SimpleDeepSeekRAG
from fastapi.middleware.cors import CORSMiddleware
from datetime import datetime

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Para testes. Em produção, use ["https://observatorio.fafich.ufmg.br"]
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Log every incoming query with a timestamp and the endpoint it came from
def registrar_consulta(prompt: str, origem: str):
    with open("consulta.log", "a", encoding="utf-8") as f:
        timestamp = datetime.now().isoformat()
        f.write(f"[{timestamp}] ({origem}) {prompt}\n")

# Process a prompt and stream the generated tokens back to the client
async def generate_response(prompt: str):
    loop = asyncio.get_running_loop()  # get_event_loop() is deprecated inside coroutines
    queue = asyncio.Queue()

    def send_token(token: str):
        # Called from the model's worker thread, so feed the queue thread-safely
        loop.call_soon_threadsafe(queue.put_nowait, token)

    # Instantiate the RAG logic with the streaming callback; SimpleDeepSeekRAG
    # is expected to call stream_func once per token and finish with "[END]"
    simple_rag = SimpleDeepSeekRAG(stream_func=send_token)

    def run_query():
        try:
            simple_rag.query(prompt)
        finally:
            # Guarantee the "[END]" sentinel even if query() raises, so the
            # SSE loop below never hangs (a duplicate sentinel is harmless)
            send_token("[END]")

    # Run the blocking model call in a worker thread; keep a reference to the
    # task so it is not garbage-collected while still running
    task = asyncio.create_task(asyncio.to_thread(run_query))

    # Emit tokens as Ollama-style SSE events
    while True:
        token = await queue.get()
        if token == "[END]":
            yield "data: " + json.dumps({"done": True}) + "\n\n"
            break
        yield "data: " + json.dumps({"response": token}) + "\n\n"

# Endpoint compatible with Ollama's `/api/generate`
@app.post("/api/generate")
async def api_generate(request: Request):
    body = await request.json()
    prompt = body.get("prompt", "")
    registrar_consulta(prompt, origem="/api/generate")
    return StreamingResponse(generate_response(prompt), media_type="text/event-stream")

# Endpoint compatible with Ollama's `/api/chat` (with conversation history)
@app.post("/api/chat")
async def api_chat(request: Request):
    body = await request.json()
    messages = body.get("messages", [])
    prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
    registrar_consulta(prompt, origem="/api/chat")
    return StreamingResponse(generate_response(prompt), media_type="text/event-stream")
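# A minimal way to run and exercise this server locally (assuming this file is
# saved as server.py and uvicorn is installed; filename and port are assumptions):
#
#   uvicorn server:app --host 0.0.0.0 --port 8000
#
#   curl -N -X POST http://localhost:8000/api/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello"}'
#
#   curl -N -X POST http://localhost:8000/api/chat \
#        -H "Content-Type: application/json" \
#        -d '{"messages": [{"role": "user", "content": "Hello"}]}'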

