import json
import os
from typing import Dict, List, Tuple

import requests
from dotenv import load_dotenv

load_dotenv()

OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
GROQ_API_KEY = os.getenv("GROQ_API_KEY", "")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://127.0.0.1:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")

OPENROUTER_MODELS = [
    "google/gemini-2.5-flash-preview",
    "deepseek/deepseek-chat",
]
GROQ_MODEL = os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile")


def _extract_content(data: Dict) -> str:
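    """Pull the assistant message text out of an OpenAI-style response payload."""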
    choices = data.get("choices") or []
    if not choices:
        raise RuntimeError("AI response missing choices.")
    message = choices[0].get("message") or {}
    content = message.get("content")
    if not content:
        raise RuntimeError("AI response content is empty.")
    return content


def ask_openrouter_model(prompt: str, model: str) -> Dict:
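    """Run a single-turn chat completion against OpenRouter with the given model."""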
    if not OPENROUTER_API_KEY:
        raise RuntimeError("OpenRouter API key not configured.")

    response = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {OPENROUTER_API_KEY}",
            "Content-Type": "application/json",
        },
        json={
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.4,
            "max_tokens": 1200,
        },
        timeout=90,
    )
    response.raise_for_status()
    data = response.json()
    return {
        "success": True,
        "provider": "OpenRouter",
        "model": model,
        "response": _extract_content(data),
    }


def ask_groq(prompt: str) -> Dict:
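    """Run a single-turn chat completion against Groq's OpenAI-compatible API."""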
    if not GROQ_API_KEY:
        raise RuntimeError("Groq API key not configured.")

    response = requests.post(
        "https://api.groq.com/openai/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {GROQ_API_KEY}",
            "Content-Type": "application/json",
        },
        json={
            "model": GROQ_MODEL,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.4,
            "max_tokens": 1200,
        },
        timeout=90,
    )
    response.raise_for_status()
    data = response.json()
    return {
        "success": True,
        "provider": "Groq",
        "model": GROQ_MODEL,
        "response": _extract_content(data),
    }


def ask_ollama(prompt: str) -> Dict:
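    """Run a single-turn chat completion against a local Ollama server via its OpenAI-compatible endpoint."""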
    response = requests.post(
        f"{OLLAMA_URL}/v1/chat/completions",
        json={
            "model": OLLAMA_MODEL,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.3,
            "max_tokens": 1200,
        },
        timeout=90,
    )
    response.raise_for_status()
    data = response.json()
    return {
        "success": True,
        "provider": "Ollama",
        "model": data.get("model", OLLAMA_MODEL),
        "response": _extract_content(data),
    }


def _admin_fallback_action(command_text: str) -> Tuple[str, Dict, str]:
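    """Map an admin command to an (action, data, message) tuple using simple keyword rules."""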
    text = (command_text or "").lower()

    if "reset subscription" in text:
        return ("reset_subscriptions", {}, "Fallback interpreted reset subscription command.")
    if "disable" in text and "user" in text:
        return ("disable_user", {}, "Fallback interpreted disable user command.")
    if "create" in text and ("exam" in text or "ielts" in text or "ukvi" in text):
        return (
            "create_exam",
            {
                "exam": {
                    "name": "AI Generated Exam",
                    "slug": "ai-generated-exam",
                    "description": "Generated by local fallback parser.",
                    "examType": "IELTS",
                    "sections": [],
                }
            },
            "Fallback interpreted create exam command.",
        )
    if "weak student report" in text:
        return ("unknown", {"reportType": "weak_student_report"}, "Fallback captured weak student report request.")

    return ("unknown", {}, "Fallback could not map command with certainty.")


def local_fallback_response(prompt: str, errors: List[str]) -> Dict:
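    """Build a rule-based reply when all remote providers fail.

    Admin-command prompts are parsed into a JSON action payload; any other
    prompt gets a generic retry message. Accumulated provider errors are
    attached for debugging.
    """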
    if "JSON:" in prompt and "Supported actions" in prompt and "Command:" in prompt:
        command_part = prompt.split("Command:\n", 1)[1] if "Command:\n" in prompt else ""
        command_text = command_part.split("\n\nJSON:", 1)[0].strip()
        action, data, message = _admin_fallback_action(command_text)
        payload = {"action": action, "data": data, "message": message}
        return {
            "success": True,
            "provider": "LocalFallback",
            "model": "rule-based-admin-parser",
            "response": json.dumps(payload),
            "fallbackErrors": errors,
        }

    return {
        "success": True,
        "provider": "LocalFallback",
        "model": "rule-based-assistant",
        "response": (
            "Primary AI providers are temporarily unavailable. "
            "Local fallback executed successfully. "
            "Please retry shortly for full semantic analysis."
        ),
        "fallbackErrors": errors,
    }


def ask_ai(prompt: str) -> Dict:
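    """Try providers in order (OpenRouter Gemini, OpenRouter DeepSeek, Groq,
    Ollama) and fall back to the local rule-based responder, so a response
    dict is always returned.
    """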
    errors = []

    # 1) OpenRouter Gemini Flash (primary)
    try:
        return ask_openrouter_model(prompt, OPENROUTER_MODELS[0])
    except Exception as exc:
        errors.append(f"OpenRouter/Gemini failed: {exc}")

    # 2) OpenRouter DeepSeek fallback
    try:
        return ask_openrouter_model(prompt, OPENROUTER_MODELS[1])
    except Exception as exc:
        errors.append(f"OpenRouter/DeepSeek failed: {exc}")

    # 3) Groq fallback
    try:
        return ask_groq(prompt)
    except Exception as exc:
        errors.append(f"Groq failed: {exc}")

    # 4) Ollama fallback
    try:
        return ask_ollama(prompt)
    except Exception as exc:
        errors.append(f"Ollama failed: {exc}")

    # 5) Local rule-based fallback (always returns a response)
    return local_fallback_response(prompt, errors)
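

# Minimal usage sketch (an assumption for illustration: the module is run
# directly; with no API keys or local Ollama configured, ask_ai still
# answers via the rule-based local fallback):
if __name__ == "__main__":
    result = ask_ai("Give one sentence of feedback on an IELTS Task 2 essay introduction.")
    print(f"[{result['provider']} / {result['model']}]")
    print(result["response"])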
