"""
Base agent class – wraps instructor for structured LLM calls.
"""

from __future__ import annotations

import logging
import os
from typing import Type, TypeVar

import instructor
from pydantic import BaseModel

logger = logging.getLogger(__name__)

T = TypeVar("T", bound=BaseModel)


class BaseAgent:
    """Thin wrapper around *instructor* that provides a unified call interface."""

    def __init__(self, model: str = "openai/gpt-4o", max_retries: int = 3):
        """Create an agent bound to *model*.

        Args:
            model: Provider-prefixed model string (e.g. ``"openai/gpt-4o"`` or
                ``"kimi/..."``). The ``kimi/`` prefix is special-cased below;
                all other prefixes are handled by ``instructor.from_provider``.
            max_retries: Retry budget forwarded to instructor on every call.

        Raises:
            ValueError: If a ``kimi/`` model is requested but ``KIMI_API_KEY``
                is not set (or is empty) in the environment.
        """
        self.model = model
        self.max_retries = max_retries

        if model.startswith("kimi/"):
            # Kimi Coding API speaks the Anthropic-compatible wire format, so
            # build an Anthropic client pointed at Kimi's base URL and wrap it.
            from anthropic import Anthropic

            api_key = os.environ.get("KIMI_API_KEY")
            if not api_key:
                # Fail fast with an actionable message instead of constructing
                # a client with an empty key and surfacing an opaque
                # authentication error on the first call.
                raise ValueError(
                    "KIMI_API_KEY environment variable must be set to use "
                    f"model {model!r}"
                )
            base_client = Anthropic(
                api_key=api_key,
                base_url="https://api.kimi.com/coding/",
            )
            self.client = instructor.from_anthropic(base_client)
        else:
            # instructor.from_provider parses "provider/model" strings itself,
            # so the prefix can be passed through unchanged here.
            self.client = instructor.from_provider(model, async_client=False)

    def call(
        self,
        *,
        system_prompt: str,
        user_prompt: str,
        response_model: Type[T],
        temperature: float = 0.7,
        max_tokens: int | None = None,
    ) -> T:
        """Send a structured completion request and return a validated model.

        Args:
            system_prompt: System-role instructions for the model.
            user_prompt: User-role message content.
            response_model: Pydantic model class the response is parsed into.
            temperature: Sampling temperature.
            max_tokens: Completion-token cap. Defaults to 8192 when omitted
                because the Anthropic/Kimi API requires an explicit
                ``max_tokens`` value.

        Returns:
            An instance of *response_model* validated by instructor.
        """
        logger.debug(
            "Agent call  model=%s  response_model=%s",
            self.model,
            response_model.__name__,
        )
        result = self.client.chat.completions.create(
            # Strip the provider prefix; the underlying API expects the bare
            # model name (e.g. "gpt-4o", not "openai/gpt-4o").
            model=self.model.split("/", 1)[-1],
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            response_model=response_model,
            temperature=temperature,
            max_retries=self.max_retries,
            # Anthropic/Kimi requires max_tokens; supply a default if the
            # caller did not specify one.
            max_tokens=max_tokens if max_tokens is not None else 8192,
        )
        logger.debug("Agent response received for %s", response_model.__name__)
        return result
