from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from models import ChatRequest
from unified_orchestrator_test import unified_orchestrator

import json
import uuid
import logging

# Configure root logging once at import time; all module loggers inherit INFO.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Router mounted by the application; exposes the POST /stream endpoint below.
router = APIRouter()

class ReasoningExtractor:
    """Incremental extractor for ``<tag>…</tag>``-delimited reasoning text.

    Simulates the AI SDK's ``extractReasoningMiddleware``: feed arbitrarily
    sized streaming chunks to :meth:`process_chunk` and it splits them into
    reasoning text (inside the tag) and regular text (outside the tag),
    buffering partial tags that straddle chunk boundaries so a tag split
    across two chunks is never mistaken for visible text.

    Attributes:
        tag_name: Name of the delimiter tag (default ``"thinking"``).
        buffer: Unconsumed tail of the stream (may hold a partial tag).
        in_reasoning: True while the cursor is between start and end tags.
        reasoning_content: All reasoning text extracted so far, accumulated
            across calls.
    """

    def __init__(self, tag_name: str = "thinking"):
        self.tag_name = tag_name
        self.buffer = ""
        self.in_reasoning = False
        self.reasoning_content = ""

    def _partial_suffix(self, tag: str) -> int:
        """Return the length of the longest proper prefix of *tag* that the
        buffer currently ends with (0 if none).

        Such a suffix might be the beginning of a tag whose remainder arrives
        in the next chunk, so it must be held back rather than emitted.
        """
        # Longest-first so the maximal candidate partial tag is retained.
        for k in range(min(len(tag) - 1, len(self.buffer)), 0, -1):
            if self.buffer.endswith(tag[:k]):
                return k
        return 0

    def process_chunk(self, chunk: str) -> tuple[str, str]:
        """Consume one stream chunk and split it by reasoning-tag state.

        Args:
            chunk: Next piece of the model's raw text stream.

        Returns:
            ``(reasoning_text, regular_text)`` — the newly completed text for
            each channel. Either element may be empty. Text that could be the
            start of a tag is withheld in ``self.buffer`` until disambiguated.
        """
        self.buffer += chunk
        reasoning_text = ""
        regular_text = ""

        start_tag = f"<{self.tag_name}>"
        end_tag = f"</{self.tag_name}>"

        while self.buffer:
            if not self.in_reasoning:
                start_pos = self.buffer.find(start_tag)
                if start_pos >= 0:
                    # Emit text before the tag, then switch to reasoning mode.
                    regular_text += self.buffer[:start_pos]
                    self.in_reasoning = True
                    self.buffer = self.buffer[start_pos + len(start_tag):]
                    continue
                # No complete start tag: emit everything except a possible
                # partial start tag, which stays buffered for the next chunk.
                held = self._partial_suffix(start_tag)
                cut = len(self.buffer) - held
                regular_text += self.buffer[:cut]
                self.buffer = self.buffer[cut:]
                break
            else:
                end_pos = self.buffer.find(end_tag)
                if end_pos >= 0:
                    # Emit reasoning up to the tag, then leave reasoning mode.
                    reasoning_text += self.buffer[:end_pos]
                    self.in_reasoning = False
                    self.buffer = self.buffer[end_pos + len(end_tag):]
                    continue
                # No complete end tag: emit everything except a possible
                # partial end tag, which stays buffered for the next chunk.
                held = self._partial_suffix(end_tag)
                cut = len(self.buffer) - held
                reasoning_text += self.buffer[:cut]
                self.buffer = self.buffer[cut:]
                break

        # Keep the running transcript of reasoning the attribute promises.
        self.reasoning_content += reasoning_text
        return reasoning_text, regular_text

    def flush(self) -> tuple[str, str]:
        """Drain any buffered text at end of stream.

        A partial tag held in ``self.buffer`` when the stream ends would
        otherwise be dropped silently. Returns ``(reasoning_text,
        regular_text)`` for the leftover, attributed to whichever mode the
        extractor was in, and clears the buffer.
        """
        leftover, self.buffer = self.buffer, ""
        if self.in_reasoning:
            self.reasoning_content += leftover
            return leftover, ""
        return "", leftover

@router.post("/stream")
async def stream_chat(request: ChatRequest):
    """Stream chat responses using Strands Agents SDK"""
    
    async def generate_response():
        try:
            chat_id = request.id
            
            # Get latest user message
            latest_user_msg = ""
            if request.messages:
                last_msg = request.messages[-1]
                if last_msg.get('role') == 'user':
                    text_parts = []
                    for part in last_msg.get('content', []):
                        if part.get('type') == 'text' and part.get('text'):
                            text_parts.append(part['text'])
                    latest_user_msg = ' '.join(text_parts)
            
            logger.info(f"Processing message through orchestrator for chat {chat_id}")
            
            # Use orchestrator for workflow-controlled processing
            message_id = str(uuid.uuid4())
            text_id = str(uuid.uuid4())
            
            # Send message start
            yield f'data: {{"type":"start","messageId":"{message_id}"}}\n\n'
            
            # Initialize reasoning extractor
            extractor = ReasoningExtractor("thinking")
            
            # Stream directly from graph
            text_started = False
            thinking_id = None
            sent_tool_results = set()  # Track sent tool results to avoid duplicates

            async for event in unified_orchestrator.stream_async(latest_user_msg):
                # Handle tool streaming events
                if "tool_stream_event" in event:
                    tool_stream = event["tool_stream_event"]
                    tool_use = tool_stream.get("tool_use", {})
                    tool_id = tool_use.get("toolUseId", "")
                    stream_data = tool_stream.get("data", "")

                    print(f"Tool Stream Event - ID: {tool_id}, Data: {stream_data}")
                    
                    if tool_id and stream_data:
                        yield f'data: {{"type":"tool-output-available","toolCallId":"{tool_id}","output":{json.dumps(stream_data)}}}\n\n'

                if "current_tool_use" in event:
                    tool_use = event["current_tool_use"]
                    tool_id = tool_use.get("toolUseId", str(uuid.uuid4()))
                    tool_name = tool_use.get("name", "unknown")
                    tool_input = tool_use.get("input", "{}")
                    
                    # Parse input if it's a string
                    try:
                        tool_args = json.loads(tool_input) if isinstance(tool_input, str) else tool_input
                    except:
                        tool_args = {"input": tool_input}
                    
                    # AI SDK tool streaming protocol
                    yield f'data: {{"type":"tool-input-start","toolCallId":"{tool_id}","toolName":"{tool_name}"}}\n\n'
                    
                    # Handle delta for streaming input
                    if "delta" in event and "toolUse" in event["delta"]:
                        delta_input = event["delta"]["toolUse"].get("input", "")
                        if delta_input:
                            yield f'data: {{"type":"tool-input-delta","toolCallId":"{tool_id}","inputTextDelta":{json.dumps(delta_input)}}}\n\n'
                    
                    yield f'data: {{"type":"tool-input-available","toolCallId":"{tool_id}","toolName":"{tool_name}","input":{json.dumps(tool_args)}}}\n\n'
                
                # Handle tool results from messages
                if "messages" in event:
                    messages = event["messages"]
                    if messages:
                        latest_msg = messages[-1]
                        if latest_msg.get("role") == "user" and "content" in latest_msg:
                            for content in latest_msg["content"]:
                                if "toolResult" in content:
                                    tool_result = content["toolResult"]
                                    tool_id = tool_result.get("toolUseId", "")
                                    
                                    # Only send if we haven't sent this tool result before
                                    if tool_id and tool_id not in sent_tool_results:
                                        result_content = tool_result.get("content", [])
                                        result_data = result_content[0].get("json", {}) if result_content else {}
                                        
                                        # AI SDK tool output protocol
                                        yield f'data: {{"type":"tool-output-available","toolCallId":"{tool_id}","output":{json.dumps(result_data)}}}\n\n'
                                        sent_tool_results.add(tool_id)
                
                if "data" in event:
                    chunk_text = event["data"]
                    reasoning_text, regular_text = extractor.process_chunk(chunk_text)
                    
                    # Send reasoning content if present
                    if reasoning_text:
                        if not thinking_id:
                            thinking_id = str(uuid.uuid4())
                            yield f'data: {{"type":"reasoning-start","id":"{thinking_id}"}}\n\n'
                        yield f'data: {{"type":"reasoning-delta","id":"{thinking_id}","delta":{json.dumps(reasoning_text)}}}\n\n'
                    
                    # Send regular text content if present
                    if regular_text:
                        if not text_started:
                            yield f'data: {{"type":"text-start","id":"{text_id}"}}\n\n'
                            text_started = True
                        yield f'data: {{"type":"text-delta","id":"{text_id}","delta":{json.dumps(regular_text)}}}\n\n'
            
            # Close streams
            if thinking_id:
                yield f'data: {{"type":"reasoning-end","id":"{thinking_id}"}}\n\n'
            if text_started:
                yield f'data: {{"type":"text-end","id":"{text_id}"}}\n\n'
            
            # Send message finish event
            yield f'data: {{"type":"finish"}}\n\n'
            
            # Send stream termination
            yield 'data: [DONE]\n\n'
                
        except Exception as e:
            logger.error(f"Error in tech orchestrator: {str(e)}")
            # Send structured error event
            yield f'data: {{"type":"error","error":{json.dumps(str(e))}}}\n\n'
            yield 'data: [DONE]\n\n'
    
    return StreamingResponse(
        generate_response(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*",
            "x-vercel-ai-ui-message-stream": "v1",
        }
    )