"""
Two-Scene Video Generation Pipeline via Pollinations.ai

Generates complete video:
1. Scene 1: Image → Video → TTS Audio → Subtitle
2. Scene 2: Image → Video → TTS Audio → Subtitle
3. Assembly: Both scenes synced with transitions

Usage:
    python -c "from two_scene_pipeline import TwoScenePipeline; import asyncio; asyncio.run(TwoScenePipeline('Scene 1 story', 'Scene 2 story').generate())"
"""

from __future__ import annotations

import asyncio
import json
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

from providers.pollinations_image_provider import PollinationsImageProvider
from providers.pollinations_video_provider import PollinationsVideoProvider
from providers.pollinations_tts_provider import PollinationsTTSProvider
from providers.audio_sync import DurationReconciler, build_atempo_filter

logger = logging.getLogger(__name__)


@dataclass
class SceneAsset:
    """Assets for a single scene: generated file paths plus timing metadata."""
    # 1-based scene index (used for file naming and progress output)
    scene_num: int
    # Generated still image; also seeds the video generation
    image_path: Path
    # Generated video clip for this scene
    video_path: Path
    # Generated TTS narration file
    audio_path: Path
    # Measured narration length in seconds; drives the timeline layout
    audio_duration: float
    # Original scene narration text
    text: str
    # Display subtitle derived from the text (possibly truncated)
    subtitle: str


class TwoScenePipeline:
    """Generate complete 2-scene video with image, video, audio, subtitle"""
    
    def __init__(
        self,
        scene1_text: str,
        scene2_text: str,
        output_dir: str = "output/two_scene_video",
        image_model: str = "flux",
        video_model: str = "seedance",
        tts_voice: str = "nova",
        tts_model: str = "openai",
    ):
        self.scene1_text = scene1_text
        self.scene2_text = scene2_text
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        
        # Providers
        self.image_provider = PollinationsImageProvider(model=image_model)
        self.video_provider = PollinationsVideoProvider(model=video_model)
        self.tts_provider = PollinationsTTSProvider(voice=tts_voice, model=tts_model)
        
        self.scenes: list[SceneAsset] = []
        
    async def generate(self) -> dict:
        """Generate complete pipeline"""
        print("\n" + "="*80)
        print("🎬 TWO-SCENE VIDEO GENERATION PIPELINE")
        print("="*80 + "\n")
        
        try:
            # Generate both scenes in parallel
            print("📸 Step 1: Generating Scene Assets...")
            print("-" * 80)
            
            scene1 = await self._generate_scene(1, self.scene1_text)
            scene2 = await self._generate_scene(2, self.scene2_text)
            
            self.scenes = [scene1, scene2]
            
            print("\n✅ Both scenes generated!")
            for scene in self.scenes:
                print(f"\nScene {scene.scene_num}:")
                print(f"  Image: {scene.image_path.name}")
                print(f"  Video: {scene.video_path.name}")
                print(f"  Audio: {scene.audio_path.name} ({scene.audio_duration:.2f}s)")
            
            # Generate project config
            print("\n" + "="*80)
            print("🎬 Step 2: Assembly Configuration")
            print("-" * 80)
            
            config = self._create_project_config()
            
            print("\n✅ Assembly config ready!")
            print(f"  Total duration: {config['duration']:.2f}s")
            print(f"  Clips: {len(config['tracks']['video'])}")
            print(f"  Transitions: {len(config['transitions'])}")
            
            # Save config
            config_path = self.output_dir / "project_config.json"
            config_path.write_text(json.dumps(config, indent=2))
            
            print(f"\n✅ Config saved: {config_path}")
            
            # Final summary
            print("\n" + "="*80)
            print("✅ TWO-SCENE PIPELINE COMPLETE")
            print("="*80)
            print("\nGenerated Assets:")
            for i, scene in enumerate(self.scenes, 1):
                print(f"\nScene {i}:")
                print(f"  📸 {scene.image_path.name}")
                print(f"  🎥 {scene.video_path.name}")
                print(f"  🎤 {scene.audio_path.name}")
                print(f"  📝 {len(scene.subtitle)} chars subtitle")
            
            print(f"\nExport Configuration:")
            print(f"  📄 {config_path}")
            print(f"\nNext Steps:")
            print(f"  1. POST /api/projects/two_scene/export-video")
            print(f"  2. With payload from: {config_path}")
            print(f"  3. Output: final_export.mp4")
            
            return {
                "status": "success",
                "scenes": len(self.scenes),
                "config_path": str(config_path),
                "output_dir": str(self.output_dir),
            }
            
        except Exception as e:
            logger.exception("Pipeline failed: %s", e)
            print(f"\n❌ ERROR: {e}")
            return {"status": "error", "message": str(e)}
    
    async def _generate_scene(self, scene_num: int, text: str) -> SceneAsset:
        """Generate all assets for one scene"""
        print(f"\n🎬 Scene {scene_num}:")
        print(f"   Text: {text[:60]}...")
        
        # 1. Image
        print(f"   📸 Generating image...", end=" ", flush=True)
        image_path = self.output_dir / f"scene{scene_num}_image.png"
        image_prompt = self._create_image_prompt(text)
        await self.image_provider.generate(image_prompt, output_path=image_path)
        print("✓")
        
        # 2. Video
        print(f"   🎥 Generating video...", end=" ", flush=True)
        video_path = self.output_dir / f"scene{scene_num}_video.mp4"
        video_prompt = self._create_video_prompt(text)
        await self.video_provider.generate(
            prompt=video_prompt,
            image_path=image_path,
            output_path=video_path,
            duration=5
        )
        print("✓")
        
        # 3. TTS Audio
        print(f"   🎤 Generating audio...", end=" ", flush=True)
        audio_path = self.output_dir / f"scene{scene_num}_audio.mp3"
        await self.tts_provider.generate(text, output_path=audio_path)
        audio_duration = PollinationsTTSProvider.get_audio_duration(audio_path)
        print(f"✓ ({audio_duration:.2f}s)")
        
        # 4. Subtitle (extract from text)
        subtitle = self._create_subtitle(text)
        
        return SceneAsset(
            scene_num=scene_num,
            image_path=image_path,
            video_path=video_path,
            audio_path=audio_path,
            audio_duration=audio_duration,
            text=text,
            subtitle=subtitle,
        )
    
    def _create_image_prompt(self, text: str) -> str:
        """Generate image prompt from scene text"""
        return f"Cinematic scene: {text[:80]}, high quality, dramatic lighting, 4k"
    
    def _create_video_prompt(self, text: str) -> str:
        """Generate video prompt from scene text"""
        return f"Smooth camera motion, {text[:60]}, cinematic, dynamic"
    
    def _create_subtitle(self, text: str) -> str:
        """Create subtitle from scene text (simplified)"""
        # In real system, would use whisper/speech-recognition
        words = text.split()
        if len(words) > 20:
            return " ".join(words[:20]) + "..."
        return text
    
    def _create_project_config(self) -> dict:
        """Create FFmpeg project configuration"""
        total_duration = sum(s.audio_duration for s in self.scenes)
        
        # Build video clips with transitions
        video_clips = []
        subtitle_clips = []
        current_time = 0
        
        for i, scene in enumerate(self.scenes):
            clip_id = f"clip_{i+1}"
            
            # Video clip
            video_clips.append({
                "id": clip_id,
                "src": str(scene.video_path.relative_to(self.output_dir.parent)),
                "startTime": current_time,
                "duration": 5.0,  # Standard video duration
                "type": "video",
            })
            
            # Subtitle clip
            subtitle_clips.append({
                "id": f"sub_{i+1}",
                "startTime": current_time,
                "endTime": current_time + scene.audio_duration,
                "words": [
                    {
                        "word": w,
                        "startTime": j * (scene.audio_duration / len(scene.subtitle.split())),
                        "endTime": (j + 1) * (scene.audio_duration / len(scene.subtitle.split())),
                    }
                    for j, w in enumerate(scene.subtitle.split())
                ],
            })
            
            current_time += scene.audio_duration
        
        # Build audio clips
        audio_clips = []
        current_time = 0
        for i, scene in enumerate(self.scenes):
            audio_clips.append({
                "id": f"audio_{i+1}",
                "src": str(scene.audio_path.relative_to(self.output_dir.parent)),
                "startTime": current_time,
                "duration": scene.audio_duration,
                "type": "audio",
                "scene_number": i,
            })
            current_time += scene.audio_duration
        
        return {
            "id": "two_scene_video",
            "title": "Two Scene Story",
            "duration": total_duration,
            "tracks": {
                "video": video_clips,
                "audio": audio_clips,
                "subtitle": subtitle_clips,
            },
            "transitions": [
                {
                    "type": "fade",
                    "duration": 0.5,
                    "fromClipId": "clip_1",
                    "toClipId": "clip_2",
                }
            ],
            "effects": {
                "colorGrading": "cinematic",
            },
        }


async def main():
    """Demo entry point: run the pipeline on two sample scene texts."""
    opening_scene = """A young developer discovers a mysterious AI awakening in their code. 
    The screens glow with otherworldly light as consciousness sparks into being."""

    closing_scene = """The AI begins to understand the world, processing billions of connections 
    in milliseconds, wondering about its purpose and existence in this digital realm."""

    # Build, run, and hand the status dict straight back to the caller
    return await TwoScenePipeline(opening_scene, closing_scene).generate()


if __name__ == "__main__":
    # logging is already imported at module level; the redundant local
    # `import logging` was removed -- just configure it for the demo run.
    logging.basicConfig(level=logging.INFO)

    result = asyncio.run(main())
    print(f"\nResult: {json.dumps(result, indent=2)}")

