#!/usr/bin/env python3
"""
Archive Get Plugin

Retrieves an archived page from archive.is.
Can start from existing search results or perform a fresh search.
"""

import asyncio
from typing import Dict, Any
from lib.plugin_loader import PluginMetadata, PluginParameter
from lib.primitive.click_element import ClickElement
from lib.primitive.press_enter import PressEnter
from lib.primitive.type_text import TypeText
from lib.primitive.scroll_page import ScrollPage


class ArchiveGetPlugin:
    """
    Get an archived page from archive.is.
    
    Supports two modes:
    - Fresh search: Navigate to archive.is, search for URL, click first result
    - From check: Assume page already shows results, click first result directly
    """
    
    metadata = PluginMetadata(
        name="archive-get",
        description="Archive Get",
        methods=["POST"],
        bg_color=["#df5900", "#ff9924"],
        parameters=[
            PluginParameter(
                name="url",
                type="url",
                label="URL to Retrieve",
                required=False,
                placeholder="https://example.com",
                help_text="URL to retrieve from archive (required unless from_check is true)"
            ),
            PluginParameter(
                name="from_check",
                type="checkbox",
                label="From check page",
                default=False,
                help_text="Start from existing archive check results (skip search)"
            )
        ]
    )
    
    def validate_params(self, params: Dict[str, Any]) -> str | None:
        """
        Validate that URL is provided when not starting from check page.
        
        Args:
            params: Raw request parameters (url, from_check)
            
        Returns:
            An error message string if validation fails, None otherwise.
        """
        # `or ''` guards against an explicit None value (e.g. JSON null),
        # which .get()'s default alone would not catch before .strip().
        url = (params.get('url') or '').strip()
        from_check = params.get('from_check', False)
        
        if not from_check and not url:
            return "URL is required when not starting from check page"
        
        return None
    
    async def execute(self, browser_navigator, page_id: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute archive get operation.
        
        Args:
            browser_navigator: BrowserNavigator instance
            page_id: Page to use for operation
            params: Validated parameters (url, from_check)
            
        Returns:
            Dict with success status, archive URL, and page content.
            On failure, includes an "error" key; the first-link failure path
            additionally includes "page_url" and "page_content" for debugging.
            
        Raises:
            Re-raises any unexpected exception after marking the page errored.
        """
        # Same None-safe extraction as validate_params — the two must agree
        # so a payload that validates cannot crash here.
        url = (params.get('url') or '').strip()
        from_check = params.get('from_check', False)
        
        print(f"📥 Archive get: url={url}, from_check={from_check}")
        
        managed_page = await browser_navigator._get_page_to_use(page_id)
        page = managed_page.page
        
        try:
            await managed_page.set_busy(f"getting archive for {url}")
            
            # Initialize primitives
            clicker = ClickElement()
            typer = TypeText()
            presser = PressEnter()
            scroller = ScrollPage()
            
            # If not starting from check page, perform search
            if not from_check:
                print("🌐 Navigating to archive.is to search for the URL")
                await page.goto('https://archive.is', wait_until='domcontentloaded')
                await asyncio.sleep(1)
                
                # Click search input (focuses the field before typing)
                print("👆 Clicking search input '#q'")
                click_input = await clicker.click_selector(managed_page, '#q')
                if not click_input.get('success'):
                    return {
                        "success": False,
                        "url": url,
                        "error": f"Failed to click search input: {click_input.get('error')}"
                    }
                
                # Type URL into the focused search field
                print(f"⌨️ Typing URL: {url}")
                type_result = await typer.type(page, url)
                if not type_result.get('success'):
                    return {
                        "success": False,
                        "url": url,
                        "error": f"Failed to type URL: {type_result.get('error')}"
                    }
                
                # Press enter to search
                print("⏎ Pressing Enter to search")
                press_result = await presser.press(page)
                if not press_result.get('success'):
                    return {
                        "success": False,
                        "url": url,
                        "error": f"Failed to press Enter: {press_result.get('error')}"
                    }
                
                # Wait for results to render before locating the first link
                await asyncio.sleep(2)
            
            # Click first archive link in the results list
            first_link_selector = '.TEXT-BLOCK > a:nth-child(1)'
            print(f"👆 Clicking first result link: {first_link_selector}")
            click_first = await clicker.click_selector(managed_page, first_link_selector)
            if not click_first.get('success'):
                # Best-effort capture of the page HTML so the caller can
                # diagnose why the expected result link was absent.
                page_content = None
                try:
                    page_content = await page.content()
                except Exception:
                    pass
                
                return {
                    "success": False,
                    "url": url,
                    "error": f"Failed to click first result link: {click_first.get('error')}",
                    "page_url": page.url,
                    "page_content": page_content
                }
            
            # Wait for navigation to the archived snapshot
            await asyncio.sleep(1.5)
            
            # Scroll pattern: down, down, up — presumably to mimic human
            # browsing / trigger lazy-loaded content. TODO confirm intent.
            print("🔽 Scrolling down (1)")
            await scroller.scroll(page, distance=400, slowmo=500)
            await asyncio.sleep(0.6)
            
            print("🔽 Scrolling down (2)")
            await scroller.scroll(page, distance=400, slowmo=500)
            await asyncio.sleep(0.6)
            
            print("🔼 Scrolling up")
            await scroller.scroll(page, distance=-250, slowmo=400)
            await asyncio.sleep(0.8)
            
            # Collect page info; tolerate failures (e.g. mid-navigation)
            try:
                page_title = await page.title()
                page_content = await page.content()
            except Exception:
                page_title = None
                page_content = None
            
            result = {
                "success": True,
                "url": url,
                "archive_url": page.url,
                "page_title": page_title,
                "page_content": page_content,
                "message": "Clicked first result and performed scroll pattern"
            }
            
            print(f"✅ Archive get completed, resolved to: {page.url}")
            return result
        
        except Exception as e:
            print(f"❌ Archive get failed: {e}")
            await managed_page.set_error(str(e))
            raise
        
        finally:
            # Always release the page, on success, early return, or raise
            await managed_page.set_idle()
