import re
import requests
import dns.resolver
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
from datetime import datetime
from collections import deque
from helpers.db import get_connection

COMMON_PAGES = ["", "/contact", "/about", "/team"]
MAX_DEPTH = 3
VALID_EMAIL_REGEX = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")

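# Substrings of big-chain gym names we skip during enrichment (matched
# case-insensitively against the gym's name).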
BLACKLISTED_GYMS = [
    "gold's gym", "golds gym", "planet fitness", "trufit", "la fitness",
    "crunch fitness", "24 hour fitness", "anytime fitness",
    "blink fitness", "equinox", "orangetheory", "war house",
    "life time", "alio", "gnc", "shoppe", "rock's", "fitness connection"
]

def extract_emails(text):
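    # Pull every email-shaped string out of the raw HTML; false positives
    # (e.g. asset names like "logo@2x.png") are weeded out later by validate_email().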
    return re.findall(r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+", text)

def has_mx_record(email):
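    # True if the email's domain publishes at least one MX record; any DNS
    # failure (NXDOMAIN, no answer, timeout) is treated as "no".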
    try:
        domain = email.split('@')[1]
        answers = dns.resolver.resolve(domain, 'MX')
        return len(answers) > 0
    except Exception:
        return False

def validate_email(email):
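    # Cheap syntactic check first; only then do the slower DNS MX lookup.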
    if not VALID_EMAIL_REGEX.fullmatch(email):
        return False
    return has_mx_record(email)

def repair_email(email):
    # Extraction sometimes grabs leading or trailing junk. Progressively trim
    # characters from the start or the end (each side independently) until the
    # remainder validates. Note each attempt may trigger a DNS lookup.
    for i in range(len(email)):
        trimmed_start = email[i:]
        if validate_email(trimmed_start):
            return trimmed_start
        trimmed_end = email[:len(email)-i]
        if validate_email(trimmed_end):
            return trimmed_end
    return None

def get_internal_links(base_url, html):
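    # Collect same-domain links from the page, skipping fragment links, PDFs,
    # and tel:/mailto: hrefs.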
    soup = BeautifulSoup(html, 'html.parser')
    parsed_base = urlparse(base_url)
    links = set()

    for tag in soup.find_all('a', href=True):
        # Resolve relative hrefs against the page they came from (not the domain
        # root) so paths like "about.html" resolve correctly.
        href = urljoin(base_url, tag['href'])
        if urlparse(href).netloc == parsed_base.netloc and not any(x in href for x in ['#', '.pdf', 'tel:', 'mailto:']):
            links.add(href)
    return links

def crawl_and_find_emails(start_url):
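    # Breadth-first crawl of internal links up to MAX_DEPTH, returning the first
    # (email, source_url) pair that validates (or can be repaired).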
    visited = set()
    queue = deque([(start_url, 0)])

    while queue:
        url, depth = queue.popleft()
        if url in visited or depth > MAX_DEPTH:
            continue

        visited.add(url)
        try:
            print(f"🔗 Crawling {url}")
            resp = requests.get(url, timeout=5)
            if resp.status_code != 200:
                continue

            emails = extract_emails(resp.text)
            for email in emails:
                if validate_email(email):
                    return email, url
                repaired = repair_email(email)
                if repaired:
                    return repaired, url

            if depth < MAX_DEPTH:
                for link in get_internal_links(url, resp.text):
                    queue.append((link, depth + 1))

        except Exception:
            # A failure on one page (timeout, bad HTML, DNS) shouldn't abort the crawl.
            continue

    return None, None

def find_email(base_url):
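    # Returns (email, source_url) on success, otherwise (None, None).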
    # Quick check common subpages
    for page in COMMON_PAGES:
        try:
            full_url = urljoin(base_url, page)
            print(f"🔍 Checking {full_url}")
            resp = requests.get(full_url, timeout=5)
            if resp.status_code != 200:
                continue

            emails = extract_emails(resp.text)
            for email in emails:
                if validate_email(email):
                    return email, full_url
                repaired = repair_email(email)
                if repaired:
                    return repaired, full_url
        except Exception:
            continue

    # Deep crawl if none found
    print("🔁 Starting deep crawl...")
    return crawl_and_find_emails(base_url)

def enrich_gyms():
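    # Fill in missing emails for gyms that have a website, skipping blacklisted
    # chains, and record which page each address was found on.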
    conn = get_connection()
    cursor = conn.cursor(dictionary=True)

    cursor.execute("SELECT * FROM gym_contacts WHERE email IS NULL AND website IS NOT NULL")
    gyms = cursor.fetchall()

    for gym in gyms:
        name_lower = gym['name'].lower()
        if any(banned in name_lower for banned in BLACKLISTED_GYMS):
            print(f"🚫 Skipping blacklisted gym: {gym['name']}")
            continue

        print(f"\n➡️ Processing: {gym['name']} ({gym['website']})")
        email, source = find_email(gym['website'])
        if email:
            cursor.execute("""
                UPDATE gym_contacts
                SET email = %s, email_source_page = %s, enriched_at = NOW()
                WHERE id = %s
            """, (email, source, gym['id']))
            print(f"✅ Found: {email} from {source}")
        else:
            print(f"❌ No email found for {gym['name']}")
        conn.commit()

    cursor.close()
    conn.close()

if __name__ == "__main__":
    enrich_gyms()