import os
import re
import csv
import json
import time
import uuid
import mimetypes
from collections import defaultdict
from urllib.parse import urlparse

import requests

# ----------------------------
# TOKENS / CONFIG (keep intact)
# ----------------------------
SHOPIFY_ADMIN_ACCESS_TOKEN = "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"
# Storefront token not needed for images; Admin API is best for full catalog + images
SHOPIFY_STOREFRONT_TOKEN = "53473c1d3ffeb61f339119ce6b3921df"

# Square tokens (kept intact)
ACCESS_TOKEN = "EAAAl60tZLgxdxhvD2Jdh7dRXcJz8nSsvcBW6g9rlXDVof6xFrXWMd29UCbOYmyW"

# ----------------------------
# ENDPOINTS
# ----------------------------
SHOPIFY_SHOP_DOMAIN = "fitfoodiesa.com"
SHOPIFY_API_VERSION = "2024-01"

# IMPORTANT: set this to your store's real *.myshopify.com domain. The Admin API
# is served from the myshopify host, so a stale or wrong value here returns 404s.
SHOPIFY_MYSHOPIFY_DOMAIN = "former-fatty-meals.myshopify.com"  # <-- placeholder; change if different

# We'll try both (custom domain + myshopify). Some setups 404 on one but not the other.
SHOPIFY_GRAPHQL_URLS = [
    f"https://{SHOPIFY_SHOP_DOMAIN}/admin/api/{SHOPIFY_API_VERSION}/graphql.json",
    f"https://{SHOPIFY_MYSHOPIFY_DOMAIN}/admin/api/{SHOPIFY_API_VERSION}/graphql.json",
]

SQUARE_BASE_URL = "https://connect.squareup.com/v2"
SQUARE_VERSION = "2026-01-22"

# ----------------------------
# OUTPUTS / FLAGS
# ----------------------------
DOWNLOAD_DIR = "./_tmp_shopify_images"
OUT_MATCH_REPORT = "shopify_to_square_image_map.csv"
OUT_UPLOAD_RESULTS = "shopify_to_square_upload_results.csv"

# Safety switch
DRY_RUN = False   # True = no Square writes (still fetch + map + download)
MAX_IMAGES_PER_PRODUCT = 10
SLEEP_BETWEEN_CALLS_S = 0.25

# ----------------------------
# HEADERS
# ----------------------------
SHOPIFY_HEADERS = {
    "Content-Type": "application/json",
    "X-Shopify-Access-Token": SHOPIFY_ADMIN_ACCESS_TOKEN,
}

SQUARE_HEADERS = {
    "Authorization": f"Bearer {ACCESS_TOKEN}",
    "Square-Version": SQUARE_VERSION,
    "Accept": "application/json",
}
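
# Optional sanity check: Square's ListLocations endpoint (GET /v2/locations) is a
# cheap way to confirm the access token before a full run. Uncomment to use:
# resp = requests.get(f"{SQUARE_BASE_URL}/locations", headers=SQUARE_HEADERS, timeout=30)
# resp.raise_for_status()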


# ----------------------------
# HELPERS
# ----------------------------
def norm_name(s: str) -> str:
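    """Normalize a product title for matching: lowercase, "&" -> "and",
    punctuation other than hyphens collapsed to spaces, whitespace squeezed.

    e.g. norm_name("Chicken & Rice Bowl!") -> "chicken and rice bowl"
    """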
    if not s:
        return ""
    s = s.strip().lower()
    s = s.replace("&", "and")
    s = re.sub(r"[\u2018\u2019\u201c\u201d]", "'", s)
    s = re.sub(r"[^a-z0-9\s\-]", " ", s)
    s = re.sub(r"\s+", " ", s).strip()
    return s


def safe_filename(s: str) -> str:
    return "".join(c if c.isalnum() or c in ("-", "_", ".") else "_" for c in s)[:180]


def infer_mime_and_ext(url: str, content_type: str | None):
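    """Infer (mime, ext) for a downloaded image, preferring the response
    Content-Type header and falling back to the URL's file extension."""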
    if content_type and content_type.startswith("image/"):
        mime = content_type.split(";")[0].strip()
        ext = mimetypes.guess_extension(mime) or ""
        return mime, ext

    path = urlparse(url).path
    _, ext = os.path.splitext(path)
    ext = ext.lower()
    mime = mimetypes.types_map.get(ext, "application/octet-stream")
    return mime, ext


def download_image(url: str, dest_dir: str, name_hint: str):
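    """Download one image and return (local_path, mime, byte_size, content_type)."""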
    os.makedirs(dest_dir, exist_ok=True)

    r = requests.get(url, timeout=60, allow_redirects=True)
    r.raise_for_status()

    ct = r.headers.get("Content-Type")
    mime, ext = infer_mime_and_ext(url, ct)
    if not ext:
        ext = mimetypes.guess_extension(mime) or ".img"

    path = os.path.join(dest_dir, safe_filename(name_hint) + ext)
    with open(path, "wb") as f:
        f.write(r.content)

    return path, mime, len(r.content), ct


def square_search_items():
    """Pull all ITEM objects from the NEW Square store."""
    items = []
    cursor = None
    while True:
        payload = {"object_types": ["ITEM"]}
        if cursor:
            payload["cursor"] = cursor

        resp = requests.post(
            f"{SQUARE_BASE_URL}/catalog/search",
            headers=SQUARE_HEADERS,
            json=payload,
            timeout=60
        )
        resp.raise_for_status()
        data = resp.json()
        items.extend(data.get("objects", []))
        cursor = data.get("cursor")
        if not cursor:
            break

        time.sleep(SLEEP_BETWEEN_CALLS_S)
    return items


def square_create_and_attach_image(new_item_id: str, image_path: str, mime: str, caption: str, is_primary: bool):
    """
    POST /v2/catalog/images multipart:
      - file: image bytes
      - request: JSON with object_id (attach)
    """
    req_body = {
        "idempotency_key": str(uuid.uuid4()),
        "object_id": new_item_id,
        "image": {
            "id": "#TEMP_ID",
            "type": "IMAGE",
            "image_data": {"caption": (caption or "")[:1000]},
        },
        "is_primary": bool(is_primary),
    }

    with open(image_path, "rb") as f:
        # Send the "request" part with an explicit application/json content type
        # alongside the raw image bytes, matching the multipart shape Square expects.
        files = {
            "request": (None, json.dumps(req_body), "application/json"),
            "file": (os.path.basename(image_path), f, mime),
        }
        resp = requests.post(
            f"{SQUARE_BASE_URL}/catalog/images",
            headers=SQUARE_HEADERS,
            files=files,
            timeout=120
        )

    try:
        payload = resp.json()
    except Exception:
        payload = {"raw_text": resp.text}

    if resp.status_code >= 400:
        return False, None, payload

    image_obj = payload.get("image") or {}
    return True, image_obj.get("id"), payload


def shopify_graphql(query: str, variables: dict | None = None):
    """
    Tries multiple Admin GraphQL hosts because some merchants 404 on custom domain admin routes.
    Prints which host succeeded the first time.
    """
    payload = {"query": query, "variables": variables or {}}
    last_error = None

    for url in SHOPIFY_GRAPHQL_URLS:
        try:
            resp = requests.post(url, headers=SHOPIFY_HEADERS, json=payload, timeout=60)

            # Shopify throttling guard (soft): honor Retry-After when present
            if resp.status_code == 429:
                time.sleep(float(resp.headers.get("Retry-After", 2.0)))
                resp = requests.post(url, headers=SHOPIFY_HEADERS, json=payload, timeout=60)

            # If 404, try next URL
            if resp.status_code == 404:
                last_error = f"404 Not Found at {url}"
                continue

            resp.raise_for_status()

            data = resp.json()
            if "errors" in data:
                raise RuntimeError(json.dumps(data["errors"])[:2000])

            # success
            if not hasattr(shopify_graphql, "_printed_ok_host"):
                print(f"✅ Shopify Admin GraphQL OK: {url}")
                shopify_graphql._printed_ok_host = True  # type: ignore[attr-defined]
            return data

        except requests.HTTPError as e:
            last_error = f"HTTPError at {url}: {str(e)}"
        except Exception as e:
            last_error = f"Error at {url}: {str(e)}"

    raise RuntimeError(f"Shopify Admin GraphQL failed on all hosts. Last error: {last_error}")


def fetch_shopify_products_with_images(limit_images=10):
    """
    Pull all Shopify products (title + images) via Admin GraphQL with pagination.
    Returns list of: {title, product_id, image_urls[]}
    """
    query = """
    query ProductsWithImages($cursor: String, $imgFirst: Int!) {
      products(first: 250, after: $cursor) {
        pageInfo { hasNextPage }
        edges {
          cursor
          node {
            id
            title
            images(first: $imgFirst) {
              edges {
                node {
                  url
                }
              }
            }
          }
        }
      }
    }
    """

    results = []
    cursor = None
    while True:
        data = shopify_graphql(query, {"cursor": cursor, "imgFirst": int(limit_images)})
        edges = data["data"]["products"]["edges"]
        for e in edges:
            node = e["node"]
            urls = [ie["node"]["url"] for ie in node["images"]["edges"] if ie.get("node", {}).get("url")]
            results.append({
                "product_id": node["id"],
                "title": node["title"],
                "image_urls": urls
            })
            cursor = e["cursor"]

        if not data["data"]["products"]["pageInfo"]["hasNextPage"]:
            break

        time.sleep(SLEEP_BETWEEN_CALLS_S)

    return results


def main():
    print("Shopify Admin GraphQL hosts to try:")
    for u in SHOPIFY_GRAPHQL_URLS:
        print(" -", u)

    # 1) Load Shopify products + images
    shopify_products = fetch_shopify_products_with_images(limit_images=MAX_IMAGES_PER_PRODUCT)
    print(f"Shopify products pulled: {len(shopify_products)}")

    # Index Shopify by normalized title
    shopify_index = defaultdict(list)
    for p in shopify_products:
        key = norm_name(p["title"])
        if key:
            shopify_index[key].append(p)

    # 2) Load Square NEW store items
    square_items = square_search_items()
    print(f"Square NEW store items pulled: {len(square_items)}")

    # Index Square items by normalized name
    square_index = defaultdict(list)
    for it in square_items:
        item_id = it.get("id")
        name = (it.get("item_data") or {}).get("name") or ""
        key = norm_name(name)
        if key:
            square_index[key].append({"new_item_id": item_id, "new_item_name": name})

    # 3) Build mapping report (Shopify title -> Square item id)
    mapping_rows = []
    unmatched_shopify_titles = 0
    ambiguous = 0

    for key, products in shopify_index.items():
        candidates = square_index.get(key, [])
        if not candidates:
            for p in products:
                mapping_rows.append({
                    "match_status": "NO_MATCH_IN_SQUARE",
                    "shopify_title": p["title"],
                    "shopify_product_id": p["product_id"],
                    "shopify_image_url": "",
                    "new_item_id": "",
                    "new_item_name": ""
                })
            unmatched_shopify_titles += len(products)
            continue

        if len(candidates) > 1:
            ambiguous += 1

        for p in products:
            for url in (p["image_urls"] or []):
                for c in candidates:
                    mapping_rows.append({
                        "match_status": "AMBIGUOUS" if len(candidates) > 1 else "MATCHED",
                        "shopify_title": p["title"],
                        "shopify_product_id": p["product_id"],
                        "shopify_image_url": url,
                        "new_item_id": c["new_item_id"],
                        "new_item_name": c["new_item_name"],
                    })

    # Write mapping report
    with open(OUT_MATCH_REPORT, "w", newline="", encoding="utf-8") as f:
        fields = ["match_status", "shopify_title", "shopify_product_id", "shopify_image_url", "new_item_id", "new_item_name"]
        w = csv.DictWriter(f, fieldnames=fields)
        w.writeheader()
        w.writerows(mapping_rows)

    print(f"Wrote mapping report: {OUT_MATCH_REPORT}")
    print(f"Ambiguous title keys: {ambiguous}")
    print(f"Unmatched Shopify titles (rows): {unmatched_shopify_titles}")

    # 4) Upload to Square (optional / DRY_RUN)
    upload_candidates = [
        r for r in mapping_rows
        if r["match_status"] == "MATCHED"
        and r["shopify_image_url"]
        and r["new_item_id"]
    ]
    print(f"Upload candidates (MATCHED only): {len(upload_candidates)}")

    if DRY_RUN:
        print("DRY_RUN=True → skipping Square uploads.")
        return
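
    # Note: each upload below uses a fresh idempotency key (uuid4), so re-running
    # the script will attach duplicate images; clear existing Square images or
    # de-duplicate before a second pass.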

    by_item = defaultdict(list)
    for r in upload_candidates:
        by_item[r["new_item_id"]].append(r)

    results = []
    ok = 0
    total = 0

    for new_item_id, rows in by_item.items():
        # rows preserve Shopify image order, so idx == 0 below marks the store's
        # featured image as the Square primary image.

        for idx, r in enumerate(rows):
            total += 1
            title = r["shopify_title"]
            url = r["shopify_image_url"]

            try:
                hint = f"{title}_{new_item_id}_{idx}"
                img_path, mime, size_b, ct = download_image(url, DOWNLOAD_DIR, hint)
            except Exception as e:
                results.append({
                    "status": "DOWNLOAD_FAIL",
                    "new_item_id": new_item_id,
                    "new_item_name": r["new_item_name"],
                    "shopify_title": title,
                    "shopify_image_url": url,
                    "download_path": "",
                    "square_new_image_id": "",
                    "error": str(e),
                })
                continue

            try:
                is_primary = (idx == 0)
                caption = title
                success, new_image_id, payload = square_create_and_attach_image(new_item_id, img_path, mime, caption, is_primary)
                if success:
                    ok += 1
                    results.append({
                        "status": "UPLOADED_ATTACHED",
                        "new_item_id": new_item_id,
                        "new_item_name": r["new_item_name"],
                        "shopify_title": title,
                        "shopify_image_url": url,
                        "download_path": img_path,
                        "square_new_image_id": new_image_id or "",
                        "error": "",
                    })
                else:
                    results.append({
                        "status": "UPLOAD_FAIL",
                        "new_item_id": new_item_id,
                        "new_item_name": r["new_item_name"],
                        "shopify_title": title,
                        "shopify_image_url": url,
                        "download_path": img_path,
                        "square_new_image_id": "",
                        "error": json.dumps(payload)[:4000],
                    })
            except Exception as e:
                results.append({
                    "status": "UPLOAD_EXCEPTION",
                    "new_item_id": new_item_id,
                    "new_item_name": r["new_item_name"],
                    "shopify_title": title,
                    "shopify_image_url": url,
                    "download_path": img_path,
                    "square_new_image_id": "",
                    "error": str(e),
                })

            time.sleep(SLEEP_BETWEEN_CALLS_S)

    with open(OUT_UPLOAD_RESULTS, "w", newline="", encoding="utf-8") as f:
        fields = ["status", "new_item_id", "new_item_name", "shopify_title", "shopify_image_url", "download_path", "square_new_image_id", "error"]
        w = csv.DictWriter(f, fieldnames=fields)
        w.writeheader()
        w.writerows(results)

    print(f"Upload complete: {ok}/{total} successful")
    print(f"Wrote upload results: {OUT_UPLOAD_RESULTS}")


if __name__ == "__main__":
    main()